drm/i915: Use the Linux workqueue API
author     François Tigeot <ftigeot@wolfpond.org>
           Sat, 3 May 2014 09:12:14 +0000 (11:12 +0200)
committer  François Tigeot <ftigeot@wolfpond.org>
           Sat, 3 May 2014 09:25:56 +0000 (11:25 +0200)
Replace the taskqueue(9)-based deferred work handling with the Linux
workqueue API, opportunistically reducing differences with Linux 3.8.13.
The API correspondence used throughout the patch is summarized after the
file list below.

sys/dev/drm/i915/i915_debug.c
sys/dev/drm/i915/i915_dma.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_irq.c
sys/dev/drm/i915/intel_display.c
sys/dev/drm/i915/intel_dp.c
sys/dev/drm/i915/intel_drv.h
sys/dev/drm/i915/intel_pm.c
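
In rough terms the conversion applies the API correspondence below (a
review summary, not part of the patch). Handler signatures change from
fn(void *arg, int pending) to fn(struct work_struct *), with the context
pointer recovered via container_of():

    /*
     * taskqueue(9)                             Linux workqueue
     * ---------------------------------------  ------------------------------
     * struct task                              struct work_struct
     * struct timeout_task                      struct delayed_work
     * TASK_INIT(&t, 0, fn, arg)                INIT_WORK(&w, fn)
     * TIMEOUT_TASK_INIT(tq, &tt, 0, fn, arg)   INIT_DELAYED_WORK(&dw, fn)
     * taskqueue_enqueue(tq, &t)                queue_work(wq, &w)
     * taskqueue_enqueue_timeout(tq, &tt, d)    queue_delayed_work(wq, &dw, d)
     * taskqueue_cancel(tq, &t, NULL)
     *   + taskqueue_drain(tq, &t)              cancel_work_sync(&w)
     * taskqueue_cancel_timeout + _drain        cancel_delayed_work_sync(&dw)
     * taskqueue_create + _start_threads        alloc_ordered_workqueue(n, 0)
     * taskqueue_free(tq)                       destroy_workqueue(wq)
     */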

diff --git a/sys/dev/drm/i915/i915_debug.c b/sys/dev/drm/i915/i915_debug.c
index 9246b88..215bc98 100644
@@ -1164,12 +1164,13 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
                return (0);
        }
 
-       if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL))
-               return (EINTR);
+       if (lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE|LK_SLEEPFAIL))
+               return -EINTR;
 
        sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-       for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+       for (gpu_freq = dev_priv->rps.min_delay;
+            gpu_freq <= dev_priv->rps.max_delay;
             gpu_freq++) {
                I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
                I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1184,9 +1185,9 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
                sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
        }
 
-       DRM_UNLOCK(dev);
+       lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
 
-       return (0);
+       return 0;
 }
 
 static int
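
The loop above queries the PCU through the GEN6_PCODE mailbox and the
hunk cuts off mid-statement; for reference, a sketch of the complete
handshake in the inline style used here (the GEN6_PCODE_* register
macros and the wait_for() poll helper are assumed from the driver's
existing headers):

    I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
    I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
               GEN6_PCODE_READ_MIN_FREQ_TABLE);
    if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
                  GEN6_PCODE_READY) == 0, 10)) { /* poll, ~10ms timeout */
            DRM_ERROR("pcode read of freq table timed out\n");
            continue;
    }
    ia_freq = I915_READ(GEN6_PCODE_DATA);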
@@ -1452,16 +1453,23 @@ i915_max_freq(SYSCTL_HANDLER_ARGS)
        dev_priv = dev->dev_private;
        if (dev_priv == NULL)
                return (EBUSY);
-       max_freq = dev_priv->max_delay * 50;
+       max_freq = dev_priv->rps.max_delay * 50;
        error = sysctl_handle_int(oidp, &max_freq, 0, req);
        if (error || !req->newptr)
                return (error);
        DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
+
+       if (lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE|LK_SLEEPFAIL))
+               return -EINTR;
+
        /*
         * Turbo will still be enabled, but won't go above the set value.
         */
-       dev_priv->max_delay = max_freq / 50;
+       dev_priv->rps.max_delay = max_freq / 50;
+
        gen6_set_rps(dev, max_freq / 50);
+       lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
+
        return (error);
 }
 
diff --git a/sys/dev/drm/i915/i915_dma.c b/sys/dev/drm/i915/i915_dma.c
index 79f265d..85bbab2 100644
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
+#include <linux/workqueue.h>
 
 extern struct drm_i915_private *i915_mch_dev;
 
@@ -1182,8 +1183,18 @@ intel_teardown_mchbar(struct drm_device *dev)
        }
 }
 
-int
-i915_driver_load(struct drm_device *dev, unsigned long flags)
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long base, size;
@@ -1221,13 +1232,32 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 
-       dev_priv->tq = taskqueue_create("915", M_WAITOK,
-           taskqueue_thread_enqueue, &dev_priv->tq);
-       taskqueue_start_threads(&dev_priv->tq, 1, 0, -1, "i915 taskq");
+       /* The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and recording error state.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time.  Use an ordered one.
+        */
+       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+       if (dev_priv->wq == NULL) {
+               DRM_ERROR("Failed to create our workqueue.\n");
+               ret = -ENOMEM;
+               goto out_mtrrfree;
+       }
+
        lockinit(&dev_priv->gt_lock, "915gt", 0, LK_CANRECURSE);
        lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
+       spin_init(&dev_priv->rps.lock);
        lockinit(&dev_priv->error_completion_lock, "915cmp", 0, LK_CANRECURSE);
-       lockinit(&dev_priv->rps_lock, "915rps", 0, LK_CANRECURSE);
+
+       lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
 
        dev_priv->has_gem = 1;
        intel_irq_init(dev);
@@ -1257,7 +1287,7 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
 
-       if (IS_IVYBRIDGE(dev))
+       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                dev_priv->num_pipe = 3;
        else if (IS_MOBILE(dev) || !IS_GEN2(dev))
                dev_priv->num_pipe = 2;
@@ -1274,9 +1304,7 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
        intel_detect_pch(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               DRM_UNLOCK(dev);
                ret = i915_load_modeset_init(dev);
-               DRM_LOCK(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
@@ -1301,6 +1329,8 @@ out_gem_unload:
        /* XXXKIB */
        (void) i915_driver_unload_int(dev, true);
        return (ret);
+out_mtrrfree:
+       return ret;
 }
 
 static int
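
A minimal sketch of the ordered-workqueue lifecycle adopted above (the
names here are hypothetical; only the workqueue calls are real API):

    static struct workqueue_struct *wq;
    static struct work_struct item;

    static void handler(struct work_struct *work)
    {
            /* thread context; an ordered queue runs at most one item
             * at a time, mirroring the old single "i915 taskq" thread */
    }

    static int example_setup(void)
    {
            wq = alloc_ordered_workqueue("example", 0);
            if (wq == NULL)
                    return -ENOMEM;
            INIT_WORK(&item, handler);
            queue_work(wq, &item);  /* no-op if already queued */
            return 0;
    }

    static void example_teardown(void)
    {
            cancel_work_sync(&item);
            destroy_workqueue(wq);  /* drains remaining items, then frees */
    }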
@@ -1364,8 +1394,8 @@ i915_driver_unload_int(struct drm_device *dev, bool locked)
 
        lockuninit(&dev_priv->irq_lock);
 
-       if (dev_priv->tq != NULL)
-               taskqueue_free(dev_priv->tq);
+       if (dev_priv->wq != NULL)
+               destroy_workqueue(dev_priv->wq);
 
        bus_generic_detach(dev->dev);
        drm_rmmap(dev, dev_priv->mmio_map);
@@ -1373,7 +1403,6 @@ i915_driver_unload_int(struct drm_device *dev, bool locked)
 
        lockuninit(&dev_priv->error_lock);
        lockuninit(&dev_priv->error_completion_lock);
-       lockuninit(&dev_priv->rps_lock);
        drm_free(dev->dev_private, DRM_MEM_DRIVER);
 
        return (0);
diff --git a/sys/dev/drm/i915/i915_drv.h b/sys/dev/drm/i915/i915_drv.h
index f4da243..e812f1f 100644
@@ -37,6 +37,7 @@
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
+#include <linux/workqueue.h>
 
 /* General customization:
  */
@@ -384,6 +385,28 @@ enum intel_pch {
 struct intel_fbdev;
 struct intel_fbc_work;
 
+struct intel_gen6_power_mgmt {
+       struct work_struct work;
+       u32 pm_iir;
+       /* lock - irqsave spinlock that protects the work_struct and
+        * pm_iir. */
+       struct spinlock lock;
+
+       /* The below variables and all the rps hw state are protected by
+        * dev->struct_mutex. */
+       u8 cur_delay;
+       u8 min_delay;
+       u8 max_delay;
+
+       struct delayed_work delayed_resume_work;
+
+       /*
+        * Protects RPS/RC6 register access and PCU communication.
+        * Must be taken after struct_mutex if nested.
+        */
+       struct lock hw_lock;
+};
+
 typedef struct drm_i915_private {
        struct drm_device *dev;
 
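
The division of labour between the two new locks, condensed from the
hunks elsewhere in this patch (a summary sketch, not additional code):

    /* interrupt side: rps.lock only, never sleeps */
    spin_lock(&dev_priv->rps.lock);
    dev_priv->rps.pm_iir |= pm_iir;
    spin_unlock(&dev_priv->rps.lock);

    /* thread side: rps.hw_lock serializes RPS/RC6 register access and
     * PCU communication; taken after the struct mutex when nested */
    lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
    gen6_set_rps(dev, new_delay);
    lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);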
@@ -414,6 +437,8 @@ typedef struct drm_i915_private {
        uint32_t next_seqno;
 
        drm_dma_handle_t *status_page_dmah;
+       struct resource *mch_res;
+
        void *hw_status_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
@@ -737,7 +762,7 @@ typedef struct drm_i915_private {
                 * fire periodically while the ring is running. When it
                 * fires, go retire requests.
                 */
-               struct timeout_task retire_task;
+               struct delayed_work retire_work;
 
                /**
                 * Are we in a non-interruptible section of code like
@@ -825,16 +850,12 @@ typedef struct drm_i915_private {
 
        device_t bridge_dev;
        bool mchbar_need_disable;
+
        int mch_res_rid;
-       struct resource *mch_res;
 
-       struct lock rps_lock;
-       u32 pm_iir;
-       struct task rps_task;
+       /* gen6+ rps state */
+       struct intel_gen6_power_mgmt rps;
 
-       u8 cur_delay;
-       u8 min_delay;
-       u8 max_delay;
        u8 fmax;
        u8 fstart;
 
@@ -859,13 +880,14 @@ typedef struct drm_i915_private {
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
 
-       struct taskqueue *tq;
-       struct task error_task;
-       struct task hotplug_task;
+       struct lock error_lock;
+       /* Protected by dev->error_lock. */
+       struct drm_i915_error_state *first_error;
+       struct work_struct error_work;
        int error_completion;
        struct lock error_completion_lock;
-       struct drm_i915_error_state *first_error;
-       struct lock error_lock;
+       struct workqueue_struct *wq;
+       struct work_struct hotplug_work;
 
        unsigned long last_gpu_reset;
 
diff --git a/sys/dev/drm/i915/i915_gem.c b/sys/dev/drm/i915/i915_gem.c
index c45bc8c..aa59773 100644
@@ -90,7 +90,6 @@ static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
     struct drm_i915_fence_reg *reg);
 static void i915_gem_reset_fences(struct drm_device *dev);
-static void i915_gem_retire_task_handler(void *arg, int pending);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
     struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
     uint64_t size, struct drm_file *file_priv);
@@ -214,64 +213,6 @@ i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_free_object_tail(obj);
 }
 
-static void
-init_ring_lists(struct intel_ring_buffer *ring)
-{
-
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->gpu_write_list);
-}
-
-void
-i915_gem_load(struct drm_device *dev)
-{
-       int i;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
-       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-       INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               init_ring_lists(&dev_priv->ring[i]);
-       for (i = 0; i < I915_MAX_NUM_FENCES; i++)
-               INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-       TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
-           i915_gem_retire_task_handler, dev_priv);
-       dev_priv->error_completion = 0;
-
-       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-       if (IS_GEN3(dev)) {
-               I915_WRITE(MI_ARB_STATE,
-                          _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-       }
-
-       dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
-       /* Old X drivers will take 0-2 for front, back, depth buffers */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               dev_priv->fence_reg_start = 3;
-
-       if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-               dev_priv->num_fence_regs = 16;
-       else
-               dev_priv->num_fence_regs = 8;
-
-       /* Initialize fence registers to zero */
-       i915_gem_reset_fences(dev);
-
-       i915_gem_detect_bit_6_swizzle(dev);
-
-       dev_priv->mm.interruptible = true;
-
-       dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
-           i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
-}
-
 int
 i915_gem_do_init(struct drm_device *dev, unsigned long start,
     unsigned long mappable_end, unsigned long end)
@@ -360,7 +301,8 @@ i915_gem_idle(struct drm_device *dev)
        i915_gem_cleanup_ringbuffer(dev);
 
        /* Cancel the retire work handler, which should be idle now. */
-       taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
+       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
        return (ret);
 }
 
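
cancel_delayed_work_sync() removes a pending entry and, if the handler
is already executing, also waits for it to return, so it subsumes the
old taskqueue_cancel_timeout()/taskqueue_drain_timeout() pair (stated
here as a reading of the two APIs, hedged accordingly):

    /* after this returns, retire_work is neither queued nor running
     * and the ring state it touches can be torn down safely */
    cancel_delayed_work_sync(&dev_priv->mm.retire_work);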
@@ -732,8 +674,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        lockmgr(&ring->irq_lock, LK_RELEASE);
 
        if (ret == 0)
-               taskqueue_enqueue_timeout(dev_priv->tq,
-                   &dev_priv->mm.retire_task, 0);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
        return ret;
 }
@@ -2651,9 +2592,12 @@ i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
                        mod_timer(&dev_priv->hangcheck_timer,
                                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
                }
-               if (was_empty)
-                       taskqueue_enqueue_timeout(dev_priv->tq,
-                           &dev_priv->mm.retire_task, hz);
+               if (was_empty) {
+                       queue_delayed_work(dev_priv->wq,
+                                          &dev_priv->mm.retire_work,
+                                          round_jiffies_up_relative(hz));
+                       intel_mark_busy(dev_priv->dev);
+               }
        }
        return (0);
 }
@@ -3344,20 +3288,22 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_retire_task_handler(void *arg, int pending)
+i915_gem_retire_work_handler(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;
+       struct intel_ring_buffer *ring;
        bool idle;
        int i;
 
-       dev_priv = arg;
+       dev_priv = container_of(work, drm_i915_private_t,
+                               mm.retire_work.work);
        dev = dev_priv->dev;
 
        /* Come back later if the device is busy... */
        if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
-               taskqueue_enqueue_timeout(dev_priv->tq,
-                   &dev_priv->mm.retire_task, hz);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+                                  round_jiffies_up_relative(hz));
                return;
        }
 
@@ -3368,7 +3314,7 @@ i915_gem_retire_task_handler(void *arg, int pending)
         */
        idle = true;
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               ring = &dev_priv->ring[i];
 
                if (!list_empty(&ring->gpu_write_list)) {
                        struct drm_i915_gem_request *request;
@@ -3387,8 +3333,8 @@ i915_gem_retire_task_handler(void *arg, int pending)
        }
 
        if (!dev_priv->mm.suspended && !idle)
-               taskqueue_enqueue_timeout(dev_priv->tq,
-                   &dev_priv->mm.retire_task, hz);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+                                  round_jiffies_up_relative(hz));
 
        DRM_UNLOCK(dev);
 }
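
Every converted handler recovers its context with container_of() in
place of the old void * argument; the one subtlety is delayed work,
where the callback receives a pointer to the work_struct embedded in
the delayed_work (a sketch of the three forms used in this patch):

    /* plain work item, as in i915_hotplug_work_func(): */
    dev_priv = container_of(work, drm_i915_private_t, hotplug_work);

    /* delayed work, naming the embedded member directly (note the
     * trailing ".work", as in the handler above): */
    dev_priv = container_of(work, drm_i915_private_t,
                            mm.retire_work.work);

    /* or hopping via to_delayed_work() first, as intel_dp.c does: */
    intel_dp = container_of(to_delayed_work(work), struct intel_dp,
                            panel_vdd_work);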
@@ -3511,6 +3457,64 @@ i915_gem_detach_phys_object(struct drm_device *dev,
        obj->phys_obj = NULL;
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+       int i;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       INIT_LIST_HEAD(&dev_priv->mm.active_list);
+       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
+       INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               init_ring_lists(&dev_priv->ring[i]);
+       for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+               INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+                         i915_gem_retire_work_handler);
+       dev_priv->error_completion = 0;
+
+       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+       if (IS_GEN3(dev)) {
+               I915_WRITE(MI_ARB_STATE,
+                          _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+       }
+
+       dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+       /* Old X drivers will take 0-2 for front, back, depth buffers */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               dev_priv->fence_reg_start = 3;
+
+       if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+               dev_priv->num_fence_regs = 16;
+       else
+               dev_priv->num_fence_regs = 8;
+
+       /* Initialize fence registers to zero */
+       i915_gem_reset_fences(dev);
+
+       i915_gem_detect_bit_6_swizzle(dev);
+
+       dev_priv->mm.interruptible = true;
+
+       dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
+           i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
+}
+
 int
 i915_gem_attach_phys_object(struct drm_device *dev,
     struct drm_i915_gem_object *obj, int id, int align)
diff --git a/sys/dev/drm/i915/i915_irq.c b/sys/dev/drm/i915/i915_irq.c
index e888162..6911d20 100644
@@ -301,20 +301,14 @@ i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
-static void
-i915_hotplug_work_func(void *context, int pending)
+static void i915_hotplug_work_func(struct work_struct *work)
 {
-       drm_i915_private_t *dev_priv = context;
+       drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+                                                   hotplug_work);
        struct drm_device *dev = dev_priv->dev;
-       struct drm_mode_config *mode_config;
+       struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
 
-       DRM_DEBUG("running encoder hotplug functions\n");
-       dev_priv = context;
-       dev = dev_priv->dev;
-
-       mode_config = &dev->mode_config;
-
        lockmgr(&mode_config->mutex, LK_EXCLUSIVE);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
@@ -340,7 +334,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 
        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
-       new_delay = dev_priv->cur_delay;
+       new_delay = dev_priv->rps.cur_delay;
 
        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -350,19 +344,19 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 
        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
+               if (dev_priv->rps.cur_delay != dev_priv->rps.max_delay)
+                       new_delay = dev_priv->rps.cur_delay - 1;
+               if (new_delay < dev_priv->rps.max_delay)
+                       new_delay = dev_priv->rps.max_delay;
        } else if (busy_down < min_avg) {
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->min_delay)
-                       new_delay = dev_priv->min_delay;
+               if (dev_priv->rps.cur_delay != dev_priv->rps.min_delay)
+                       new_delay = dev_priv->rps.cur_delay + 1;
+               if (new_delay > dev_priv->rps.min_delay)
+                       new_delay = dev_priv->rps.min_delay;
        }
 
        if (ironlake_set_drps(dev, new_delay))
-               dev_priv->cur_delay = new_delay;
+               dev_priv->rps.cur_delay = new_delay;
 
        lockmgr(&mchdev_lock, LK_RELEASE);
 
@@ -392,61 +386,39 @@ static void notify_ring(struct drm_device *dev,
        }
 }
 
-static void
-gen6_pm_rps_work_func(void *arg, int pending)
+static void gen6_pm_rps_work(struct work_struct *work)
 {
-       struct drm_device *dev;
-       drm_i915_private_t *dev_priv;
-       u8 new_delay;
+       drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+                                                   rps.work);
        u32 pm_iir, pm_imr;
+       u8 new_delay;
 
-       dev_priv = (drm_i915_private_t *)arg;
-       dev = dev_priv->dev;
-       new_delay = dev_priv->cur_delay;
-
-       lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
-       pm_iir = dev_priv->pm_iir;
-       dev_priv->pm_iir = 0;
+       spin_lock(&dev_priv->rps.lock);
+       pm_iir = dev_priv->rps.pm_iir;
+       dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
-       lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+       spin_unlock(&dev_priv->rps.lock);
 
-       if (!pm_iir)
+       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
 
-       DRM_LOCK(dev);
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
-       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-               gen6_gt_force_wake_get(dev_priv);
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->min_delay) {
-                       new_delay = dev_priv->min_delay;
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                  ((new_delay << 16) & 0x3f0000));
-               } else {
-                       /* Make sure we continue to get down interrupts
-                        * until we hit the minimum frequency */
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-               }
-               gen6_gt_force_wake_put(dev_priv);
-       }
+       lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
 
-       gen6_set_rps(dev, new_delay);
-       dev_priv->cur_delay = new_delay;
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+               new_delay = dev_priv->rps.cur_delay + 1;
+       else
+               new_delay = dev_priv->rps.cur_delay - 1;
 
-       /*
-        * rps_lock not held here because clearing is non-destructive. There is
-        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-        * by holding struct_mutex for the duration of the write.
+       /* sysfs frequency interfaces may have snuck in while servicing the
+        * interrupt
         */
-       DRM_UNLOCK(dev);
+       if (!(new_delay > dev_priv->rps.max_delay ||
+             new_delay < dev_priv->rps.min_delay)) {
+               gen6_set_rps(dev_priv->dev, new_delay);
+       }
+
+       lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
 }
 
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -489,13 +461,13 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */
 
-       lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
-       dev_priv->pm_iir |= pm_iir;
-       I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+       spin_lock(&dev_priv->rps.lock);
+       dev_priv->rps.pm_iir |= pm_iir;
+       I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
-       lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+       spin_unlock(&dev_priv->rps.lock);
 
-       taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+       queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
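
Taken together, the two hunks above form a mask/unmask handshake around
rps.pm_iir: the interrupt handler accumulates event bits and masks
further PM interrupts before deferring, and the worker snapshots and
clears them while unmasking. Condensed (a summary of the patch itself):

    /* gen6_queue_rps_work(), interrupt context */
    spin_lock(&dev_priv->rps.lock);
    dev_priv->rps.pm_iir |= pm_iir;
    I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);   /* mask */
    spin_unlock(&dev_priv->rps.lock);
    queue_work(dev_priv->wq, &dev_priv->rps.work);

    /* gen6_pm_rps_work(), thread context */
    spin_lock(&dev_priv->rps.lock);
    pm_iir = dev_priv->rps.pm_iir;
    dev_priv->rps.pm_iir = 0;
    I915_WRITE(GEN6_PMIMR, 0);                      /* unmask */
    spin_unlock(&dev_priv->rps.lock);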
@@ -504,7 +476,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        int pipe;
 
        if (pch_iir & SDE_HOTPLUG_MASK)
-               taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
+               queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -547,7 +519,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        int pipe;
 
        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
-               taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
+               queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -730,16 +702,16 @@ done:
  * Fire an error uevent so userspace can see that a hang or error
  * was detected.
  */
-static void
-i915_error_work_func(void *context, int pending)
+static void i915_error_work_func(struct work_struct *work)
 {
-       drm_i915_private_t *dev_priv = context;
+       drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+                                                   error_work);
        struct drm_device *dev = dev_priv->dev;
 
        /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
 
        if (atomic_read(&dev_priv->mm.wedged)) {
-               DRM_DEBUG("i915: resetting chip\n");
+               DRM_DEBUG_DRIVER("resetting chip\n");
                /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
                if (!i915_reset(dev, GRDOM_RENDER)) {
                        atomic_set(&dev_priv->mm.wedged, 0);
@@ -903,7 +875,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
                }
        }
 
-       taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
+       queue_work(dev_priv->wq, &dev_priv->error_work);
 }
 
 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1014,8 +986,8 @@ i915_driver_irq_handler(void *arg)
                        DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
-                               taskqueue_enqueue(dev_priv->tq,
-                                   &dev_priv->hotplug_task);
+                               queue_work(dev_priv->wq,
+                                          &dev_priv->hotplug_work);
 
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
@@ -1525,13 +1497,6 @@ ironlake_irq_preinstall(struct drm_device *dev)
 
        atomic_set(&dev_priv->irq_received, 0);
 
-       TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
-           dev->dev_private);
-       TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
-           dev->dev_private);
-       TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
-           dev->dev_private);
-
        I915_WRITE(HWSTAM, 0xeffe);
 
        /* XXX hotplug from PCH */
@@ -1694,13 +1659,6 @@ i915_driver_irq_preinstall(struct drm_device * dev)
 
        atomic_set(&dev_priv->irq_received, 0);
 
-       TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
-           dev->dev_private);
-       TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
-           dev->dev_private);
-       TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
-           dev->dev_private);
-
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -1822,10 +1780,6 @@ ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-
-       taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
-       taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
-       taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
 }
 
 static void i915_driver_irq_uninstall(struct drm_device * dev)
@@ -1853,15 +1807,16 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
-
-       taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
-       taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
-       taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
 }
 
 void
 intel_irq_init(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+       INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+       INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/sys/dev/drm/i915/intel_display.c b/sys/dev/drm/i915/intel_display.c
index 2920858..52c0f7c 100644
@@ -5302,50 +5302,30 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
        }
 }
 
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle.  Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(void *arg, int pending)
+void intel_mark_busy(struct drm_device *dev)
+{
+       i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = arg;
-       struct drm_device *dev = dev_priv->dev;
        struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
 
        if (!i915_powersave)
                return;
 
-       DRM_LOCK(dev);
-
-       i915_update_gfx_val(dev_priv);
-
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               /* Skip inactive CRTCs */
                if (!crtc->fb)
                        continue;
 
-               intel_crtc = to_intel_crtc(crtc);
-               if (!intel_crtc->busy)
-                       intel_decrease_pllclock(crtc);
+               intel_decrease_pllclock(crtc);
        }
-
-       DRM_UNLOCK(dev);
-}
-
-void intel_mark_busy(struct drm_device *dev)
-{
-       i915_update_gfx_val(dev->dev_private);
 }
 
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_unpin_work *work;
 
        lockmgr(&dev->event_lock, LK_EXCLUSIVE);
@@ -5354,9 +5334,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
        lockmgr(&dev->event_lock, LK_RELEASE);
 
        if (work) {
-               taskqueue_cancel(dev_priv->tq, &work->task, NULL);
-               taskqueue_drain(dev_priv->tq, &work->task);
-               drm_free(work, DRM_MEM_KMS);
+               cancel_work_sync(&work->work);
+               kfree(work, DRM_MEM_KMS);
        }
 
        drm_crtc_cleanup(crtc);
@@ -5364,9 +5343,10 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
        drm_free(intel_crtc, DRM_MEM_KMS);
 }
 
-static void intel_unpin_work_fn(void *arg, int pending)
+static void intel_unpin_work_fn(struct work_struct *__work)
 {
-       struct intel_unpin_work *work = arg;
+       struct intel_unpin_work *work =
+                               container_of(__work, struct intel_unpin_work, work);
        struct drm_device *dev;
 
        dev = work->dev;
@@ -5414,7 +5394,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
                          &obj->pending_flip.counter);
        wakeup(&obj->pending_flip);
 
-       taskqueue_enqueue(dev_priv->tq, &work->task);
+       queue_work(dev_priv->wq, &work->work);
 }
 
 void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -5660,7 +5640,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->dev = crtc->dev;
        intel_fb = to_intel_framebuffer(crtc->fb);
        work->old_fb_obj = intel_fb->obj;
-       TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
+       INIT_WORK(&work->work, intel_unpin_work_fn);
 
        ret = drm_vblank_get(dev, intel_crtc->pipe);
        if (ret)
@@ -6492,7 +6472,6 @@ void intel_modeset_init(struct drm_device *dev)
                gen6_update_ring_freq(dev_priv);
        }
 
-       TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
        callout_init_mp(&dev_priv->idle_callout);
 }
 
@@ -6536,19 +6515,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_rc6(dev);
 
+       DRM_UNLOCK(dev);
+
        /* Disable the irq before mode object teardown, for the irq might
         * enqueue unpin/hotplug work. */
        drm_irq_uninstall(dev);
-       DRM_UNLOCK(dev);
-
-       if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
-               taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
-       if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
-               taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+       cancel_work_sync(&dev_priv->hotplug_work);
+       cancel_work_sync(&dev_priv->rps.work);
 
-       /* Shut off idle work before the crtcs get freed. */
-       if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
-               taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
+       /* flush any delayed tasks or pending work */
+       flush_scheduled_work();
 
        drm_mode_config_cleanup(dev);
 }
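
Two different queues are drained in the cleanup path above, which is
easy to miss: the cancel_work_sync() calls cover items on the private
dev_priv->wq, while flush_scheduled_work() drains the kernel's global
queue, where schedule_delayed_work() users such as panel_vdd_work and
the FBC work end up (a reading of the patch, noted as an assumption):

    cancel_work_sync(&dev_priv->hotplug_work); /* dev_priv->wq items */
    cancel_work_sync(&dev_priv->rps.work);
    flush_scheduled_work();                    /* global system queue */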
diff --git a/sys/dev/drm/i915/intel_dp.c b/sys/dev/drm/i915/intel_dp.c
index 27c90d0..e3e8e3d 100644
 
 #define DP_LINK_CONFIGURATION_SIZE     9
 
-struct intel_dp {
-       struct intel_encoder base;
-       uint32_t output_reg;
-       uint32_t DP;
-       uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-       bool has_audio;
-       enum hdmi_force_audio force_audio;
-       uint32_t color_range;
-       int dpms_mode;
-       uint8_t link_bw;
-       uint8_t lane_count;
-       uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-       device_t dp_iic_bus;
-       device_t adapter;
-       bool is_pch_edp;
-       uint8_t train_set[4];
-       int panel_power_up_delay;
-       int panel_power_down_delay;
-       int panel_power_cycle_delay;
-       int backlight_on_delay;
-       int backlight_off_delay;
-       struct drm_display_mode *panel_fixed_mode;  /* for eDP */
-       struct timeout_task panel_vdd_task;
-       bool want_panel_vdd;
-};
-
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -1046,9 +1020,10 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
        }
 }
 
-static void ironlake_panel_vdd_work(void *arg, int pending __unused)
+static void ironlake_panel_vdd_work(struct work_struct *__work)
 {
-       struct intel_dp *intel_dp = arg;
+       struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+                                                struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp->base.base.dev;
 
        lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
@@ -1075,10 +1050,8 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
                 * time from now (relative to the power down delay)
                 * to keep the panel power up across a sequence of operations
                 */
-               struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
-               taskqueue_enqueue_timeout(dev_priv->tq,
-                   &intel_dp->panel_vdd_task,
-                   msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+               schedule_delayed_work(&intel_dp->panel_vdd_work,
+                                     msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
        }
 }
 
@@ -2275,12 +2248,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        }
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
-               struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
-
-               taskqueue_cancel_timeout(dev_priv->tq,
-                   &intel_dp->panel_vdd_task, NULL);
-               taskqueue_drain_timeout(dev_priv->tq,
-                   &intel_dp->panel_vdd_task);
+               cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
        drm_free(intel_dp, DRM_MEM_KMS);
@@ -2418,8 +2386,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
        if (is_edp(intel_dp)) {
                intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-               TIMEOUT_TASK_INIT(dev_priv->tq, &intel_dp->panel_vdd_task, 0,
-                   ironlake_panel_vdd_work, intel_dp);
+               INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+                         ironlake_panel_vdd_work);
        }
 
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
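
The eDP panel-VDD handling shows the whole delayed-work lifecycle in
one file: initialized once, re-armed after each use, synchronously
cancelled on destroy. Condensed (the delay comes from the panel's
power-sequencing data, times five as a safety margin):

    INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, ironlake_panel_vdd_work);

    /* keep panel VDD up a while longer after each use */
    schedule_delayed_work(&intel_dp->panel_vdd_work,
        msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

    /* teardown: wait out a running instance, then force VDD off */
    cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
    ironlake_panel_vdd_off_sync(intel_dp);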
diff --git a/sys/dev/drm/i915/intel_drv.h b/sys/dev/drm/i915/intel_drv.h
index cf6607e..5c2ed34 100644
@@ -288,6 +288,35 @@ struct dip_infoframe {
        } __attribute__ ((packed)) body;
 } __attribute__((packed));
 
+#define DP_MAX_DOWNSTREAM_PORTS                0x10
+#define DP_LINK_CONFIGURATION_SIZE     9
+
+struct intel_dp {
+       struct intel_encoder base;
+       uint32_t output_reg;
+       uint32_t DP;
+       uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+       bool has_audio;
+       enum hdmi_force_audio force_audio;
+       uint32_t color_range;
+       int dpms_mode;
+       uint8_t link_bw;
+       uint8_t lane_count;
+       uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+       device_t dp_iic_bus;
+       device_t adapter;
+       bool is_pch_edp;
+       uint8_t train_set[4];
+       int panel_power_up_delay;
+       int panel_power_down_delay;
+       int panel_power_cycle_delay;
+       int backlight_on_delay;
+       int backlight_off_delay;
+       struct delayed_work panel_vdd_work;
+       bool want_panel_vdd;
+       struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -303,17 +332,20 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 }
 
 struct intel_unpin_work {
-       struct task task;
+       struct work_struct work;
        struct drm_device *dev;
        struct drm_i915_gem_object *old_fb_obj;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
        atomic_t pending;
+#define INTEL_FLIP_INACTIVE    0
+#define INTEL_FLIP_PENDING     1
+#define INTEL_FLIP_COMPLETE    2
        bool enable_stall_check;
 };
 
 struct intel_fbc_work {
-       struct timeout_task task;
+       struct delayed_work work;
        struct drm_crtc *crtc;
        struct drm_framebuffer *fb;
        int interval;
@@ -334,6 +366,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
 extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
diff --git a/sys/dev/drm/i915/intel_pm.c b/sys/dev/drm/i915/intel_pm.c
index d10587b..05e6b11 100644
@@ -242,9 +242,11 @@ bool intel_fbc_enabled(struct drm_device *dev)
        return dev_priv->display.fbc_enabled(dev);
 }
 
-static void intel_fbc_work_fn(void *arg, int pending)
+static void intel_fbc_work_fn(struct work_struct *__work)
 {
-       struct intel_fbc_work *work = arg;
+       struct intel_fbc_work *work =
+               container_of(to_delayed_work(__work),
+                            struct intel_fbc_work, work);
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -271,8 +273,6 @@ static void intel_fbc_work_fn(void *arg, int pending)
 
 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 {
-       u_int pending;
-
        if (dev_priv->fbc_work == NULL)
                return;
 
@@ -282,10 +282,9 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
         * dev_priv->fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
-       if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
-           &pending) == 0)
+       if (cancel_delayed_work(&dev_priv->fbc_work->work))
                /* tasklet was killed before being run, clean up */
-               drm_free(dev_priv->fbc_work, DRM_MEM_KMS);
+               kfree(dev_priv->fbc_work, DRM_MEM_KMS);
 
        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
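
Unlike the _sync variant, cancel_delayed_work() never sleeps; its
return value distinguishes the two races the comments above describe
(a sketch of that contract):

    if (cancel_delayed_work(&dev_priv->fbc_work->work)) {
            /* true: the entry was still pending, the handler never
             * ran and never will, so this path owns the allocation */
            kfree(dev_priv->fbc_work, DRM_MEM_KMS);
    }
    /* false: the handler is running or already ran; clearing
     * dev_priv->fbc_work afterwards marks the work unwanted */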
@@ -310,8 +309,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        work->crtc = crtc;
        work->fb = crtc->fb;
        work->interval = interval;
-       TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
-           work);
+       INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
        dev_priv->fbc_work = work;
 
@@ -328,8 +326,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
-       taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
-           msecs_to_jiffies(50));
+       schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
 
 void intel_disable_fbc(struct drm_device *dev)
@@ -2047,9 +2044,9 @@ void ironlake_enable_drps(struct drm_device *dev)
        dev_priv->fmax = fmax; /* IPS callback will increase this */
        dev_priv->fstart = fstart;
 
-       dev_priv->max_delay = fstart;
-       dev_priv->min_delay = fmin;
-       dev_priv->cur_delay = fstart;
+       dev_priv->rps.max_delay = fstart;
+       dev_priv->rps.min_delay = fmin;
+       dev_priv->rps.cur_delay = fstart;
 
        DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
                         fmax, fmin, fstart);
@@ -2123,9 +2120,9 @@ void gen6_disable_rps(struct drm_device *dev)
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-       lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
-       dev_priv->pm_iir = 0;
-       lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+       spin_lock(&dev_priv->rps.lock);
+       dev_priv->rps.pm_iir = 0;
+       spin_unlock(&dev_priv->rps.lock);
 
        I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
@@ -2408,7 +2405,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
        unsigned long t, corr, state1, corr2, state2;
        u32 pxvid, ext_v;
 
-       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -2482,8 +2479,8 @@ bool i915_gpu_raise(void)
        }
        dev_priv = i915_mch_dev;
 
-       if (dev_priv->max_delay > dev_priv->fmax)
-               dev_priv->max_delay--;
+       if (dev_priv->rps.max_delay > dev_priv->fmax)
+               dev_priv->rps.max_delay--;
 
 out_unlock:
        lockmgr(&mchdev_lock, LK_RELEASE);
@@ -2509,8 +2506,8 @@ bool i915_gpu_lower(void)
        }
        dev_priv = i915_mch_dev;
 
-       if (dev_priv->max_delay < dev_priv->min_delay)
-               dev_priv->max_delay++;
+       if (dev_priv->rps.max_delay < dev_priv->rps.min_delay)
+               dev_priv->rps.max_delay++;
 
 out_unlock:
        lockmgr(&mchdev_lock, LK_RELEASE);
@@ -2559,7 +2556,7 @@ bool i915_gpu_turbo_disable(void)
        }
        dev_priv = i915_mch_dev;
 
-       dev_priv->max_delay = dev_priv->fstart;
+       dev_priv->rps.max_delay = dev_priv->fstart;
 
        if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
                ret = false;
@@ -2671,7 +2668,6 @@ static int intel_enable_rc6(struct drm_device *dev)
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
        u32 pcu_mbox, rc6_mask = 0;
@@ -2687,7 +2683,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
         * userspace...
         */
        I915_WRITE(GEN6_RC_STATE, 0);
-       DRM_LOCK(dev);
 
        /* Clear the DBG now so we don't confuse earlier errors */
        if ((gtfifodbg = I915_READ(GTFIFODBG))) {
@@ -2794,9 +2789,9 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        }
 
        /* In units of 100MHz */
-       dev_priv->max_delay = max_freq;
-       dev_priv->min_delay = min_freq;
-       dev_priv->cur_delay = cur_freq;
+       dev_priv->rps.max_delay = max_freq;
+       dev_priv->rps.min_delay = min_freq;
+       dev_priv->rps.cur_delay = cur_freq;
 
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER,
@@ -2807,16 +2802,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                   GEN6_PM_RP_DOWN_THRESHOLD |
                   GEN6_PM_RP_UP_EI_EXPIRED |
                   GEN6_PM_RP_DOWN_EI_EXPIRED);
-       lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
-       if (dev_priv->pm_iir != 0)
-               kprintf("pm_iir %x\n", dev_priv->pm_iir);
+       spin_lock(&dev_priv->rps.lock);
+       WARN_ON(dev_priv->rps.pm_iir != 0);
        I915_WRITE(GEN6_PMIMR, 0);
-       lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+       spin_unlock(&dev_priv->rps.lock);
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
 
        gen6_gt_force_wake_put(dev_priv);
-       DRM_UNLOCK(dev);
 }
 
 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -2851,9 +2844,9 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
         * to use for memory access.  We do this by specifying the IA frequency
         * the PCU should use as a reference to determine the ring frequency.
         */
-       for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+       for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
             gpu_freq--) {
-               int diff = dev_priv->max_delay - gpu_freq;
+               int diff = dev_priv->rps.max_delay - gpu_freq;
                int d;
 
                /*