drm: Replace the i915 driver by i915kms
authorFrançois Tigeot <ftigeot@wolfpond.org>
Thu, 12 Sep 2013 07:05:08 +0000 (09:05 +0200)
committerFrançois Tigeot <ftigeot@wolfpond.org>
Thu, 12 Sep 2013 14:01:28 +0000 (16:01 +0200)
i915kms was already an updated version of i915; there's no need to
keep maintaining two separate instances of the same driver.

45 files changed:
Makefile_upgrade.inc
sys/conf/files
sys/dev/drm/Makefile
sys/dev/drm/i915/Makefile
sys/dev/drm/i915/i915_debug.c [moved from sys/dev/drm/i915kms/i915_debug.c with 100% similarity]
sys/dev/drm/i915/i915_dma.c
sys/dev/drm/i915/i915_drm.h
sys/dev/drm/i915/i915_drv.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c [moved from sys/dev/drm/i915kms/i915_gem.c with 100% similarity]
sys/dev/drm/i915/i915_gem_evict.c [moved from sys/dev/drm/i915kms/i915_gem_evict.c with 100% similarity]
sys/dev/drm/i915/i915_gem_execbuffer.c [moved from sys/dev/drm/i915kms/i915_gem_execbuffer.c with 100% similarity]
sys/dev/drm/i915/i915_gem_gtt.c [moved from sys/dev/drm/i915kms/i915_gem_gtt.c with 100% similarity]
sys/dev/drm/i915/i915_gem_tiling.c [moved from sys/dev/drm/i915kms/i915_gem_tiling.c with 100% similarity]
sys/dev/drm/i915/i915_irq.c
sys/dev/drm/i915/i915_reg.h
sys/dev/drm/i915/i915_suspend.c
sys/dev/drm/i915/intel_bios.c [moved from sys/dev/drm/i915kms/intel_bios.c with 100% similarity]
sys/dev/drm/i915/intel_bios.h [moved from sys/dev/drm/i915kms/intel_bios.h with 100% similarity]
sys/dev/drm/i915/intel_crt.c [moved from sys/dev/drm/i915kms/intel_crt.c with 100% similarity]
sys/dev/drm/i915/intel_display.c [moved from sys/dev/drm/i915kms/intel_display.c with 100% similarity]
sys/dev/drm/i915/intel_dp.c [moved from sys/dev/drm/i915kms/intel_dp.c with 100% similarity]
sys/dev/drm/i915/intel_drv.h [moved from sys/dev/drm/i915kms/intel_drv.h with 100% similarity]
sys/dev/drm/i915/intel_fb.c [moved from sys/dev/drm/i915kms/intel_fb.c with 100% similarity]
sys/dev/drm/i915/intel_hdmi.c [moved from sys/dev/drm/i915kms/intel_hdmi.c with 100% similarity]
sys/dev/drm/i915/intel_iic.c [moved from sys/dev/drm/i915kms/intel_iic.c with 100% similarity]
sys/dev/drm/i915/intel_lvds.c [moved from sys/dev/drm/i915kms/intel_lvds.c with 100% similarity]
sys/dev/drm/i915/intel_modes.c [moved from sys/dev/drm/i915kms/intel_modes.c with 100% similarity]
sys/dev/drm/i915/intel_opregion.c [moved from sys/dev/drm/i915kms/intel_opregion.c with 100% similarity]
sys/dev/drm/i915/intel_overlay.c [moved from sys/dev/drm/i915kms/intel_overlay.c with 100% similarity]
sys/dev/drm/i915/intel_panel.c [moved from sys/dev/drm/i915kms/intel_panel.c with 100% similarity]
sys/dev/drm/i915/intel_ringbuffer.c [moved from sys/dev/drm/i915kms/intel_ringbuffer.c with 100% similarity]
sys/dev/drm/i915/intel_ringbuffer.h [moved from sys/dev/drm/i915kms/intel_ringbuffer.h with 100% similarity]
sys/dev/drm/i915/intel_sdvo.c [moved from sys/dev/drm/i915kms/intel_sdvo.c with 100% similarity]
sys/dev/drm/i915/intel_sdvo_regs.h [moved from sys/dev/drm/i915kms/intel_sdvo_regs.h with 100% similarity]
sys/dev/drm/i915/intel_sprite.c [moved from sys/dev/drm/i915kms/intel_sprite.c with 100% similarity]
sys/dev/drm/i915/intel_tv.c [moved from sys/dev/drm/i915kms/intel_tv.c with 100% similarity]
sys/dev/drm/i915kms/Makefile [deleted file]
sys/dev/drm/i915kms/i915_dma.c [deleted file]
sys/dev/drm/i915kms/i915_drm.h [deleted file]
sys/dev/drm/i915kms/i915_drv.c [deleted file]
sys/dev/drm/i915kms/i915_drv.h [deleted file]
sys/dev/drm/i915kms/i915_irq.c [deleted file]
sys/dev/drm/i915kms/i915_reg.h [deleted file]
sys/dev/drm/i915kms/i915_suspend.c [deleted file]

index 2ecd9c0..5c0382e 100644 (file)
@@ -2146,6 +2146,7 @@ TO_REMOVE+=/usr/share/man/man2/syslink.2.gz
 TO_REMOVE+=/usr/share/man/cat4/uhidev.4.gz
 TO_REMOVE+=/usr/share/man/man4/uhidev.4.gz
 TO_REMOVE+=/boot/kernel/drmn.ko
+TO_REMOVE+=/boot/kernel/i915.ko
 
 .if ${MACHINE_ARCH} == "x86_64"
 TO_REMOVE+=/usr/sbin/stlstats
index d3baf3b..70d1792 100644 (file)
@@ -1935,11 +1935,32 @@ dev/drm/ttm/ttm_execbuf_util.c          optional drm
 dev/drm/ttm/ttm_memory.c               optional drm
 dev/drm/ttm/ttm_page_alloc.c           optional drm
 dev/drm/ttm/ttm_bo_vm.c                        optional drm
+dev/drm/i915/i915_debug.c              optional i915drm drm
 dev/drm/i915/i915_dma.c                        optional i915drm drm
 dev/drm/i915/i915_drv.c                        optional i915drm drm
+dev/drm/i915/i915_gem.c                        optional i915drm drm
+dev/drm/i915/i915_gem_execbuffer.c     optional i915drm drm
+dev/drm/i915/i915_gem_evict.c          optional i915drm drm
+dev/drm/i915/i915_gem_gtt.c            optional i915drm drm
+dev/drm/i915/i915_gem_tiling.c         optional i915drm drm
 dev/drm/i915/i915_irq.c                        optional i915drm drm
-dev/drm/i915/i915_mem.c                        optional i915drm drm
 dev/drm/i915/i915_suspend.c            optional i915drm drm
+dev/drm/i915/intel_bios.c              optional i915drm drm
+dev/drm/i915/intel_crt.c               optional i915drm drm
+dev/drm/i915/intel_display.c           optional i915drm drm
+dev/drm/i915/intel_dp.c                        optional i915drm drm
+dev/drm/i915/intel_fb.c                        optional i915drm drm
+dev/drm/i915/intel_hdmi.c              optional i915drm drm
+dev/drm/i915/intel_iic.c               optional i915drm drm
+dev/drm/i915/intel_lvds.c              optional i915drm drm
+dev/drm/i915/intel_modes.c             optional i915drm drm
+dev/drm/i915/intel_opregion.c          optional i915drm drm
+dev/drm/i915/intel_overlay.c           optional i915drm drm
+dev/drm/i915/intel_panel.c             optional i915drm drm
+dev/drm/i915/intel_ringbuffer.c                optional i915drm drm
+dev/drm/i915/intel_sdvo.c              optional i915drm drm
+dev/drm/i915/intel_sprite.c            optional i915drm drm
+dev/drm/i915/intel_tv.c                        optional i915drm drm
 dev/drm/mach64/mach64_dma.c            optional mach64drm drm
 dev/drm/mach64/mach64_drv.c            optional mach64drm drm
 dev/drm/mach64/mach64_irq.c            optional mach64drm drm
index 41133cc..c3f0918 100644 (file)
@@ -1,6 +1,6 @@
 # $DragonFly: src/sys/dev/drm/Makefile,v 1.3 2008/04/05 18:12:28 hasso Exp $
 
-SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915 i915kms
+SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915
 
 .include <bsd.obj.mk>
 
index 877fce4..4940247 100644 (file)
@@ -1,19 +1,33 @@
-KMOD   = i915
-SRCS    = i915_dma.c i915_drv.c i915_irq.c i915_mem.c i915_suspend.c
-SRCS   += device_if.h bus_if.h pci_if.h opt_drm.h
-CFLAGS += ${DEBUG_FLAGS} -I. -I.. -I@/dev/drm -I@/dev/drm/i915
+KMOD   = i915kms
+SRCS   = \
+       i915_debug.c \
+       i915_dma.c \
+       i915_drv.c \
+       i915_gem.c \
+       i915_gem_execbuffer.c \
+       i915_gem_evict.c \
+       i915_gem_gtt.c \
+       i915_gem_tiling.c \
+       i915_irq.c \
+       i915_suspend.c \
+       intel_bios.c \
+       intel_crt.c \
+       intel_display.c \
+       intel_dp.c \
+       intel_fb.c \
+       intel_hdmi.c \
+       intel_iic.c \
+       intel_lvds.c \
+       intel_modes.c \
+       intel_opregion.c \
+       intel_overlay.c \
+       intel_panel.c \
+       intel_ringbuffer.c \
+       intel_sdvo.c \
+       intel_sprite.c \
+       intel_tv.c
 
-.if defined(DRM_DEBUG)
-DRM_DEBUG_OPT= "\#define DRM_DEBUG 1"
-.endif
-
-.if !defined(DRM_NOLINUX)
-DRM_LINUX_OPT= "\#define DRM_LINUX 1"
-.endif
-
-opt_drm.h:
-       touch ${.TARGET}
-       echo $(DRM_DEBUG_OPT) >> ${.TARGET}
-       echo $(DRM_LINUX_OPT) >> ${.TARGET}
+SRCS   += device_if.h bus_if.h pci_if.h iicbus_if.h iicbb_if.h opt_drm.h \
+         opt_ktr.h
 
 .include <bsd.kmod.mk>
index 5bb119b..010288c 100644 (file)
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_dma.c,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
-#include "dev/drm/drmP.h"
-#include "dev/drm/drm.h"
+#include <dev/drm/drmP.h>
+#include <dev/drm/drm.h>
 #include "i915_drm.h"
 #include "i915_drv.h"
-
-/* Really want an OS-independent resettable timer.  Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
+
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
  */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-       u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-       u32 last_acthd = I915_READ(acthd_reg);
-       u32 acthd;
-       int i;
-
-       for (i = 0; i < 100000; i++) {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               acthd = I915_READ(acthd_reg);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
-               if (ring->space >= n)
-                       return 0;
-
-               if (dev_priv->sarea_priv)
-                       dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+static struct lock mchdev_lock;
+LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE);
 
-               if (ring->head != last_head)
-                       i = 0;
+static void i915_pineview_get_mem_freq(struct drm_device *dev);
+static void i915_ironlake_get_mem_freq(struct drm_device *dev);
+static int i915_driver_unload_int(struct drm_device *dev, bool locked);
 
-               if (acthd != last_acthd)
-                       i = 0;
-
-               last_head = ring->head;
-               last_acthd = acthd;
-               DRM_UDELAY(10 * 1000);
-       }
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 addr;
 
-       return -EBUSY;
+       addr = dev_priv->status_page_dmah->busaddr;
+       if (INTEL_INFO(dev)->gen >= 4)
+               addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+       I915_WRITE(HWS_PGA, addr);
 }
 
 /**
@@ -79,8 +68,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 static int i915_init_phys_hws(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-       /* Program Hardware Status Page */
+       /*
+        * Program Hardware Status Page
+        * XXXKIB Keep 4GB limit for allocation for now.  This method
+        * of allocation is used on <= 965 hardware, that has several
+        * erratas regarding the use of physical memory > 4 GB.
+        */
        DRM_UNLOCK(dev);
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
@@ -89,13 +84,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+       ring->status_page.page_addr = dev_priv->hw_status_page =
+           dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
-       I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-       DRM_DEBUG("Enabled hardware status page\n");
+       i915_write_hws_pga(dev);
+       DRM_DEBUG("Enabled hardware status page, phys %jx\n",
+           (uintmax_t)dev_priv->dma_status_page);
        return 0;
 }
 
@@ -106,6 +103,8 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
@@ -113,6 +112,7 @@ static void i915_free_hws(struct drm_device *dev)
 
        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
+               ring->status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }
 
@@ -123,13 +123,27 @@ static void i915_free_hws(struct drm_device *dev)
 void i915_kernel_lost_context(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+       /*
+        * We should never lose context on the ring with modesetting
+        * as we don't expose it to userspace
+        */
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
 
-       ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+       ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
-               ring->space += ring->Size;
+               ring->space += ring->size;
+
+#if 1
+       KIB_NOTYET();
+#else
+       if (!dev->primary->master)
+               return;
+#endif
 
        if (ring->head == ring->tail && dev_priv->sarea_priv)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
@@ -138,6 +152,9 @@ void i915_kernel_lost_context(struct drm_device * dev)
 static int i915_dma_cleanup(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+
+
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
@@ -145,12 +162,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-       if (dev_priv->ring.virtual_start) {
-               drm_core_ioremapfree(&dev_priv->ring.map, dev);
-               dev_priv->ring.virtual_start = NULL;
-               dev_priv->ring.map.virtual = NULL;
-               dev_priv->ring.map.size = 0;
-       }
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               intel_cleanup_ring_buffer(&dev_priv->rings[i]);
 
        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@ -162,6 +175,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
 
        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
@@ -174,34 +188,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
            ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
 
        if (init->ring_size != 0) {
-               if (dev_priv->ring.ring_obj != NULL) {
+               if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }
 
-               dev_priv->ring.Size = init->ring_size;
-               dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
-               dev_priv->ring.map.offset = init->ring_start;
-               dev_priv->ring.map.size = init->ring_size;
-               dev_priv->ring.map.type = 0;
-               dev_priv->ring.map.flags = 0;
-               dev_priv->ring.map.mtrr = 0;
-
-               drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-
-               if (dev_priv->ring.map.virtual == NULL) {
+               ret = intel_render_ring_init_dri(dev,
+                                                init->ring_start,
+                                                init->ring_size);
+               if (ret) {
                        i915_dma_cleanup(dev);
-                       DRM_ERROR("can not ioremap virtual address for"
-                                 " ring buffer\n");
-                       return -ENOMEM;
+                       return ret;
                }
        }
 
-       dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;
-
        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
@@ -218,31 +220,27 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
        DRM_DEBUG("\n");
 
-       if (!dev_priv->sarea) {
-               DRM_ERROR("can not find sarea!\n");
-               return -EINVAL;
-       }
-
-       if (dev_priv->ring.map.virtual == NULL) {
+       if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
 
        /* Program Hardware Status Page */
-       if (!dev_priv->hw_status_page) {
+       if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
-       DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
-       if (dev_priv->status_gfx_addr != 0)
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+       DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
+       if (ring->status_page.gfx_addr != 0)
+               intel_ring_setup_status_page(ring);
        else
-               I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+               i915_write_hws_pga(dev);
+
        DRM_DEBUG("Enabled hardware status page\n");
 
        return 0;
@@ -353,9 +351,8 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
-       RING_LOCALS;
 
-       if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+       if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;
 
        BEGIN_LP_RING((dwords+1)&~1);
@@ -389,40 +386,54 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
 }
 
 int i915_emit_box(struct drm_device * dev,
-                 struct drm_clip_rect __user * boxes,
+                 struct drm_clip_rect *boxes,
                  int i, int DR1, int DR4)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box;
-       RING_LOCALS;
 
        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return -EFAULT;
        }
 
-       if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+       return (i915_emit_box_p(dev, &box, DR1, DR4));
+}
+
+int
+i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
+    int DR1, int DR4)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
+           box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
-                         box.x1, box.y1, box.x2, box.y2);
+                         box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }
 
-       if (IS_I965G(dev)) {
-               BEGIN_LP_RING(4);
+       if (INTEL_INFO(dev)->gen >= 4) {
+               ret = BEGIN_LP_RING(4);
+               if (ret != 0)
+                       return (ret);
+
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-               OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-               OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+               OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+               OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
-               ADVANCE_LP_RING();
        } else {
-               BEGIN_LP_RING(6);
+               ret = BEGIN_LP_RING(6);
+               if (ret != 0)
+                       return (ret);
+
                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
-               OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-               OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+               OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+               OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
-               ADVANCE_LP_RING();
        }
+       ADVANCE_LP_RING();
 
        return 0;
 }
@@ -434,23 +445,23 @@ int i915_emit_box(struct drm_device * dev,
 static void i915_emit_breadcrumb(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       RING_LOCALS;
 
        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(dev_priv->counter);
-       OUT_RING(0);
-       ADVANCE_LP_RING();
+       if (BEGIN_LP_RING(4) == 0) {
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(dev_priv->counter);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       }
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-                                  drm_i915_cmdbuffer_t * cmd)
+    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
 {
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;
@@ -466,13 +477,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       ret = i915_emit_box(dev, cmd->cliprects, i,
-                                           cmd->DR1, cmd->DR4);
+                       ret = i915_emit_box_p(dev, &cmd->cliprects[i],
+                           cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }
 
-               ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+               ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }
@@ -481,14 +492,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
        return 0;
 }
 
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
-                                    drm_i915_batchbuffer_t * batch)
+static int
+i915_dispatch_batchbuffer(struct drm_device * dev,
+    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
-       int i = 0, count;
-       RING_LOCALS;
+       int i, count, ret;
 
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment\n");
@@ -501,30 +511,36 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
        for (i = 0; i < count; i++) {
                if (i < nbox) {
-                       int ret = i915_emit_box(dev, boxes, i,
-                                               batch->DR1, batch->DR4);
+                       int ret = i915_emit_box_p(dev, &cliprects[i],
+                           batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }
 
                if (!IS_I830(dev) && !IS_845G(dev)) {
-                       BEGIN_LP_RING(2);
-                       if (IS_I965G(dev)) {
-                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+                       ret = BEGIN_LP_RING(2);
+                       if (ret != 0)
+                               return (ret);
+
+                       if (INTEL_INFO(dev)->gen >= 4) {
+                               OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
+                                   MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
-                       ADVANCE_LP_RING();
                } else {
-                       BEGIN_LP_RING(4);
+                       ret = BEGIN_LP_RING(4);
+                       if (ret != 0)
+                               return (ret);
+
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
-                       ADVANCE_LP_RING();
                }
+               ADVANCE_LP_RING();
        }
 
        i915_emit_breadcrumb(dev);
@@ -535,7 +551,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 static int i915_dispatch_flip(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       RING_LOCALS;
+       int ret;
 
        if (!dev_priv->sarea_priv)
                return -EINVAL;
@@ -547,12 +563,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
        i915_kernel_lost_context(dev);
 
-       BEGIN_LP_RING(2);
+       ret = BEGIN_LP_RING(10);
+       if (ret)
+               return ret;
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
-       ADVANCE_LP_RING();
 
-       BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
@@ -563,11 +579,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
-       ADVANCE_LP_RING();
 
-       BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
+
        ADVANCE_LP_RING();
 
        if (++dev_priv->counter > 0x7FFFFFFFUL)
@@ -575,44 +590,48 @@ static int i915_dispatch_flip(struct drm_device * dev)
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(dev_priv->counter);
-       OUT_RING(0);
-       ADVANCE_LP_RING();
+       if (BEGIN_LP_RING(4) == 0) {
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(dev_priv->counter);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       }
 
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int
+i915_quiescent(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
        i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+       return (intel_wait_ring_idle(ring));
 }
 
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+static int
+i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        int ret;
 
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
+       DRM_LOCK(dev);
        ret = i915_quiescent(dev);
+       DRM_UNLOCK(dev);
 
-       return ret;
+       return (ret);
 }
 
 static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           dev_priv->sarea_priv;
+       drm_i915_sarea_t *sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
+       struct drm_clip_rect *cliprects;
        size_t cliplen;
        int ret;
 
@@ -620,32 +639,38 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }
+       DRM_UNLOCK(dev);
 
        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);
 
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       DRM_UNLOCK(dev);
        cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
-       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-           cliplen)) {
-               DRM_LOCK(dev);
+       if (batch->num_cliprects < 0)
                return -EFAULT;
-       }
-       if (batch->num_cliprects) {
-               vslock((caddr_t)batch->cliprects, cliplen);
-       }
+       if (batch->num_cliprects != 0) {
+               cliprects = kmalloc(batch->num_cliprects *
+                   sizeof(struct drm_clip_rect), DRM_MEM_DMA,
+                   M_WAITOK | M_ZERO);
 
-       ret = i915_dispatch_batchbuffer(dev, batch);
+               ret = -copyin(batch->cliprects, cliprects,
+                   batch->num_cliprects * sizeof(struct drm_clip_rect));
+               if (ret != 0) {
+                       DRM_LOCK(dev);
+                       goto fail_free;
+               }
+       } else
+               cliprects = NULL;
 
-       if (batch->num_cliprects)
-               vsunlock((caddr_t)batch->cliprects, cliplen);
+       DRM_LOCK(dev);
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+       ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
 
+       sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-       DRM_LOCK(dev);
+fail_free:
+       drm_free(cliprects, DRM_MEM_DMA);
        return ret;
 }
 
@@ -653,45 +678,57 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           dev_priv->sarea_priv;
+       drm_i915_sarea_t *sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
-       size_t cliplen;
+       struct drm_clip_rect *cliprects = NULL;
+       void *batch_data;
        int ret;
 
        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+       if (cmdbuf->num_cliprects < 0)
+               return -EINVAL;
 
        DRM_UNLOCK(dev);
-       cliplen = cmdbuf->num_cliprects * sizeof(struct drm_clip_rect);
-       if (cmdbuf->num_cliprects && DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-           cliplen)) {
-               DRM_ERROR("Fault accessing cliprects\n");
+
+       batch_data = kmalloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
+
+       ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
+       if (ret != 0) {
                DRM_LOCK(dev);
-               return -EFAULT;
-       }
-       if (cmdbuf->num_cliprects) {
-               vslock((caddr_t)cmdbuf->cliprects, cliplen);
-               vslock((caddr_t)cmdbuf->buf, cmdbuf->sz);
+               goto fail_batch_free;
        }
 
-       ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
-
        if (cmdbuf->num_cliprects) {
-               vsunlock((caddr_t)cmdbuf->buf, cmdbuf->sz);
-               vsunlock((caddr_t)cmdbuf->cliprects, cliplen);
+               cliprects = kmalloc(cmdbuf->num_cliprects *
+                   sizeof(struct drm_clip_rect), DRM_MEM_DMA,
+                   M_WAITOK | M_ZERO);
+               ret = -copyin(cmdbuf->cliprects, cliprects,
+                   cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
+               if (ret != 0) {
+                       DRM_LOCK(dev);
+                       goto fail_clip_free;
+               }
        }
+
        DRM_LOCK(dev);
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+       ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-               return ret;
+               goto fail_clip_free;
        }
 
+       sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-       return 0;
+
+fail_clip_free:
+       drm_free(cliprects, DRM_MEM_DMA);
+fail_batch_free:
+       drm_free(batch_data, DRM_MEM_DMA);
+       return ret;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
@@ -734,11 +771,47 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
-               /* We need to reset this to 1 once we have GEM */
-               value = 0;
+               value = 1;
+               break;
+       case I915_PARAM_NUM_FENCES_AVAIL:
+               value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+               break;
+       case I915_PARAM_HAS_OVERLAY:
+               value = dev_priv->overlay ? 1 : 0;
+               break;
+       case I915_PARAM_HAS_PAGEFLIPPING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXECBUF2:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_BSD:
+               value = HAS_BSD(dev);
+               break;
+       case I915_PARAM_HAS_BLT:
+               value = HAS_BLT(dev);
+               break;
+       case I915_PARAM_HAS_RELAXED_FENCING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_COHERENT_RINGS:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
+               value = INTEL_INFO(dev)->gen >= 4;
+               break;
+       case I915_PARAM_HAS_RELAXED_DELTA:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_LLC:
+               value = HAS_LLC(dev);
                break;
        default:
-               DRM_DEBUG("Unknown parameter %d\n", param->param);
+               DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+                                param->param);
                return -EINVAL;
        }
 
@@ -770,6 +843,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
+       case I915_SETPARAM_NUM_USED_FENCES:
+               if (param->value > dev_priv->num_fence_regs ||
+                   param->value < 0)
+                       return -EINVAL;
+               /* Userspace can use first N regs */
+               dev_priv->fence_reg_start = param->value;
+               break;
        default:
                DRM_DEBUG("unknown parameter %d\n", param->param);
                return -EINVAL;
@@ -783,6 +863,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;
@@ -793,8 +874,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        }
 
        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               DRM_ERROR("tried to set status page when mode setting active\n");
+               return 0;
+       }
 
-       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+       ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
+           hws->addr & (0x1ffff<<12);
 
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
@@ -805,12 +891,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.virtual == NULL) {
                i915_dma_cleanup(dev);
-               dev_priv->status_gfx_addr = 0;
+               ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->hws_map.virtual;
+       ring->status_page.page_addr = dev_priv->hw_status_page =
+           dev_priv->hws_map.virtual;
 
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
@@ -820,11 +907,298 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        return 0;
 }
 
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
+/*
+ * Decide whether aliasing PPGTT should be enabled for this device.
+ * A non-negative i915_enable_ppgtt module parameter forces the choice;
+ * otherwise default to enabled, except on gen6 (SNB) when the IOMMU
+ * (VT-d) is active, which is known to misbehave with ppgtt.
+ */
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+       if (i915_enable_ppgtt >= 0)
+               return i915_enable_ppgtt;
+
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
+               return false;
+
+       return true;
+}
+
+/*
+ * Initialize GEM memory management: set up the stolen-memory allocator,
+ * size the GTT aperture (shrinking it by the PPGTT page-directory
+ * entries plus a guard page when aliasing PPGTT is in use) and bring up
+ * the hardware rings via i915_gem_init_hw().
+ *
+ * Called with the DRM lock not held; takes and drops it internally.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+i915_load_gem_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long prealloc_size, gtt_size, mappable_size;
+       int ret;
+
+       prealloc_size = dev_priv->mm.gtt.stolen_size;
+       gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
+       mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
+
+       /* Basic memrange allocator for stolen space */
+       drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+       DRM_LOCK(dev);
+       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+                * aperture accordingly when using aliasing ppgtt. */
+               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+               /* For paranoia keep the guard page in between. */
+               gtt_size -= PAGE_SIZE;
+
+               i915_gem_do_init(dev, 0, mappable_size, gtt_size);
+
+               ret = i915_gem_init_aliasing_ppgtt(dev);
+               if (ret) {
+                       DRM_UNLOCK(dev);
+                       return ret;
+               }
+       } else {
+               /* Let GEM Manage all of the aperture.
+                *
+                * However, leave one page at the end still bound to the scratch
+                * page.  There are a number of places where the hardware
+                * apparently prefetches past the end of the object, and we've
+                * seen multiple hangs with the GPU head pointer stuck in a
+                * batchbuffer bound at the last page of the aperture.  One page
+                * should be enough to keep any prefetching inside of the
+                * aperture.
+                */
+               i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+       }
+
+       ret = i915_gem_init_hw(dev);
+       DRM_UNLOCK(dev);
+       if (ret != 0) {
+               /* Ring init failed: undo the aliasing ppgtt set up above. */
+               i915_gem_cleanup_aliasing_ppgtt(dev);
+               return (ret);
+       }
+
+#if 0
+       /* Try to set up FBC with a reasonable compressed buffer size */
+       if (I915_HAS_FBC(dev) && i915_powersave) {
+               int cfb_size;
+
+               /* Leave 1M for line length buffer & misc. */
+
+               /* Try to get a 32M buffer... */
+               if (prealloc_size > (36*1024*1024))
+                       cfb_size = 32*1024*1024;
+               else /* fall back to 7/8 of the stolen space */
+                       cfb_size = prealloc_size * 7 / 8;
+               i915_setup_compression(dev, cfb_size);
+       }
+#endif
+
+       /* Allow hardware batchbuffers unless told otherwise. */
+       dev_priv->allow_batchbuffer = 1;
+       return 0;
+}
+
+/*
+ * Bring up kernel mode setting: parse the VBIOS, initialize the modeset
+ * core, set up GEM and the rings, install the interrupt handler and the
+ * fbdev console, and start KMS helper polling.
+ *
+ * Returns 0 on success or a negative errno; partially-initialized state
+ * is unwound on failure.  Fix vs. previous version: a failure of
+ * intel_fbdev_init() now uninstalls the IRQ handler installed just
+ * above (previously it jumped straight to cleanup_gem, leaking the
+ * installed interrupt handler).
+ */
+static int
+i915_load_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = intel_parse_bios(dev);
+       if (ret)
+               DRM_INFO("failed to find VBIOS tables\n");
+
+#if 0
+       intel_register_dsm_handler();
+#endif
+
+       /* IIR "flip pending" bit means done if this bit is set */
+       if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
+               dev_priv->flip_pending_is_done = true;
+
+       intel_modeset_init(dev);
+
+       ret = i915_load_gem_init(dev);
+       if (ret != 0)
+               goto cleanup_gem;
+
+       intel_modeset_gem_init(dev);
+
+       ret = drm_irq_install(dev);
+       if (ret)
+               goto cleanup_gem;
+
+       dev->vblank_disable_allowed = 1;
+
+       ret = intel_fbdev_init(dev);
+       if (ret)
+               goto cleanup_irq;
+
+       drm_kms_helper_poll_init(dev);
+
+       /* We're off and running w/KMS */
+       dev_priv->mm.suspended = 0;
+
+       return (0);
+
+cleanup_irq:
+       /* The handler was installed above; remove it before tearing down
+        * GEM so no interrupt can race the ring cleanup. */
+       drm_irq_uninstall(dev);
+cleanup_gem:
+       DRM_LOCK(dev);
+       i915_gem_cleanup_ringbuffer(dev);
+       DRM_UNLOCK(dev);
+       i915_gem_cleanup_aliasing_ppgtt(dev);
+       return (ret);
+}
+
+/*
+ * Look up and cache the host bridge device; it is needed later for
+ * MCHBAR config-space accesses.  Returns 0 on success, -1 if the
+ * bridge cannot be found.
+ */
+static int
+i915_get_bridge_dev(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->bridge_dev = intel_gtt_get_bridge_device();
+       if (dev_priv->bridge_dev != NULL)
+               return (0);
+
+       DRM_ERROR("bridge device not found\n");
+       return (-1);
+}
+
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+/*
+ * Reserve bus address space for the MCHBAR window (MCHBAR_SIZE bytes).
+ * The register offset differs by generation: MCHBAR_I965 (64-bit) on
+ * gen4+, MCHBAR_I915 (32-bit) otherwise.  On success the allocated base
+ * address is programmed into the bridge's MCHBAR register.
+ * Returns 0 on success, -ENOMEM if no resource could be allocated.
+ */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv;
+       device_t vga;
+       int reg;
+       u32 temp_lo, temp_hi;
+       u64 mchbar_addr, temp;
+
+       dev_priv = dev->dev_private;
+       reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+       /* Read the current (possibly BIOS-assigned) MCHBAR address; only
+        * gen4+ has the high dword at reg + 4. */
+       if (INTEL_INFO(dev)->gen >= 4)
+               temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
+       else
+               temp_hi = 0;
+       temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
+       mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+       /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef XXX_CONFIG_PNP
+       if (mchbar_addr &&
+           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+               return 0;
+#endif
+
+       /* Get some space for it */
+       vga = device_get_parent(dev->device);
+       /* 0x100 is an arbitrary rid outside the standard BAR range. */
+       dev_priv->mch_res_rid = 0x100;
+       dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
+           dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
+           MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
+       if (dev_priv->mch_res == NULL) {
+               DRM_ERROR("failed mchbar resource alloc\n");
+               return (-ENOMEM);
+       }
+
+       /* Program the allocated address back: high dword first on gen4+,
+        * then the low dword. */
+       if (INTEL_INFO(dev)->gen >= 4) {
+               temp = rman_get_start(dev_priv->mch_res);
+               temp >>= 32;
+               pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
+       }
+       pci_write_config(dev_priv->bridge_dev, reg,
+           rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
+       return (0);
+}
+
+/*
+ * Enable the MCHBAR window if the BIOS left it disabled.  The enable
+ * bit lives in DEVEN_REG on i915G/GM and in the MCHBAR register itself
+ * elsewhere.  If we enable it here, mchbar_need_disable is set so
+ * intel_teardown_mchbar() can undo the change on unload.
+ */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv;
+       int mchbar_reg;
+       u32 temp;
+       bool enabled;
+
+       dev_priv = dev->dev_private;
+       mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+       dev_priv->mchbar_need_disable = false;
+
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
+               enabled = (temp & DEVEN_MCHBAR_EN) != 0;
+       } else {
+               temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
+               enabled = temp & 1;
+       }
+
+       /* If it's already enabled, don't have to do anything */
+       if (enabled) {
+               DRM_DEBUG("mchbar already enabled\n");
+               return;
+       }
+
+       /* Silently give up if we cannot back the window with memory. */
+       if (intel_alloc_mchbar_resource(dev))
+               return;
+
+       dev_priv->mchbar_need_disable = true;
+
+       /* Space is allocated or reserved, so enable it. */
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
+                   temp | DEVEN_MCHBAR_EN, 4);
+       } else {
+               temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
+               pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
+       }
+}
+
+/*
+ * Undo intel_setup_mchbar(): clear the MCHBAR enable bit if we set it
+ * (mchbar_need_disable) and deactivate/release the bus resource
+ * allocated by intel_alloc_mchbar_resource(), if any.
+ */
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv;
+       device_t vga;
+       int mchbar_reg;
+       u32 temp;
+
+       dev_priv = dev->dev_private;
+       mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+       if (dev_priv->mchbar_need_disable) {
+               /* Same register split as in setup: DEVEN_REG on i915G/GM. */
+               if (IS_I915G(dev) || IS_I915GM(dev)) {
+                       temp = pci_read_config(dev_priv->bridge_dev,
+                           DEVEN_REG, 4);
+                       temp &= ~DEVEN_MCHBAR_EN;
+                       pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
+                           temp, 4);
+               } else {
+                       temp = pci_read_config(dev_priv->bridge_dev,
+                           mchbar_reg, 4);
+                       temp &= ~1;
+                       pci_write_config(dev_priv->bridge_dev, mchbar_reg,
+                           temp, 4);
+               }
+       }
+
+       if (dev_priv->mch_res != NULL) {
+               vga = device_get_parent(dev->device);
+               BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
+                   SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
+               BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
+                   SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
+               dev_priv->mch_res = NULL;
+       }
+}
+
+int
+i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long base, size;
-       int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+       int mmio_bar, ret;
+
+       ret = 0;
 
        /* i915 has 4 more counters */
        dev->counters += 4;
@@ -833,33 +1207,48 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;
 
-       dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+       dev_priv = kmalloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
+           M_ZERO | M_WAITOK);
        if (dev_priv == NULL)
                return -ENOMEM;
 
-       memset(dev_priv, 0, sizeof(drm_i915_private_t));
-
        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
+       dev_priv->info = i915_get_device_id(dev->pci_device);
+
+       if (i915_get_bridge_dev(dev)) {
+               drm_free(dev_priv, DRM_MEM_DRIVER);
+               return (-EIO);
+       }
+       dev_priv->mm.gtt = intel_gtt_get();
 
        /* Add register map (needed for suspend/resume) */
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);
 
        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 
-       if (IS_G4X(dev)) {
-               dev->driver->get_vblank_counter = g45_get_vblank_counter;
-               dev->max_vblank_count = 0xffffffff; /* 32 bits of frame count */
-       } else {
-               dev->driver->get_vblank_counter = i915_get_vblank_counter;
-               dev->max_vblank_count = 0x00ffffff; /* 24 bits of frame count */
-       }
+       dev_priv->tq = taskqueue_create("915", M_WAITOK,
+           taskqueue_thread_enqueue, &dev_priv->tq);
+       taskqueue_start_threads(&dev_priv->tq, 1, 0, -1, "i915 taskq");
+       lockinit(&dev_priv->gt_lock, "915gt", 0, LK_CANRECURSE);
+       lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
+       lockinit(&dev_priv->error_completion_lock, "915cmp", 0, LK_CANRECURSE);
+       lockinit(&dev_priv->rps_lock, "915rps", 0, LK_CANRECURSE);
+
+       dev_priv->has_gem = 1;
+       intel_irq_init(dev);
+
+       intel_setup_mchbar(dev);
+       intel_setup_gmbus(dev);
+       intel_opregion_setup(dev);
+
+       intel_setup_bios(dev);
 
-#ifdef I915_HAVE_GEM
        i915_gem_load(dev);
-#endif
+
        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
@@ -869,97 +1258,188 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        return ret;
                }
        }
-#ifdef __linux__
-       /* On the 945G/GM, the chipset reports the MSI capability on the
-        * integrated graphics even though the support isn't actually there
-        * according to the published specs.  It doesn't appear to function
-        * correctly in testing on 945G.
-        * This may be a side effect of MSI having been made available for PEG
-        * and the registers being closely associated.
-        *
-        * According to chipset errata, on the 965GM, MSI interrupts may
-        * be lost or delayed
-        */
-       if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
-               if (pci_enable_msi(dev->pdev))
-                       DRM_ERROR("failed to enable MSI\n");
+
+       if (IS_PINEVIEW(dev))
+               i915_pineview_get_mem_freq(dev);
+       else if (IS_GEN5(dev))
+               i915_ironlake_get_mem_freq(dev);
+
+       lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
+
+       if (IS_IVYBRIDGE(dev))
+               dev_priv->num_pipe = 3;
+       else if (IS_MOBILE(dev) || !IS_GEN2(dev))
+               dev_priv->num_pipe = 2;
+       else
+               dev_priv->num_pipe = 1;
+
+       ret = drm_vblank_init(dev, dev_priv->num_pipe);
+       if (ret)
+               goto out_gem_unload;
+
+       /* Start out suspended */
+       dev_priv->mm.suspended = 1;
+
+       intel_detect_pch(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               DRM_UNLOCK(dev);
+               ret = i915_load_modeset_init(dev);
+               DRM_LOCK(dev);
+               if (ret < 0) {
+                       DRM_ERROR("failed to init modeset\n");
+                       goto out_gem_unload;
+               }
+       }
 
        intel_opregion_init(dev);
-#endif
-       spin_init(&dev_priv->user_irq_lock);
-       dev_priv->user_irq_refcount = 0;
 
-       ret = drm_vblank_init(dev, I915_NUM_PIPE);
+       callout_init_mp(&dev_priv->hangcheck_timer);
+       callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
+           i915_hangcheck_elapsed, dev);
 
-       if (ret) {
-               (void) i915_driver_unload(dev);
-               return ret;
+       if (IS_GEN5(dev)) {
+               lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+               i915_mch_dev = dev_priv;
+               dev_priv->mchdev_lock = &mchdev_lock;
+               lockmgr(&mchdev_lock, LK_RELEASE);
        }
 
-       return ret;
+       return (0);
+
+out_gem_unload:
+       /* XXXKIB */
+       (void) i915_driver_unload_int(dev, true);
+       return (ret);
 }
 
-int i915_driver_unload(struct drm_device *dev)
+/*
+ * Tear down the driver.  'locked' says whether the caller already holds
+ * the DRM lock; the function drops/retakes it as the individual cleanup
+ * steps require.  Idles the GPU, tears down MCHBAR, modeset/fbdev state,
+ * the GEM rings and all locks/taskqueues created in i915_driver_load().
+ * Always returns 0.
+ */
+static int
+i915_driver_unload_int(struct drm_device *dev, bool locked)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* Quiesce the GPU before freeing anything; failure is only logged. */
+       if (!locked)
+               DRM_LOCK(dev);
+       ret = i915_gpu_idle(dev, true);
+       if (ret)
+               DRM_ERROR("failed to idle hardware: %d\n", ret);
+       if (!locked)
+               DRM_UNLOCK(dev);
 
        i915_free_hws(dev);
 
-       drm_rmmap(dev, dev_priv->mmio_map);
-#ifdef __linux__
-       intel_opregion_free(dev);
+       intel_teardown_mchbar(dev);
+
+       /* fbdev/modeset cleanup must run without the DRM lock held. */
+       if (locked)
+               DRM_UNLOCK(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               intel_fbdev_fini(dev);
+               intel_modeset_cleanup(dev);
+       }
+
+       /* Free error state after interrupts are fully disabled. */
+       callout_stop(&dev_priv->hangcheck_timer);
+
+       i915_destroy_error_state(dev);
+
+       intel_opregion_fini(dev);
+
+       if (locked)
+               DRM_LOCK(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               if (!locked)
+                       DRM_LOCK(dev);
+               i915_gem_free_all_phys_object(dev);
+               i915_gem_cleanup_ringbuffer(dev);
+               if (!locked)
+                       DRM_UNLOCK(dev);
+               i915_gem_cleanup_aliasing_ppgtt(dev);
+#if 1
+               /* FBC compression teardown not ported yet. */
+               KIB_NOTYET();
+#else
+               if (I915_HAS_FBC(dev) && i915_powersave)
+                       i915_cleanup_compression(dev);
 #endif
-       spin_uninit(&dev_priv->user_irq_lock);
+               drm_mm_takedown(&dev_priv->mm.stolen);
+
+               intel_cleanup_overlay(dev);
+
+               if (!I915_NEED_GFX_HWS(dev))
+                       i915_free_hws(dev);
+       }
+
+       i915_gem_unload(dev);
+
+       lockuninit(&dev_priv->irq_lock);
+
+       if (dev_priv->tq != NULL)
+               taskqueue_free(dev_priv->tq);
 
+       bus_generic_detach(dev->device);
+       drm_rmmap(dev, dev_priv->mmio_map);
+       intel_teardown_gmbus(dev);
+
+       lockuninit(&dev_priv->error_lock);
+       lockuninit(&dev_priv->error_completion_lock);
+       lockuninit(&dev_priv->rps_lock);
        drm_free(dev->dev_private, DRM_MEM_DRIVER);
 
-       return 0;
+       return (0);
 }
 
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+/* Driver unload entry point; thin wrapper over the shared teardown. */
+int
+i915_driver_unload(struct drm_device *dev)
 {
-       struct drm_i915_file_private *i915_file_priv;
 
-       DRM_DEBUG("\n");
-       i915_file_priv = (struct drm_i915_file_private *)
-           drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+       /* NOTE(review): passes locked=true — presumably the DRM lock is
+        * held on the unload path; confirm against the drm core caller. */
+       return (i915_driver_unload_int(dev, true));
+}
 
-       if (!i915_file_priv)
-               return -ENOMEM;
+/*
+ * Per-open-file setup: allocate the i915 file private and initialize
+ * its GEM request-tracking list and lock.  Always returns 0.
+ */
+int
+i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_i915_file_private *i915_file_priv;
 
-       file_priv->driver_priv = i915_file_priv;
+       /* M_WAITOK: the allocation sleeps rather than fail, so there is
+        * no NULL check. */
+       i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
+           M_WAITOK | M_ZERO);
 
-       i915_file_priv->mm.last_gem_seqno = 0;
-       i915_file_priv->mm.last_gem_throttle_seqno = 0;
+       spin_init(&i915_file_priv->mm.lock);
+       INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+       file_priv->driver_priv = i915_file_priv;
 
-       return 0;
+       return (0);
 }
 
-void i915_driver_lastclose(struct drm_device * dev)
+/*
+ * Called when the last file handle on the device is closed.  Under KMS
+ * (or when there is no private data) legacy DMA teardown is skipped;
+ * only the legacy (UMS) path cleans up GEM and DMA state here.
+ */
+void
+i915_driver_lastclose(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (!dev_priv)
+       if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+#if 1
+               /* fb restore / switcheroo handling not ported yet. */
+               KIB_NOTYET();
+#else
+               drm_fb_helper_restore();
+               vga_switcheroo_process_delayed_switch();
+#endif
                return;
-#ifdef I915_HAVE_GEM
+       }
        i915_gem_lastclose(dev);
-#endif
-       if (dev_priv->agp_heap)
-               i915_mem_takedown(&(dev_priv->agp_heap));
-
        i915_dma_cleanup(dev);
 }
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+
+       /* Release this file's outstanding GEM state before close. */
+       i915_gem_release(dev, file_priv);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 {
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 
+       /* Tear down the per-file lock initialized in i915_driver_open()
+        * before freeing the private data. */
+       spin_uninit(&i915_file_priv->mm.lock);
        drm_free(i915_file_priv, DRM_MEM_FILES);
 }
 
@@ -972,42 +1452,80 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+       DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
        DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
        DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
        DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#ifdef I915_HAVE_GEM
        DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
-#endif
+       DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
-int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+/*
+ * DRM driver descriptor: feature flags, lifecycle callbacks, GEM hooks
+ * and the ioctl table.  DRIVER_MODESET is commented out, so the driver
+ * currently registers without KMS enabled by default.
+ */
+struct drm_driver_info i915_driver_info = {
+       .driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+           DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
+           DRIVER_GEM /*| DRIVER_MODESET*/,
+
+       .buf_priv_size  = sizeof(drm_i915_private_t),
+       .load           = i915_driver_load,
+       .open           = i915_driver_open,
+       .unload         = i915_driver_unload,
+       .preclose       = i915_driver_preclose,
+       .lastclose      = i915_driver_lastclose,
+       .postclose      = i915_driver_postclose,
+       .device_is_agp  = i915_driver_device_is_agp,
+       .gem_init_object = i915_gem_init_object,
+       .gem_free_object = i915_gem_free_object,
+       .gem_pager_ops  = &i915_gem_pager_ops,
+       .dumb_create    = i915_gem_dumb_create,
+       .dumb_map_offset = i915_gem_mmap_gtt,
+       .dumb_destroy   = i915_gem_dumb_destroy,
+       .sysctl_init    = i915_sysctl_init,
+       .sysctl_cleanup = i915_sysctl_cleanup,
+
+       .ioctls         = i915_ioctls,
+       .max_ioctl      = DRM_ARRAY_SIZE(i915_ioctls),
+
+       .name           = DRIVER_NAME,
+       .desc           = DRIVER_DESC,
+       .date           = DRIVER_DATE,
+       .major          = DRIVER_MAJOR,
+       .minor          = DRIVER_MINOR,
+       .patchlevel     = DRIVER_PATCHLEVEL,
+};
 
 /**
  * Determine if the device really is AGP or not.
  *
  * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
+ * built-in.
  *
  * \param dev   The device to be tested.
  *
@@ -1018,3 +1536,535 @@ int i915_driver_device_is_agp(struct drm_device * dev)
 {
        return 1;
 }
+
+/*
+ * Decode the FSB and memory clock frequencies on Pineview from the CLKCFG
+ * register and detect DDR3 via CSHRDDR3CTL.  Results are cached in
+ * dev_priv->fsb_freq, ->mem_freq and ->is_ddr3 for later power/watermark
+ * calculations.
+ */
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 tmp;
+
+       tmp = I915_READ(CLKCFG);
+
+       /* No default cases below: an unrecognized encoding leaves the
+        * previously stored frequency untouched. */
+       switch (tmp & CLKCFG_FSB_MASK) {
+       case CLKCFG_FSB_533:
+               dev_priv->fsb_freq = 533; /* 133*4 */
+               break;
+       case CLKCFG_FSB_800:
+               dev_priv->fsb_freq = 800; /* 200*4 */
+               break;
+       case CLKCFG_FSB_667:
+               dev_priv->fsb_freq =  667; /* 167*4 */
+               break;
+       case CLKCFG_FSB_400:
+               dev_priv->fsb_freq = 400; /* 100*4 */
+               break;
+       }
+
+       switch (tmp & CLKCFG_MEM_MASK) {
+       case CLKCFG_MEM_533:
+               dev_priv->mem_freq = 533;
+               break;
+       case CLKCFG_MEM_667:
+               dev_priv->mem_freq = 667;
+               break;
+       case CLKCFG_MEM_800:
+               dev_priv->mem_freq = 800;
+               break;
+       }
+
+       /* detect pineview DDR3 setting */
+       tmp = I915_READ(CSHRDDR3CTL);
+       dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+/*
+ * Decode memory (DDRMPLL1) and FSB (CSIPLL0) PLL settings on Ironlake.
+ * Caches mem_freq/fsb_freq, mirrors mem_freq into r_t and classifies the
+ * FSB speed into c_m -- both of which key the cparams[] coefficient
+ * lookup performed by i915_chipset_val().
+ */
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u16 ddrpll, csipll;
+
+       ddrpll = I915_READ16(DDRMPLL1);
+       csipll = I915_READ16(CSIPLL0);
+
+       switch (ddrpll & 0xff) {
+       case 0xc:
+               dev_priv->mem_freq = 800;
+               break;
+       case 0x10:
+               dev_priv->mem_freq = 1066;
+               break;
+       case 0x14:
+               dev_priv->mem_freq = 1333;
+               break;
+       case 0x18:
+               dev_priv->mem_freq = 1600;
+               break;
+       default:
+               DRM_DEBUG("unknown memory frequency 0x%02x\n",
+                                ddrpll & 0xff);
+               dev_priv->mem_freq = 0;
+               break;
+       }
+
+       dev_priv->r_t = dev_priv->mem_freq;
+
+       switch (csipll & 0x3ff) {
+       case 0x00c:
+               dev_priv->fsb_freq = 3200;
+               break;
+       case 0x00e:
+               dev_priv->fsb_freq = 3733;
+               break;
+       case 0x010:
+               dev_priv->fsb_freq = 4266;
+               break;
+       case 0x012:
+               dev_priv->fsb_freq = 4800;
+               break;
+       case 0x014:
+               dev_priv->fsb_freq = 5333;
+               break;
+       case 0x016:
+               dev_priv->fsb_freq = 5866;
+               break;
+       case 0x018:
+               dev_priv->fsb_freq = 6400;
+               break;
+       default:
+               DRM_DEBUG("unknown fsb frequency 0x%04x\n",
+                                csipll & 0x3ff);
+               dev_priv->fsb_freq = 0;
+               break;
+       }
+
+       /* Bucket the FSB speed into a cparams[] row selector (cparams[].i). */
+       if (dev_priv->fsb_freq == 3200) {
+               dev_priv->c_m = 0;
+       } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+               dev_priv->c_m = 1;
+       } else {
+               dev_priv->c_m = 2;
+       }
+}
+
+/*
+ * Coefficient table for i915_chipset_val(): rows are matched on
+ * (i == dev_priv->c_m, t == dev_priv->r_t) as filled in by
+ * i915_ironlake_get_mem_freq(); m and c are the slope/offset applied to
+ * the energy-counter delta.
+ */
+static const struct cparams {
+       u16 i;
+       u16 t;
+       u16 m;
+       u16 c;
+} cparams[] = {
+       { 1, 1333, 301, 28664 },
+       { 1, 1066, 294, 24460 },
+       { 1, 800, 294, 25192 },
+       { 0, 1333, 276, 27605 },
+       { 0, 1066, 276, 27605 },
+       { 0, 800, 231, 23784 },
+};
+
+/*
+ * Estimate chipset power draw from the DMIEC/DDREC/CSIEC energy counters,
+ * scaled by the (m, c) coefficients looked up in cparams[].  The result is
+ * cached in dev_priv->chipset_power and returned unchanged when re-sampled
+ * within 10ms (see inline comment).
+ * NOTE(review): the "~0UL" wrap-around correction assumes a 64-bit
+ * unsigned long; confirm behaviour for 32-bit builds.
+ */
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+       u64 total_count, diff, ret;
+       u32 count1, count2, count3, m = 0, c = 0;
+       unsigned long now = jiffies_to_msecs(jiffies), diff1;
+       int i;
+
+       diff1 = now - dev_priv->last_time1;
+       /*
+        * sysctl(8) reads the value of sysctl twice in rapid
+        * succession.  There is high chance that it happens in the
+        * same timer tick.  Use the cached value to not divide by
+        * zero and give the hw a chance to gather more samples.
+        */
+       if (diff1 <= 10)
+               return (dev_priv->chipset_power);
+
+       count1 = I915_READ(DMIEC);
+       count2 = I915_READ(DDREC);
+       count3 = I915_READ(CSIEC);
+
+       total_count = count1 + count2 + count3;
+
+       /* FIXME: handle per-counter overflow */
+       if (total_count < dev_priv->last_count1) {
+               diff = ~0UL - dev_priv->last_count1;
+               diff += total_count;
+       } else {
+               diff = total_count - dev_priv->last_count1;
+       }
+
+       /* Select the slope/offset pair matching the detected platform. */
+       for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
+               if (cparams[i].i == dev_priv->c_m &&
+                   cparams[i].t == dev_priv->r_t) {
+                       m = cparams[i].m;
+                       c = cparams[i].c;
+                       break;
+               }
+       }
+
+       /* Energy delta per elapsed millisecond, scaled by (m, c). */
+       diff = diff / diff1;
+       ret = ((m * diff) + c);
+       ret = ret / 10;
+
+       dev_priv->last_count1 = total_count;
+       dev_priv->last_time1 = now;
+
+       dev_priv->chipset_power = ret;
+       return (ret);
+}
+
+/*
+ * Compute a thermal reading from the TSFS slope/intercept fields and the
+ * TR1 sensor register: ((slope * TR1) / 127) - intercept.  Used by
+ * i915_gfx_val() to pick a correction-factor band.
+ */
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+       unsigned long m, x, b;
+       u32 tsfs;
+
+       tsfs = I915_READ(TSFS);
+
+       m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+       x = I915_READ8(I915_TR1);
+
+       b = tsfs & TSFS_INTR_MASK;
+
+       return ((m * x) / 127) - b;
+}
+
+/*
+ * Translate a hardware PXVID voltage id into a voltage value ("in .1 mil"
+ * per the table comments -- presumably 0.1 mV units, confirm).  Mobile
+ * parts use the vm column, desktop parts the vd column.
+ * NOTE(review): the table has exactly 128 entries; callers must mask
+ * pxvid to 7 bits (i915_gfx_val() does "& 0x7f") -- keep that contract
+ * for any new caller.
+ */
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+       static const struct v_table {
+               u16 vd; /* in .1 mil */
+               u16 vm; /* in .1 mil */
+       } v_table[] = {
+               { 0, 0, },
+               { 375, 0, },
+               { 500, 0, },
+               { 625, 0, },
+               { 750, 0, },
+               { 875, 0, },
+               { 1000, 0, },
+               { 1125, 0, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4125, 3000, },
+               { 4250, 3125, },
+               { 4375, 3250, },
+               { 4500, 3375, },
+               { 4625, 3500, },
+               { 4750, 3625, },
+               { 4875, 3750, },
+               { 5000, 3875, },
+               { 5125, 4000, },
+               { 5250, 4125, },
+               { 5375, 4250, },
+               { 5500, 4375, },
+               { 5625, 4500, },
+               { 5750, 4625, },
+               { 5875, 4750, },
+               { 6000, 4875, },
+               { 6125, 5000, },
+               { 6250, 5125, },
+               { 6375, 5250, },
+               { 6500, 5375, },
+               { 6625, 5500, },
+               { 6750, 5625, },
+               { 6875, 5750, },
+               { 7000, 5875, },
+               { 7125, 6000, },
+               { 7250, 6125, },
+               { 7375, 6250, },
+               { 7500, 6375, },
+               { 7625, 6500, },
+               { 7750, 6625, },
+               { 7875, 6750, },
+               { 8000, 6875, },
+               { 8125, 7000, },
+               { 8250, 7125, },
+               { 8375, 7250, },
+               { 8500, 7375, },
+               { 8625, 7500, },
+               { 8750, 7625, },
+               { 8875, 7750, },
+               { 9000, 7875, },
+               { 9125, 8000, },
+               { 9250, 8125, },
+               { 9375, 8250, },
+               { 9500, 8375, },
+               { 9625, 8500, },
+               { 9750, 8625, },
+               { 9875, 8750, },
+               { 10000, 8875, },
+               { 10125, 9000, },
+               { 10250, 9125, },
+               { 10375, 9250, },
+               { 10500, 9375, },
+               { 10625, 9500, },
+               { 10750, 9625, },
+               { 10875, 9750, },
+               { 11000, 9875, },
+               { 11125, 10000, },
+               { 11250, 10125, },
+               { 11375, 10250, },
+               { 11500, 10375, },
+               { 11625, 10500, },
+               { 11750, 10625, },
+               { 11875, 10750, },
+               { 12000, 10875, },
+               { 12125, 11000, },
+               { 12250, 11125, },
+               { 12375, 11250, },
+               { 12500, 11375, },
+               { 12625, 11500, },
+               { 12750, 11625, },
+               { 12875, 11750, },
+               { 13000, 11875, },
+               { 13125, 12000, },
+               { 13250, 12125, },
+               { 13375, 12250, },
+               { 13500, 12375, },
+               { 13625, 12500, },
+               { 13750, 12625, },
+               { 13875, 12750, },
+               { 14000, 12875, },
+               { 14125, 13000, },
+               { 14250, 13125, },
+               { 14375, 13250, },
+               { 14500, 13375, },
+               { 14625, 13500, },
+               { 14750, 13625, },
+               { 14875, 13750, },
+               { 15000, 13875, },
+               { 15125, 14000, },
+               { 15250, 14125, },
+               { 15375, 14250, },
+               { 15500, 14375, },
+               { 15625, 14500, },
+               { 15750, 14625, },
+               { 15875, 14750, },
+               { 16000, 14875, },
+               { 16125, 15000, },
+       };
+       if (dev_priv->info->is_mobile)
+               return v_table[pxvid].vm;
+       else
+               return v_table[pxvid].vd;
+}
+
+/*
+ * Refresh dev_priv->gfx_power from the GFXEC energy counter delta over
+ * elapsed wall-clock milliseconds.  Gen5 (Ironlake) only; returns without
+ * updating if no measurable time has passed since the last sample.
+ * NOTE(review): as in i915_chipset_val(), the "~0UL" wrap correction
+ * assumes 64-bit unsigned long.
+ */
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+       struct timespec now, diff1;
+       u64 diff;
+       unsigned long diffms;
+       u32 count;
+
+       if (dev_priv->info->gen != 5)
+               return;
+
+       nanotime(&now);
+       diff1 = now;
+       timespecsub(&diff1, &dev_priv->last_time2);
+
+       /* Don't divide by 0 */
+       diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+       if (!diffms)
+               return;
+
+       count = I915_READ(GFXEC);
+
+       if (count < dev_priv->last_count2) {
+               diff = ~0UL - dev_priv->last_count2;
+               diff += count;
+       } else {
+               diff = count - dev_priv->last_count2;
+       }
+
+       dev_priv->last_count2 = count;
+       dev_priv->last_time2 = now;
+
+       /* More magic constants... */
+       diff = diff * 1181;
+       diff = diff / (diffms * 10);
+       dev_priv->gfx_power = diff;
+}
+
+/*
+ * Instantaneous graphics power estimate: read the voltage id for the
+ * current delay step from PXVFREQ, translate it via pvid_to_extvid(),
+ * apply the empirically derived correction factors against the thermal
+ * reading from i915_mch_val(), refresh dev_priv->gfx_power and return
+ * the sum.  state2 is converted to mW per the inline comment.
+ */
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+       unsigned long t, corr, state1, corr2, state2;
+       u32 pxvid, ext_v;
+
+       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+       pxvid = (pxvid >> 24) & 0x7f;
+       ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+       state1 = ext_v;
+
+       t = i915_mch_val(dev_priv);
+
+       /* Revel in the empirically derived constants */
+
+       /* Correction factor in 1/100000 units */
+       if (t > 80)
+               corr = ((t * 2349) + 135940);
+       else if (t >= 50)
+               corr = ((t * 964) + 29317);
+       else /* < 50 */
+               corr = ((t * 301) + 1004);
+
+       corr = corr * ((150142 * state1) / 10000 - 78642);
+       corr /= 100000;
+       corr2 = (corr * dev_priv->corr);
+
+       state2 = (corr2 * state1) / 10000;
+       state2 /= 100; /* convert to mW */
+
+       i915_update_gfx_val(dev_priv);
+
+       return dev_priv->gfx_power + state2;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ *
+ * Returns 0 when no i915 device has registered itself (i915_mch_dev is
+ * NULL); mchdev_lock serializes all access to that pointer.
+ */
+unsigned long i915_read_mch_val(void)
+{
+       struct drm_i915_private *dev_priv;
+       unsigned long chipset_val, graphics_val, ret = 0;
+
+       lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+       if (!i915_mch_dev)
+               goto out_unlock;
+       dev_priv = i915_mch_dev;
+
+       chipset_val = i915_chipset_val(dev_priv);
+       graphics_val = i915_gfx_val(dev_priv);
+
+       ret = chipset_val + graphics_val;
+
+out_unlock:
+       lockmgr(&mchdev_lock, LK_RELEASE);
+
+       return ret;
+}
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ *
+ * Returns false when no i915 device has registered (i915_mch_dev NULL).
+ */
+bool i915_gpu_raise(void)
+{
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
+
+       lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
+
+       /* Smaller delay value appears to mean a higher clock, bounded by
+        * fmax -- hence the decrement; confirm against the DRPS code. */
+       if (dev_priv->max_delay > dev_priv->fmax)
+               dev_priv->max_delay--;
+
+out_unlock:
+       lockmgr(&mchdev_lock, LK_RELEASE);
+
+       return ret;
+}
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ *
+ * Returns false when no i915 device has registered (i915_mch_dev NULL).
+ */
+bool i915_gpu_lower(void)
+{
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
+
+       lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
+
+       /* Larger delay value appears to mean a lower clock, bounded by
+        * min_delay -- hence the increment; confirm against the DRPS code. */
+       if (dev_priv->max_delay < dev_priv->min_delay)
+               dev_priv->max_delay++;
+
+out_unlock:
+       lockmgr(&mchdev_lock, LK_RELEASE);
+
+       return ret;
+}
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ *
+ * Returns a snapshot of dev_priv->busy, or false when no i915 device has
+ * registered (i915_mch_dev NULL).
+ */
+bool i915_gpu_busy(void)
+{
+       struct drm_i915_private *dev_priv;
+       bool ret = false;
+
+       lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+       if (!i915_mch_dev)
+               goto out_unlock;
+       dev_priv = i915_mch_dev;
+
+       ret = dev_priv->busy;
+
+out_unlock:
+       lockmgr(&mchdev_lock, LK_RELEASE);
+
+       return ret;
+}
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ *
+ * Returns false when no i915 device has registered, or when programming
+ * the default frequency via ironlake_set_drps() fails.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+       struct drm_i915_private *dev_priv;
+       bool ret = true;
+
+       lockmgr(&mchdev_lock, LK_EXCLUSIVE);
+       if (!i915_mch_dev) {
+               ret = false;
+               goto out_unlock;
+       }
+       dev_priv = i915_mch_dev;
+
+       /* Clamp the limit back to the boot-time start value... */
+       dev_priv->max_delay = dev_priv->fstart;
+
+       /* ...and program that frequency into the hardware. */
+       if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+               ret = false;
+
+out_unlock:
+       lockmgr(&mchdev_lock, LK_RELEASE);
+
+       return ret;
+}
index 769a96b..7487d78 100644 (file)
@@ -22,6 +22,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_drm.h,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
 #ifndef _I915_DRM_H_
@@ -31,7 +32,7 @@
  * subject to backwards-compatibility constraints.
  */
 
-#include "dev/drm/drm.h"
+#include <dev/drm/drm.h>
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
@@ -192,6 +193,15 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_SW_FINISH 0x20
 #define DRM_I915_GEM_SET_TILING        0x21
 #define DRM_I915_GEM_GET_TILING        0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT  0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE   0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE     0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2       0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -213,6 +223,7 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_EXECBUFFER      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
 #define DRM_IOCTL_I915_GEM_INIT                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
 #define DRM_IOCTL_I915_GEM_PIN         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -223,10 +234,18 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PREAD       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
 #define DRM_IOCTL_I915_GEM_GET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE        DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+/*
+ * The request number must be built from the index macro
+ * DRM_I915_OVERLAY_PUT_IMAGE (0x27).  The previous definition referenced
+ * DRM_IOCTL_I915_OVERLAY_PUT_IMAGE itself; a self-referential macro token
+ * is never re-expanded by the preprocessor, so any use site would see an
+ * undefined identifier.
+ */
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+/*
+ * NOTE(review): GET reuses the SET request number; this matches the
+ * upstream Linux/FreeBSD header at this vintage -- do not "fix" it
+ * without considering the user-space ABI.
+ */
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 
 /* Asynchronous page flipping:
  */
@@ -281,6 +300,18 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_LAST_DISPATCH         3
 #define I915_PARAM_CHIPSET_ID            4
 #define I915_PARAM_HAS_GEM               5
+#define I915_PARAM_NUM_FENCES_AVAIL      6
+#define I915_PARAM_HAS_OVERLAY           7
+#define I915_PARAM_HAS_PAGEFLIPPING     8
+#define I915_PARAM_HAS_EXECBUF2          9
+#define I915_PARAM_HAS_BSD              10
+#define I915_PARAM_HAS_BLT              11
+#define I915_PARAM_HAS_RELAXED_FENCING  12
+#define I915_PARAM_HAS_COHERENT_RINGS   13
+#define I915_PARAM_HAS_EXEC_CONSTANTS   14
+#define I915_PARAM_HAS_RELAXED_DELTA    15
+#define I915_PARAM_HAS_GEN7_SOL_RESET   16
+#define I915_PARAM_HAS_LLC              17
 
 typedef struct drm_i915_getparam {
        int param;
@@ -292,6 +323,7 @@ typedef struct drm_i915_getparam {
 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
 #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
+#define I915_SETPARAM_NUM_USED_FENCES                     4
 
 typedef struct drm_i915_setparam {
        int param;
@@ -497,6 +529,18 @@ struct drm_i915_gem_mmap {
        uint64_t addr_ptr;      /* void *, but pointers are not 32/64 compatible */
 };
 
+/* Argument for DRM_IOCTL_I915_GEM_MMAP_GTT: translate a GEM handle into a
+ * fake offset usable in a subsequent mmap call. */
+struct drm_i915_gem_mmap_gtt {
+       /** Handle for the object being mapped. */
+       uint32_t handle;
+       uint32_t pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       uint64_t offset;
+};
+
 struct drm_i915_gem_set_domain {
        /** Handle for the object */
        uint32_t handle;
@@ -630,6 +674,76 @@ struct drm_i915_gem_execbuffer {
        uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
 };
 
+/* Per-buffer entry in the DRM_IOCTL_I915_GEM_EXECBUFFER2 buffer list. */
+struct drm_i915_gem_exec_object2 {
+       /**
+        * User's handle for a buffer to be bound into the GTT for this
+        * operation.
+        */
+       uint32_t handle;
+
+       /** Number of relocations to be performed on this buffer */
+       uint32_t relocation_count;
+       /**
+        * Pointer to array of struct drm_i915_gem_relocation_entry containing
+        * the relocations to be performed in this buffer.
+        */
+       uint64_t relocs_ptr;
+
+       /** Required alignment in graphics aperture */
+       uint64_t alignment;
+
+       /**
+        * Returned value of the updated offset of the object, for future
+        * presumed_offset writes.
+        */
+       uint64_t offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+       /** EXEC_OBJECT_* flag bits (currently only EXEC_OBJECT_NEEDS_FENCE). */
+       uint64_t flags;
+       /* Reserved fields -- presumably must be zero; confirm kernel checks. */
+       uint64_t rsvd1;
+       uint64_t rsvd2;
+};
+
+/* Argument for DRM_IOCTL_I915_GEM_EXECBUFFER2. */
+struct drm_i915_gem_execbuffer2 {
+       /**
+        * List of gem_exec_object2 structs
+        */
+       uint64_t buffers_ptr;
+       uint32_t buffer_count;
+
+       /** Offset in the batchbuffer to start execution from. */
+       uint32_t batch_start_offset;
+       /** Bytes used in batchbuffer from batch_start_offset */
+       uint32_t batch_len;
+       uint32_t DR1;
+       uint32_t DR4;
+       uint32_t num_cliprects;
+       /** This is a struct drm_clip_rect *cliprects */
+       uint64_t cliprects_ptr;
+#define I915_EXEC_RING_MASK              (7<<0)
+#define I915_EXEC_DEFAULT                (0<<0)
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (2<<0)
+#define I915_EXEC_BLT                    (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK       (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE   (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+       /** Holds the I915_EXEC_* ring-select and constants-mode bits above. */
+       uint64_t flags;
+       uint64_t rsvd1;
+       uint64_t rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET       (1<<8)
+
 struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        uint32_t handle;
@@ -667,6 +781,9 @@ struct drm_i915_gem_busy {
 #define I915_BIT_6_SWIZZLE_9_10_11     4
 /* Not seen by userland */
 #define I915_BIT_6_SWIZZLE_UNKNOWN     5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17                6
+#define I915_BIT_6_SWIZZLE_9_10_17     7
 
 struct drm_i915_gem_set_tiling {
        /** Handle of the buffer to have its tiling state updated */
@@ -716,4 +833,137 @@ struct drm_i915_gem_get_tiling {
        uint32_t swizzle_mode;
 };
 
+/* Returned by DRM_IOCTL_I915_GEM_GET_APERTURE. */
+struct drm_i915_gem_get_aperture {
+       /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+       uint64_t aper_size;
+
+       /**
+        * Available space in the aperture used by i915_gem_execbuffer, in
+        * bytes
+        */
+       uint64_t aper_available_size;
+};
+
+/* Argument for DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID. */
+struct drm_i915_get_pipe_from_crtc_id {
+        /** ID of CRTC being requested */
+        uint32_t crtc_id;
+
+        /** pipe of requested CRTC */
+        uint32_t pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define I915_MADV_PURGED_INTERNAL 2 /* internal state */
+
+/* Argument for DRM_IOCTL_I915_GEM_MADVISE: advise the kernel about future
+ * use of a GEM object's backing store. */
+struct drm_i915_gem_madvise {
+       /** Handle of the buffer to change the backing store advice */
+       uint32_t handle;
+
+       /* Advice: either the buffer will be needed again in the near future,
+        *         or won't be and could be discarded under memory pressure.
+        */
+       uint32_t madv;
+
+       /** Whether the backing store still exists. */
+       uint32_t retained;
+};
+
+/* Source-format description bits for drm_intel_overlay_put_image.flags. */
+#define I915_OVERLAY_TYPE_MASK                 0xff
+#define I915_OVERLAY_YUV_PLANAR        0x01
+#define I915_OVERLAY_YUV_PACKED        0x02
+#define I915_OVERLAY_RGB               0x03
+
+#define I915_OVERLAY_DEPTH_MASK                0xff00
+#define I915_OVERLAY_RGB24             0x1000
+#define I915_OVERLAY_RGB16             0x2000
+#define I915_OVERLAY_RGB15             0x3000
+#define I915_OVERLAY_YUV422            0x0100
+#define I915_OVERLAY_YUV411            0x0200
+#define I915_OVERLAY_YUV420            0x0300
+#define I915_OVERLAY_YUV410            0x0400
+
+#define I915_OVERLAY_SWAP_MASK         0xff0000
+#define I915_OVERLAY_NO_SWAP           0x000000
+#define I915_OVERLAY_UV_SWAP           0x010000
+#define I915_OVERLAY_Y_SWAP            0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP     0x030000
+
+#define I915_OVERLAY_FLAGS_MASK                0xff000000
+#define I915_OVERLAY_ENABLE            0x01000000
+
+/* Argument for DRM_IOCTL_I915_OVERLAY_PUT_IMAGE. */
+struct drm_intel_overlay_put_image {
+       /* various flags and src format description */
+       uint32_t flags;
+       /* source picture description */
+       uint32_t bo_handle;
+       /* stride values and offsets are in bytes, buffer relative */
+       uint16_t stride_Y; /* stride for packed formats */
+       uint16_t stride_UV;
+       uint32_t offset_Y; /* offset for packed formats */
+       uint32_t offset_U;
+       uint32_t offset_V;
+       /* in pixels */
+       uint16_t src_width;
+       uint16_t src_height;
+       /* to compensate the scaling factors for partially covered surfaces */
+       uint16_t src_scan_width;
+       uint16_t src_scan_height;
+       /* output crtc description */
+       uint32_t crtc_id;
+       uint16_t dst_x;
+       uint16_t dst_y;
+       uint16_t dst_width;
+       uint16_t dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS      (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA      (1<<1)
+/* Argument for DRM_IOCTL_I915_OVERLAY_ATTRS: get/update overlay color
+ * attributes and gamma ramp. */
+struct drm_intel_overlay_attrs {
+       /* I915_OVERLAY_UPDATE_* bits */
+       uint32_t flags;
+       uint32_t color_key;
+       int32_t brightness;
+       uint32_t contrast;
+       uint32_t saturation;
+       uint32_t gamma0;
+       uint32_t gamma1;
+       uint32_t gamma2;
+       uint32_t gamma3;
+       uint32_t gamma4;
+       uint32_t gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple.  Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent.  All other pixels will
+ * be displayed on top of the primary plane.  For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE         (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION  (1<<1)
+#define I915_SET_COLORKEY_SOURCE       (1<<2)
+/* Argument for DRM_IOCTL_I915_SET/GET_SPRITE_COLORKEY. */
+struct drm_intel_sprite_colorkey {
+       uint32_t plane_id;
+       uint32_t min_value;
+       uint32_t channel_mask;
+       uint32_t max_value;
+       /* I915_SET_COLORKEY_* bits */
+       uint32_t flags;
+};
+
 #endif                         /* _I915_DRM_H_ */
index 4fb5150..160b7a1 100644 (file)
  * Authors:
  *    Gareth Hughes <gareth@valinux.com>
  *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_drv.c,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
-#include "dev/drm/drmP.h"
-#include "dev/drm/drm.h"
-#include "dev/drm/drm_mm.h"
+#include <dev/drm/drmP.h>
+#include <dev/drm/drm.h>
+#include <dev/drm/drm_mm.h>
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "dev/drm/drm_pciids.h"
+#include <dev/drm/drm_pciids.h>
+#include "intel_drv.h"
 
 /* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
 static drm_pci_id_list_t i915_pciidlist[] = {
        i915_PCI_IDS
 };
 
-static int i915_suspend(device_t kdev)
+/*
+ * Per-chipset capability tables.  .gen is the hardware generation used for
+ * runtime checks (e.g. dev_priv->info->gen); the remaining bit-fields
+ * describe per-device quirks and engine availability.  Selected for a
+ * probed PCI id through pciidlist[] below.
+ */
+static const struct intel_device_info intel_i830_info = {
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_845g_info = {
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+       .gen = 2, .is_i85x = 1, .is_mobile = 1,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i915gm_info = {
+       .gen = 3, .is_mobile = 1,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+};
+static const struct intel_device_info intel_i945g_info = {
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i945gm_info = {
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+       .gen = 4, .is_broadwater = 1,
+       .has_hotplug = 1,
+       .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+       .gen = 4, .is_crestline = 1,
+       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_g33_info = {
+       .gen = 3, .is_g33 = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_g45_info = {
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+       .gen = 4, .is_g4x = 1,
+       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .supports_tv = 1,
+       .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+       .gen = 5,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+       .gen = 5, .is_mobile = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 0, /* disabled due to buggy hardware */
+       .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+       .gen = 6,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
+       .has_llc = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+       .gen = 6, .is_mobile = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
+       .has_llc = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+       .is_ivybridge = 1, .gen = 7,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
+       .has_llc = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+       .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
+       .has_llc = 1,
+};
+
+#define INTEL_VGA_DEVICE(id, info_) {          \
+       .device = id,                           \
+       .info = info_,                          \
+}
+
+static const struct intel_gfx_device_id {
+       int device;
+       const struct intel_device_info *info;
+} pciidlist[] = {              /* aka */
+       INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+       INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+       INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+       INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
+       INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+       INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+       INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+       INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+       INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+       INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+       INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+       INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+       INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+       INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+       INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+       INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+       INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+       INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+       INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+       INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+       INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+       INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
+       INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+       INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+       INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+       INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+       INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+       INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+       INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+       INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+       INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+       INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+       {0, 0}
+};
+
+static int i915_drm_freeze(struct drm_device *dev)
 {
-       struct drm_device *dev = device_get_softc(kdev);
+       struct drm_i915_private *dev_priv;
+       int error;
 
-       if (!dev || !dev->dev_private) {
-               DRM_ERROR("DRM not initialized, aborting suspend.\n");
-               return -ENODEV;
-       }
+       dev_priv = dev->dev_private;
+       drm_kms_helper_poll_disable(dev);
+
+#if 0
+       pci_save_state(dev->pdev);
+#endif
 
        DRM_LOCK(dev);
-       DRM_DEBUG("starting suspend\n");
+       /* If KMS is active, we do the leavevt stuff here */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               error = -i915_gem_idle(dev);
+               if (error) {
+                       DRM_UNLOCK(dev);
+                       device_printf(dev->device,
+                           "GEM idle failed, resume might fail\n");
+                       return (error);
+               }
+               drm_irq_uninstall(dev);
+       }
+
        i915_save_state(dev);
+
+       intel_opregion_fini(dev);
+
+       /* Modeset on resume, not lid events */
+       dev_priv->modeset_on_lid = 0;
        DRM_UNLOCK(dev);
 
-       return (bus_generic_suspend(kdev));
+       return 0;
 }
 
-static int i915_resume(device_t kdev)
+static int
+i915_suspend(device_t kdev)
 {
-       struct drm_device *dev = device_get_softc(kdev);
+       struct drm_device *dev;
+       int error;
+
+       dev = device_get_softc(kdev);
+       if (dev == NULL || dev->dev_private == NULL) {
+               DRM_ERROR("DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       DRM_DEBUG_KMS("starting suspend\n");
+       error = i915_drm_freeze(dev);
+       if (error)
+               return (error);
+
+       error = bus_generic_suspend(kdev);
+       DRM_DEBUG_KMS("finished suspend %d\n", error);
+       return (error);
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int error = 0;
 
        DRM_LOCK(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               i915_gem_restore_gtt_mappings(dev);
+       }
+
        i915_restore_state(dev);
-       DRM_DEBUG("finished resume\n");
+       intel_opregion_setup(dev);
+
+       /* KMS EnterVT equivalent */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev_priv->mm.suspended = 0;
+
+               error = i915_gem_init_hw(dev);
+
+               if (HAS_PCH_SPLIT(dev))
+                       ironlake_init_pch_refclk(dev);
+
+               DRM_UNLOCK(dev);
+               lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE);
+               drm_mode_config_reset(dev);
+               lockmgr(&dev->mode_config.lock, LK_RELEASE);
+               drm_irq_install(dev);
+
+               lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE);
+               /* Resume the modeset for every activated CRTC */
+               drm_helper_resume_force_mode(dev);
+               lockmgr(&dev->mode_config.lock, LK_RELEASE);
+
+               if (IS_IRONLAKE_M(dev))
+                       ironlake_enable_rc6(dev);
+               DRM_LOCK(dev);
+       }
+
+       intel_opregion_init(dev);
+
+       dev_priv->modeset_on_lid = 0;
+
        DRM_UNLOCK(dev);
 
-       return (bus_generic_resume(kdev));
+       return error;
 }
 
-static void i915_configure(struct drm_device *dev)
+static int
+i915_resume(device_t kdev)
 {
-       dev->driver->driver_features =
-          DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
-          DRIVER_HAVE_IRQ;
+       struct drm_device *dev;
+       int ret;
+
+       dev = device_get_softc(kdev);
+       DRM_DEBUG_KMS("starting resume\n");
+#if 0
+       if (pci_enable_device(dev->pdev))
+               return -EIO;
 
-       dev->driver->buf_priv_size      = sizeof(drm_i915_private_t);
-       dev->driver->load               = i915_driver_load;
-       dev->driver->unload             = i915_driver_unload;
-       dev->driver->preclose           = i915_driver_preclose;
-       dev->driver->lastclose          = i915_driver_lastclose;
-       dev->driver->device_is_agp      = i915_driver_device_is_agp;
-       dev->driver->enable_vblank      = i915_enable_vblank;
-       dev->driver->disable_vblank     = i915_disable_vblank;
-       dev->driver->irq_preinstall     = i915_driver_irq_preinstall;
-       dev->driver->irq_postinstall    = i915_driver_irq_postinstall;
-       dev->driver->irq_uninstall      = i915_driver_irq_uninstall;
-       dev->driver->irq_handler        = i915_driver_irq_handler;
+       pci_set_master(dev->pdev);
+#endif
 
-       dev->driver->ioctls             = i915_ioctls;
-       dev->driver->max_ioctl          = i915_max_ioctl;
+       ret = -i915_drm_thaw(dev);
+       if (ret != 0)
+               return (ret);
 
-       dev->driver->name               = DRIVER_NAME;
-       dev->driver->desc               = DRIVER_DESC;
-       dev->driver->date               = DRIVER_DATE;
-       dev->driver->major              = DRIVER_MAJOR;
-       dev->driver->minor              = DRIVER_MINOR;
-       dev->driver->patchlevel         = DRIVER_PATCHLEVEL;
+       drm_kms_helper_poll_enable(dev);
+       ret = bus_generic_resume(kdev);
+       DRM_DEBUG_KMS("finished resume %d\n", ret);
+       return (ret);
 }
 
 static int
 i915_probe(device_t kdev)
 {
+
        return drm_probe(kdev, i915_pciidlist);
 }
 
+int i915_modeset;
+
 static int
 i915_attach(device_t kdev)
 {
-       struct drm_device *dev = device_get_softc(kdev);
-
-       dev->driver = kmalloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
-           M_WAITOK | M_ZERO);
-
-       i915_configure(dev);
+       struct drm_device *dev;
 
-       return drm_attach(kdev, i915_pciidlist);
+       dev = device_get_softc(kdev);
+       if (i915_modeset == 1)
+               i915_driver_info.driver_features |= DRIVER_MODESET;
+       dev->driver = &i915_driver_info;
+       return (drm_attach(kdev, i915_pciidlist));
 }
 
-static int
-i915_detach(device_t kdev)
+const struct intel_device_info *
+i915_get_device_id(int device)
 {
-       struct drm_device *dev = device_get_softc(kdev);
-       int ret;
-
-       ret = drm_detach(kdev);
+       const struct intel_gfx_device_id *did;
 
-       kfree(dev->driver, DRM_MEM_DRIVER);
-
-       return ret;
+       for (did = &pciidlist[0]; did->device != 0; did++) {
+               if (did->device != device)
+                       continue;
+               return (did->info);
+       }
+       return (NULL);
 }
 
 static device_method_t i915_methods[] = {
@@ -138,8 +397,7 @@ static device_method_t i915_methods[] = {
        DEVMETHOD(device_attach,        i915_attach),
        DEVMETHOD(device_suspend,       i915_suspend),
        DEVMETHOD(device_resume,        i915_resume),
-       DEVMETHOD(device_detach,        i915_detach),
-
+       DEVMETHOD(device_detach,        drm_detach),
        DEVMETHOD_END
 };
 
@@ -150,5 +408,407 @@ static driver_t i915_driver = {
 };
 
 extern devclass_t drm_devclass;
-DRIVER_MODULE(i915, vgapci, i915_driver, drm_devclass, NULL, NULL);
-MODULE_DEPEND(i915, drm, 1, 1, 1);
+DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
+    SI_ORDER_ANY);
+MODULE_DEPEND(i915kms, drm, 1, 1, 1);
+MODULE_DEPEND(i915kms, agp, 1, 1, 1);
+MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
+MODULE_DEPEND(i915kms, iic, 1, 1, 1);
+MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
+
+int intel_iommu_enabled = 0;
+TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
+
+int i915_semaphores = -1;
+TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
+static int i915_try_reset = 1;
+TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
+unsigned int i915_lvds_downclock = 0;
+TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
+int i915_vbt_sdvo_panel_type = -1;
+TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
+unsigned int i915_powersave = 1;
+TUNABLE_INT("drm.i915.powersave", &i915_powersave);
+int i915_enable_fbc = 0;
+TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
+int i915_enable_rc6 = 0;
+TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
+int i915_panel_use_ssc = -1;
+TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
+int i915_panel_ignore_lid = 0;
+TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
+int i915_modeset = 1;
+TUNABLE_INT("drm.i915.modeset", &i915_modeset);
+int i915_enable_ppgtt = -1;
+TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
+int i915_enable_hangcheck = 1;
+TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
+
+#define        PCI_VENDOR_INTEL                0x8086
+#define INTEL_PCH_DEVICE_ID_MASK       0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE   0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE   0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE   0x1e00
+
+void
+intel_detect_pch(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv;
+       device_t pch;
+       uint32_t id;
+
+       dev_priv = dev->dev_private;
+       pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
+       if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
+               id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
+               if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+                       dev_priv->pch_type = PCH_IBX;
+                       DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+               } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+                       dev_priv->pch_type = PCH_CPT;
+                       DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+               } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+                       /* PantherPoint is CPT compatible */
+                       dev_priv->pch_type = PCH_CPT;
+                       DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+               } else
+                       DRM_DEBUG_KMS("No PCH detected\n");
+       } else
+               DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
+}
+
+void
+__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+               DELAY(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE, 1);
+       POSTING_READ(FORCEWAKE);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+               DELAY(10);
+}
+
+void
+__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+               DELAY(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+       POSTING_READ(FORCEWAKE_MT);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+               DELAY(10);
+}
+
+void
+gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+
+       lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
+       if (dev_priv->forcewake_count++ == 0)
+               dev_priv->display.force_wake_get(dev_priv);
+       lockmgr(&dev_priv->gt_lock, LK_RELEASE);
+}
+
+static void
+gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+       u32 gtfifodbg;
+
+       gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+       if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
+               kprintf("MMIO read or write has been dropped %x\n", gtfifodbg);
+               I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+       }
+}
+
+void
+__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+
+       I915_WRITE_NOTRACE(FORCEWAKE, 0);
+       /* The below doubles as a POSTING_READ */
+       gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+       /* The below doubles as a POSTING_READ */
+       gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+
+       lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
+       if (--dev_priv->forcewake_count == 0)
+               dev_priv->display.force_wake_put(dev_priv);
+       lockmgr(&dev_priv->gt_lock, LK_RELEASE);
+}
+
+int
+__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+               int loop = 500;
+               u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+               while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+                       DELAY(10);
+                       fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+               }
+               if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
+                       kprintf("%s loop\n", __func__);
+                       ++ret;
+               }
+               dev_priv->gt_fifo_count = fifo;
+       }
+       dev_priv->gt_fifo_count--;
+
+       return (ret);
+}
+
+static int
+i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_I85X(dev))
+               return -ENODEV;
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       if (IS_I830(dev) || IS_845G(dev)) {
+               I915_WRITE(DEBUG_RESET_I830,
+                          DEBUG_RESET_DISPLAY |
+                          DEBUG_RESET_RENDER |
+                          DEBUG_RESET_FULL);
+               POSTING_READ(DEBUG_RESET_I830);
+               DELAY(1000);
+
+               I915_WRITE(DEBUG_RESET_I830, 0);
+               POSTING_READ(DEBUG_RESET_I830);
+       }
+
+       DELAY(1000);
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       return 0;
+}
+
+static int
+i965_reset_complete(struct drm_device *dev)
+{
+       u8 gdrst;
+
+       gdrst = pci_read_config(dev->device, I965_GDRST, 1);
+       return (gdrst & 0x1);
+}
+
+static int
+i965_do_reset(struct drm_device *dev, u8 flags)
+{
+       u8 gdrst;
+
+       /*
+        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+        * well as the reset bit (GR/bit 0).  Setting the GR bit
+        * triggers the reset; when done, the hardware will clear it.
+        */
+       gdrst = pci_read_config(dev->device, I965_GDRST, 1);
+       pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);
+
+       return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
+           "915rst"));
+}
+
+static int
+ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+       struct drm_i915_private *dev_priv;
+       u32 gdrst;
+
+       dev_priv = dev->dev_private;
+       gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+       return (_intel_wait_for(dev,
+           (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
+           500, 1, "915rst"));
+}
+
+static int
+gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+       struct drm_i915_private *dev_priv;
+       int ret;
+
+       dev_priv = dev->dev_private;
+
+       /* Hold gt_lock across reset to prevent any register access
+        * with forcewake not set correctly
+        */
+       lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
+
+       /* Reset the chip */
+
+       /* GEN6_GDRST is not in the gt power well, no need to check
+        * for fifo space for the write or forcewake the chip for
+        * the read
+        */
+       I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+       /* Spin waiting for the device to ack the reset request */
+       ret = _intel_wait_for(dev,
+           (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
+           500, 1, "915rst");
+
+       /* If reset with a user forcewake, try to restore, otherwise turn it off */
+       if (dev_priv->forcewake_count)
+               dev_priv->display.force_wake_get(dev_priv);
+       else
+               dev_priv->display.force_wake_put(dev_priv);
+
+       /* Restore fifo count */
+       dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+       lockmgr(&dev_priv->gt_lock, LK_RELEASE);
+       return (ret);
+}
+
+int
+i915_reset(struct drm_device *dev, u8 flags)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       /*
+        * We really should only reset the display subsystem if we actually
+        * need to
+        */
+       bool need_display = true;
+       int ret;
+
+       if (!i915_try_reset)
+               return (0);
+
+       if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
+               return (-EBUSY);
+
+       i915_gem_reset(dev);
+
+       ret = -ENODEV;
+       if (time_uptime - dev_priv->last_gpu_reset < 5) {
+               DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+       } else {
+               switch (INTEL_INFO(dev)->gen) {
+               case 7:
+               case 6:
+                       ret = gen6_do_reset(dev, flags);
+                       break;
+               case 5:
+                       ret = ironlake_do_reset(dev, flags);
+                       break;
+               case 4:
+                       ret = i965_do_reset(dev, flags);
+                       break;
+               case 2:
+                       ret = i8xx_do_reset(dev, flags);
+                       break;
+               }
+       }
+       dev_priv->last_gpu_reset = time_uptime;
+       if (ret) {
+               DRM_ERROR("Failed to reset chip.\n");
+               DRM_UNLOCK(dev);
+               return (ret);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+           !dev_priv->mm.suspended) {
+               dev_priv->mm.suspended = 0;
+
+               i915_gem_init_swizzling(dev);
+
+               dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
+               if (HAS_BSD(dev))
+                       dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
+               if (HAS_BLT(dev))
+                       dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);
+
+               i915_gem_init_ppgtt(dev);
+
+               drm_irq_uninstall(dev);
+               drm_mode_config_reset(dev);
+               DRM_UNLOCK(dev);
+               drm_irq_install(dev);
+               DRM_LOCK(dev);
+       }
+       DRM_UNLOCK(dev);
+
+       if (need_display) {
+               lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE);
+               drm_helper_resume_force_mode(dev);
+               lockmgr(&dev->mode_config.lock, LK_RELEASE);
+       }
+
+       return (0);
+}
+
+#define __i915_read(x, y) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+       u##x val = 0; \
+       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_get(dev_priv); \
+               val = DRM_READ##y(dev_priv->mmio_map, reg);     \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_put(dev_priv); \
+               lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
+       } else { \
+               val = DRM_READ##y(dev_priv->mmio_map, reg);     \
+       } \
+       trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+       return val; \
+}
+
+__i915_read(8, 8)
+__i915_read(16, 16)
+__i915_read(32, 32)
+__i915_read(64, 64)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+       u32 __fifo_ret = 0; \
+       trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+       } \
+       DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
+       if (__predict_false(__fifo_ret)) { \
+               gen6_gt_check_fifodbg(dev_priv); \
+       } \
+}
+__i915_write(8, 8)
+__i915_write(16, 16)
+__i915_write(32, 32)
+__i915_write(64, 64)
+#undef __i915_write
index faa9b78..0add67e 100644 (file)
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_drv.h,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
-#include "dev/drm/drm_mm.h"
+#include <sys/eventhandler.h>
+
+#include <dev/agp/agp_i810.h>
+#include <dev/drm/drm_mm.h>
 #include "i915_reg.h"
+#include "intel_ringbuffer.h"
+#include "intel_bios.h"
 
 /* General customization:
  */
 #define DRIVER_DESC            "Intel Graphics"
 #define DRIVER_DATE            "20080730"
 
+MALLOC_DECLARE(DRM_I915_GEM);
+
 enum i915_pipe {
        PIPE_A = 0,
        PIPE_B,
+       PIPE_C,
+       I915_MAX_PIPES
 };
-
+#define pipe_name(p) ((p) + 'A')
 #define I915_NUM_PIPE  2
 
+enum plane {
+       PLANE_A = 0,
+       PLANE_B,
+       PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define        I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -71,16 +92,98 @@ enum i915_pipe {
 #define WATCH_INACTIVE 0
 #define WATCH_PWRITE   0
 
-typedef struct _drm_i915_ring_buffer {
-       int tail_mask;
-       unsigned long Size;
-       u8 *virtual_start;
-       int head;
-       int tail;
-       int space;
-       drm_local_map_t map;
-       struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+       int id;
+       drm_dma_handle_t *handle;
+       struct drm_i915_gem_object *cur_obj;
+};
+
+struct drm_i915_private;
+
+struct drm_i915_display_funcs {
+       void (*dpms)(struct drm_crtc *crtc, int mode);
+       bool (*fbc_enabled)(struct drm_device *dev);
+       void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+       void (*disable_fbc)(struct drm_device *dev);
+       int (*get_display_clock_speed)(struct drm_device *dev);
+       int (*get_fifo_size)(struct drm_device *dev, int plane);
+       void (*update_wm)(struct drm_device *dev);
+       void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+                                uint32_t sprite_width, int pixel_size);
+       int (*crtc_mode_set)(struct drm_crtc *crtc,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode,
+                            int x, int y,
+                            struct drm_framebuffer *old_fb);
+       void (*write_eld)(struct drm_connector *connector,
+                         struct drm_crtc *crtc);
+       void (*fdi_link_train)(struct drm_crtc *crtc);
+       void (*init_clock_gating)(struct drm_device *dev);
+       void (*init_pch_clock_gating)(struct drm_device *dev);
+       int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb,
+                         struct drm_i915_gem_object *obj);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv);
+       int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                           int x, int y);
+       /* clock updates for mode set */
+       /* cursor updates */
+       /* render clock increase/decrease */
+       /* display clock increase/decrease */
+       /* pll clock increase/decrease */
+};
+
+struct intel_device_info {
+       u8 gen;
+       u8 is_mobile:1;
+       u8 is_i85x:1;
+       u8 is_i915g:1;
+       u8 is_i945gm:1;
+       u8 is_g33:1;
+       u8 need_gfx_hws:1;
+       u8 is_g4x:1;
+       u8 is_pineview:1;
+       u8 is_broadwater:1;
+       u8 is_crestline:1;
+       u8 is_ivybridge:1;
+       u8 has_fbc:1;
+       u8 has_pipe_cxsr:1;
+       u8 has_hotplug:1;
+       u8 cursor_needs_physical:1;
+       u8 has_overlay:1;
+       u8 overlay_needs_physical:1;
+       u8 supports_tv:1;
+       u8 has_bsd_ring:1;
+       u8 has_blt_ring:1;
+       u8 has_llc:1;
+};
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
+struct i915_hw_ppgtt {
+       unsigned num_pd_entries;
+       vm_page_t *pt_pages;
+       uint32_t pd_offset;
+       vm_paddr_t *pt_dma_addr;
+       vm_paddr_t scratch_page_dma_addr;
+};
+
+enum no_fbc_reason {
+       FBC_NO_OUTPUT, /* no outputs enabled to compress */
+       FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+       FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+       FBC_MODE_TOO_LARGE, /* mode too large for compression */
+       FBC_BAD_PLANE, /* fbc not supported on plane */
+       FBC_NOT_TILED, /* buffer not tiled */
+       FBC_MULTIPLE_PIPES, /* more than one pipe active */
+       FBC_MODULE_PARAM,
+};
 
 struct mem_block {
        struct mem_block *next;
@@ -100,17 +203,72 @@ struct intel_opregion {
        struct opregion_acpi *acpi;
        struct opregion_swsci *swsci;
        struct opregion_asle *asle;
-       int enabled;
+       void *vbt;
+       u32 *lid_state;
+};
+#define OPREGION_SIZE            (8*1024)
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
+
+struct drm_i915_fence_reg {
+       struct list_head lru_list;
+       struct drm_i915_gem_object *obj;
+       uint32_t setup_seqno;
+       int pin_count;
+};
+
+struct sdvo_device_mapping {
+       u8 initialized;
+       u8 dvo_port;
+       u8 slave_addr;
+       u8 dvo_wiring;
+       u8 i2c_pin;
+       u8 ddc_pin;
+};
+
+enum intel_pch {
+       PCH_IBX,        /* Ibexpeak PCH */
+       PCH_CPT,        /* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
 typedef struct drm_i915_private {
        struct drm_device *dev;
 
+       device_t *gmbus_bridge;
+       device_t *bbbus_bridge;
+       device_t *gmbus;
+       device_t *bbbus;
+       /** gmbus_lock protects against concurrent usage of the single hw
+        * gmbus controller on different i2c buses. */
+       struct lock gmbus_lock;
+
+       int has_gem;
+       int relative_constants_mode;
+
        drm_local_map_t *sarea;
        drm_local_map_t *mmio_map;
 
+       /** gt_fifo_count and the subsequent register write are synchronized
+        * with the DRM device struct lock (DRM_LOCK). */
+       unsigned gt_fifo_count;
+       /** forcewake_count is protected by gt_lock */
+       unsigned forcewake_count;
+       /** gt_lock is also taken in irq contexts. */
+       struct lock gt_lock;
+
        drm_i915_sarea_t *sarea_priv;
-       drm_i915_ring_buffer_t ring;
+       /* drm_i915_ring_buffer_t ring; */
+       struct intel_ring_buffer rings[I915_NUM_RINGS];
+       uint32_t next_seqno;
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
@@ -120,35 +278,95 @@ typedef struct drm_i915_private {
        drm_local_map_t hws_map;
        struct drm_gem_object *hws_obj;
 
+       struct drm_i915_gem_object *pwrctx;
+       struct drm_i915_gem_object *renderctx;
+
        unsigned int cpp;
        int back_offset;
        int front_offset;
        int current_page;
        int page_flipping;
 
-       wait_queue_head_t irq_queue;
-       /** Protects user_irq_refcount and irq_mask_reg */
-       struct spinlock user_irq_lock;
-       /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-       int user_irq_refcount;
+       atomic_t irq_received;
+       u32 trace_irq_seqno;
+
        /** Cached value of IER to avoid reads in updating the bitfield */
-       u32 irq_mask_reg;
        u32 pipestat[2];
+       u32 irq_mask;
+       u32 gt_irq_mask;
+       u32 pch_irq_mask;
+       struct lock irq_lock;
+
+       u32 hotplug_supported_mask;
 
        int tex_lru_log_granularity;
        int allow_batchbuffer;
-       struct mem_block *agp_heap;
        unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
        int vblank_pipe;
+       int num_pipe;
+
+       /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
+       int hangcheck_count;
+       uint32_t last_acthd;
+       uint32_t last_acthd_bsd;
+       uint32_t last_acthd_blt;
+       uint32_t last_instdone;
+       uint32_t last_instdone1;
 
        struct intel_opregion opregion;
 
+
+       /* overlay */
+       struct intel_overlay *overlay;
+       bool sprite_scaling_enabled;
+
+       /* LVDS info */
+       int backlight_level;  /* restore backlight to this value */
+       bool backlight_enabled;
+       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+       /* Feature bits from the VBIOS */
+       unsigned int int_tv_support:1;
+       unsigned int lvds_dither:1;
+       unsigned int lvds_vbt:1;
+       unsigned int int_crt_support:1;
+       unsigned int lvds_use_ssc:1;
+       unsigned int display_clock_mode:1;
+       int lvds_ssc_freq;
+       struct {
+               int rate;
+               int lanes;
+               int preemphasis;
+               int vswing;
+
+               bool initialized;
+               bool support;
+               int bpp;
+               struct edp_power_seq pps;
+       } edp;
+       bool no_aux_handshake;
+
+       int crt_ddc_pin;
+       struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+       int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+       int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+       /* PCH chipset type */
+       enum intel_pch pch_type;
+
+       /* Display functions */
+       struct drm_i915_display_funcs display;
+
+       unsigned long quirks;
+
        /* Register state */
+       bool modeset_on_lid;
        u8 saveLBB;
        u32 saveDSPACNTR;
        u32 saveDSPBCNTR;
        u32 saveDSPARB;
-       u32 saveRENDERSTANDBY;
        u32 saveHWS;
        u32 savePIPEACONF;
        u32 savePIPEBCONF;
@@ -165,6 +383,13 @@ typedef struct drm_i915_private {
        u32 saveVBLANK_A;
        u32 saveVSYNC_A;
        u32 saveBCLRPAT_A;
+       u32 saveTRANSACONF;
+       u32 saveTRANS_HTOTAL_A;
+       u32 saveTRANS_HBLANK_A;
+       u32 saveTRANS_HSYNC_A;
+       u32 saveTRANS_VTOTAL_A;
+       u32 saveTRANS_VBLANK_A;
+       u32 saveTRANS_VSYNC_A;
        u32 savePIPEASTAT;
        u32 saveDSPASTRIDE;
        u32 saveDSPASIZE;
@@ -173,8 +398,11 @@ typedef struct drm_i915_private {
        u32 saveDSPASURF;
        u32 saveDSPATILEOFF;
        u32 savePFIT_PGM_RATIOS;
+       u32 saveBLC_HIST_CTL;
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
+       u32 saveBLC_CPU_PWM_CTL;
+       u32 saveBLC_CPU_PWM_CTL2;
        u32 saveFPB0;
        u32 saveFPB1;
        u32 saveDPLL_B;
@@ -186,6 +414,13 @@ typedef struct drm_i915_private {
        u32 saveVBLANK_B;
        u32 saveVSYNC_B;
        u32 saveBCLRPAT_B;
+       u32 saveTRANSBCONF;
+       u32 saveTRANS_HTOTAL_B;
+       u32 saveTRANS_HBLANK_B;
+       u32 saveTRANS_HSYNC_B;
+       u32 saveTRANS_VTOTAL_B;
+       u32 saveTRANS_VBLANK_B;
+       u32 saveTRANS_VSYNC_B;
        u32 savePIPEBSTAT;
        u32 saveDSPBSTRIDE;
        u32 saveDSPBSIZE;
@@ -211,6 +446,7 @@ typedef struct drm_i915_private {
        u32 savePFIT_CONTROL;
        u32 save_palette_a[256];
        u32 save_palette_b[256];
+       u32 saveDPFC_CB_BASE;
        u32 saveFBC_CFB_BASE;
        u32 saveFBC_LL_BASE;
        u32 saveFBC_CONTROL;
@@ -218,9 +454,13 @@ typedef struct drm_i915_private {
        u32 saveIER;
        u32 saveIIR;
        u32 saveIMR;
+       u32 saveDEIER;
+       u32 saveDEIMR;
+       u32 saveGTIER;
+       u32 saveGTIMR;
+       u32 saveFDI_RXA_IMR;
+       u32 saveFDI_RXB_IMR;
        u32 saveCACHE_MODE_0;
-       u32 saveD_STATE;
-       u32 saveCG_2D_DIS;
        u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
@@ -232,15 +472,73 @@ typedef struct drm_i915_private {
        u8 saveAR[21];
        u8 saveDACMASK;
        u8 saveCR[37];
+       uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+       u32 saveCURACNTR;
+       u32 saveCURAPOS;
+       u32 saveCURABASE;
+       u32 saveCURBCNTR;
+       u32 saveCURBPOS;
+       u32 saveCURBBASE;
+       u32 saveCURSIZE;
+       u32 saveDP_B;
+       u32 saveDP_C;
+       u32 saveDP_D;
+       u32 savePIPEA_GMCH_DATA_M;
+       u32 savePIPEB_GMCH_DATA_M;
+       u32 savePIPEA_GMCH_DATA_N;
+       u32 savePIPEB_GMCH_DATA_N;
+       u32 savePIPEA_DP_LINK_M;
+       u32 savePIPEB_DP_LINK_M;
+       u32 savePIPEA_DP_LINK_N;
+       u32 savePIPEB_DP_LINK_N;
+       u32 saveFDI_RXA_CTL;
+       u32 saveFDI_TXA_CTL;
+       u32 saveFDI_RXB_CTL;
+       u32 saveFDI_TXB_CTL;
+       u32 savePFA_CTL_1;
+       u32 savePFB_CTL_1;
+       u32 savePFA_WIN_SZ;
+       u32 savePFB_WIN_SZ;
+       u32 savePFA_WIN_POS;
+       u32 savePFB_WIN_POS;
+       u32 savePCH_DREF_CONTROL;
+       u32 saveDISP_ARB_CTL;
+       u32 savePIPEA_DATA_M1;
+       u32 savePIPEA_DATA_N1;
+       u32 savePIPEA_LINK_M1;
+       u32 savePIPEA_LINK_N1;
+       u32 savePIPEB_DATA_M1;
+       u32 savePIPEB_DATA_N1;
+       u32 savePIPEB_LINK_M1;
+       u32 savePIPEB_LINK_N1;
+       u32 saveMCHBAR_RENDER_STANDBY;
+       u32 savePCH_PORT_HOTPLUG;
 
        struct {
-#ifdef __linux__
+               /** Memory allocator for GTT stolen memory */
+               struct drm_mm stolen;
+               /** Memory allocator for GTT */
                struct drm_mm gtt_space;
-#endif
+               /** List of all objects in gtt_space. Used to restore gtt
+                * mappings on resume */
+               struct list_head gtt_list;
+
+               /** Usable portion of the GTT for GEM */
+               unsigned long gtt_start;
+               unsigned long gtt_mappable_end;
+               unsigned long gtt_end;
+
+               /** PPGTT used for aliasing the PPGTT with the GTT */
+               struct i915_hw_ppgtt *aliasing_ppgtt;
+
                /**
                 * List of objects currently involved in rendering from the
                 * ringbuffer.
                 *
+                * Includes buffers having the contents of their GPU caches
+                * flushed, not necessarily primitives.  last_rendering_seqno
+                * represents when the rendering involved will be completed.
+                *
                 * A reference is held on the buffer while on this list.
                 */
                struct list_head active_list;
@@ -258,6 +556,8 @@ typedef struct drm_i915_private {
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
                 *
+                * last_rendering_seqno is 0 while an object is in this list.
+                *
                 * A reference is not held on the buffer while on this list,
                 * as merely being GTT-bound shouldn't prevent its being
                 * freed, and we'll pull it off the list in the free path.
@@ -265,11 +565,22 @@ typedef struct drm_i915_private {
                struct list_head inactive_list;
 
                /**
-                * List of breadcrumbs associated with GPU requests currently
-                * outstanding.
+                * LRU list of objects which are not in the ringbuffer but
+                * are still pinned in the GTT.
                 */
-               struct list_head request_list;
-#ifdef __linux__
+               struct list_head pinned_list;
+
+               /** LRU list of objects with fence regs on them. */
+               struct list_head fence_list;
+
+               /**
+                * List of objects currently pending being freed.
+                *
+                * These objects are no longer in use, but due to a signal
+                * we were prevented from freeing them at the appointed time.
+                */
+               struct list_head deferred_free_list;
+
                /**
                 * We leave the user IRQ off as much as possible,
                 * but this means that requests will finish and never
@@ -277,8 +588,14 @@ typedef struct drm_i915_private {
                 * fire periodically while the ring is running. When it
                 * fires, go retire requests.
                 */
-               struct delayed_work retire_work;
-#endif
+               struct timeout_task retire_task;
+
+               /**
+                * Are we in a non-interruptible section of code like
+                * modesetting?
+                */
+               bool interruptible;
+
                uint32_t next_gem_seqno;
 
                /**
@@ -314,9 +631,113 @@ typedef struct drm_i915_private {
                uint32_t bit_6_swizzle_x;
                /** Bit 6 swizzling required for Y tiling */
                uint32_t bit_6_swizzle_y;
+
+               /* storage for physical objects */
+               struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+               /* accounting, useful for userland debugging */
+               size_t gtt_total;
+               size_t mappable_gtt_total;
+               size_t object_memory;
+               u32 object_count;
+
+               struct intel_gtt gtt;
+               eventhandler_tag i915_lowmem;
        } mm;
+
+       const struct intel_device_info *info;
+
+       struct sdvo_device_mapping sdvo_mappings[2];
+       /* indicate whether the LVDS_BORDER should be enabled or not */
+       unsigned int lvds_border_bits;
+       /* Panel fitter placement and size for Ironlake+ */
+       u32 pch_pf_pos, pch_pf_size;
+
+       struct drm_crtc *plane_to_crtc_mapping[3];
+       struct drm_crtc *pipe_to_crtc_mapping[3];
+       /* wait_queue_head_t pending_flip_queue; XXXKIB */
+       bool flip_pending_is_done;
+
+       /* Reclocking support */
+       bool render_reclock_avail;
+       bool lvds_downclock_avail;
+       /* indicates the reduced downclock for LVDS*/
+       int lvds_downclock;
+       struct task idle_task;
+       struct callout idle_callout;
+       bool busy;
+       u16 orig_clock;
+       int child_dev_num;
+       struct child_device_config *child_dev;
+       struct drm_connector *int_lvds_connector;
+       struct drm_connector *int_edp_connector;
+
+       device_t bridge_dev;
+       bool mchbar_need_disable;
+       int mch_res_rid;
+       struct resource *mch_res;
+
+       struct lock rps_lock;
+       u32 pm_iir;
+       struct task rps_task;
+
+       u8 cur_delay;
+       u8 min_delay;
+       u8 max_delay;
+       u8 fmax;
+       u8 fstart;
+
+       u64 last_count1;
+       unsigned long last_time1;
+       unsigned long chipset_power;
+       u64 last_count2;
+       struct timespec last_time2;
+       unsigned long gfx_power;
+       int c_m;
+       int r_t;
+       u8 corr;
+       struct lock *mchdev_lock;
+
+       enum no_fbc_reason no_fbc_reason;
+
+       unsigned long cfb_size;
+       unsigned int cfb_fb;
+       int cfb_plane;
+       int cfb_y;
+       struct intel_fbc_work *fbc_work;
+
+       unsigned int fsb_freq, mem_freq, is_ddr3;
+
+       struct taskqueue *tq;
+       struct task error_task;
+       struct task hotplug_task;
+       int error_completion;
+       struct lock error_completion_lock;
+       struct drm_i915_error_state *first_error;
+       struct lock error_lock;
+       struct callout hangcheck_timer;
+
+       unsigned long last_gpu_reset;
+
+       struct intel_fbdev *fbdev;
+
+       struct drm_property *broadcast_rgb_property;
+       struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/*
+ * Audio policy for HDMI outputs.
+ * NOTE(review): presumably the value space of
+ * dev_priv->force_audio_property — confirm in intel_hdmi.c.
+ */
+enum hdmi_force_audio {
+	HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
+	HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
+	HDMI_AUDIO_AUTO,                /* trust EDID */
+	HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
+};
+
+/* GPU cache coherency level of a GEM object; stored in obj->cache_level. */
+enum i915_cache_level {
+	I915_CACHE_NONE,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+ */
+};
+
 enum intel_chip_family {
        CHIP_I8XX = 0x01,
        CHIP_I9XX = 0x02,
@@ -326,31 +747,105 @@ enum intel_chip_family {
 
 /** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
-       struct drm_gem_object *obj;
+       struct drm_gem_object base;
 
        /** Current space allocated to this object in the GTT, if any. */
        struct drm_mm_node *gtt_space;
-
+       struct list_head gtt_list;
        /** This object's place on the active/flushing/inactive lists */
-       struct list_head list;
+       struct list_head ring_list;
+       struct list_head mm_list;
+       /** This object's place on GPU write list */
+       struct list_head gpu_write_list;
+       /** This object's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
 
        /**
         * This is set if the object is on the active or flushing lists
         * (has pending rendering), and is not set if it's on inactive (ready
         * to be unbound).
         */
-       int active;
+       unsigned int active:1;
 
        /**
         * This is set if the object has been written to since last bound
         * to the GTT
         */
-       int dirty;
+       unsigned int dirty:1;
+
+       /**
+        * This is set if the object has been written to since the last
+        * GPU flush.
+        */
+       unsigned int pending_gpu_write:1;
+
+       /**
+        * Fence register bits (if any) for this object.  Will be set
+        * as needed when mapped into the GTT.
+        * Protected by dev->struct_mutex.
+        */
+       signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
+
+       /**
+        * Advice: are the backing pages purgeable?
+        */
+       unsigned int madv:2;
+
+       /**
+        * Current tiling mode for the object.
+        */
+       unsigned int tiling_mode:2;
+       unsigned int tiling_changed:1;
+
+       /** How many users have pinned this object in GTT space. The following
+        * users can each hold at most one reference: pwrite/pread, pin_ioctl
+        * (via user_pin_count), execbuffer (objects are not allowed multiple
+        * times for the same batchbuffer), and the framebuffer code. When
+        * switching/pageflipping, the framebuffer code has at most two buffers
+        * pinned per crtc.
+        *
+        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+        * bits with absolutely no headroom. So use 4 bits. */
+       unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+       /**
+        * Is the object at the current location in the gtt mappable and
+        * fenceable? Used to avoid costly recalculations.
+        */
+       unsigned int map_and_fenceable:1;
+
+       /**
+        * Whether the current gtt mapping needs to be mappable (and isn't just
+        * mappable by accident). Track pin and fault separate for a more
+        * accurate mappable working set.
+        */
+       unsigned int fault_mappable:1;
+       unsigned int pin_mappable:1;
+
+       /*
+        * Is the GPU currently using a fence to access this buffer,
+        */
+       unsigned int pending_fenced_gpu_access:1;
+       unsigned int fenced_gpu_access:1;
 
-       /** AGP memory structure for our GTT binding. */
-       DRM_AGP_MEM *agp_mem;
+       unsigned int cache_level:2;
 
-       struct page **page_list;
+       unsigned int has_aliasing_ppgtt_mapping:1;
+
+       vm_page_t *pages;
+
+       /**
+        * DMAR support
+        */
+       struct sglist *sg_list;
+
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       LIST_ENTRY(drm_i915_gem_object) exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
 
        /**
         * Current offset of the object in GTT space.
@@ -359,28 +854,43 @@ struct drm_i915_gem_object {
         */
        uint32_t gtt_offset;
 
-       /** Boolean whether this object has a valid gtt offset. */
-       int gtt_bound;
-
-       /** How many users have pinned this object in GTT space */
-       int pin_count;
-
        /** Breadcrumb of last rendering to the buffer. */
        uint32_t last_rendering_seqno;
+       struct intel_ring_buffer *ring;
 
-       /** Current tiling mode for the object. */
-       uint32_t tiling_mode;
+       /** Breadcrumb of last fenced GPU access to the buffer. */
+       uint32_t last_fenced_seqno;
+       struct intel_ring_buffer *last_fenced_ring;
 
-       /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
-       uint32_t agp_type;
+       /** Current tiling stride for the object, if it's tiled. */
+       uint32_t stride;
+
+       /** Record of address bit 17 of each page at last unbind. */
+       unsigned long *bit_17;
 
        /**
-        * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-        * GEM_DOMAIN_CPU is not in the object's read domain.
+        * If present, while GEM_DOMAIN_CPU is in the read domain this array
+        * flags which individual pages are valid.
         */
        uint8_t *page_cpu_valid;
+
+       /** User space pin count and filp owning the pin */
+       uint32_t user_pin_count;
+       struct drm_file *pin_filp;
+
+       /** for phy allocated objects */
+       struct drm_i915_gem_phys_object *phys_obj;
+
+       /**
+        * Number of crtcs where this object is currently the fb, but
+        * will be page flipped away on the next vblank.  When it
+        * reaches 0, dev_priv->pending_flip_queue will be woken up.
+        */
+       int pending_flip;
 };
 
+#define        to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
 /**
  * Request queue structure.
  *
@@ -392,27 +902,135 @@ struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
 
+	/** Position in the ringbuffer of the end of the request */
+	u32 tail;
+
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
+	/** global list entry for this request */
 	struct list_head list;
+
+	/** File that issued the request, NULL for kernel-internal requests. */
+	struct drm_i915_file_private *file_priv;
+	/** file_priv list entry for this request */
+	struct list_head client_list;
 };
 
 struct drm_i915_file_private {
 	struct {
-		uint32_t last_gem_seqno;
-		uint32_t last_gem_throttle_seqno;
+		/** Protects request_list. */
+		struct spinlock lock;
+		/** Requests by this file, linked via request->client_list. */
+		struct list_head request_list;
 	} mm;
 };
 
+/**
+ * Snapshot of GPU register and buffer state captured when a GPU hang or
+ * error is detected (stored in dev_priv->first_error; see also
+ * i915_destroy_error_state()).  Most fields are per-ring register dumps
+ * indexed by I915_NUM_RINGS.
+ */
+struct drm_i915_error_state {
+	u32 eir;
+	u32 pgtbl_er;
+	u32 pipestat[I915_MAX_PIPES];
+	u32 tail[I915_NUM_RINGS];
+	u32 head[I915_NUM_RINGS];
+	u32 ipeir[I915_NUM_RINGS];
+	u32 ipehr[I915_NUM_RINGS];
+	u32 instdone[I915_NUM_RINGS];
+	u32 acthd[I915_NUM_RINGS];
+	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	/* our own tracking of ring head and tail */
+	u32 cpu_ring_head[I915_NUM_RINGS];
+	u32 cpu_ring_tail[I915_NUM_RINGS];
+	u32 error; /* gen6+ */
+	u32 instpm[I915_NUM_RINGS];
+	u32 instps[I915_NUM_RINGS];
+	u32 instdone1;
+	u32 seqno[I915_NUM_RINGS];
+	u64 bbaddr;
+	u32 fault_reg[I915_NUM_RINGS];
+	u32 done_reg;
+	u32 faddr[I915_NUM_RINGS];
+	u64 fence[I915_MAX_NUM_FENCES];
+	/** Wall-clock time the error was captured. */
+	struct timeval time;
+	struct drm_i915_error_ring {
+		struct drm_i915_error_object {
+			int page_count;
+			u32 gtt_offset;
+			u32 *pages[0];
+		} *ringbuffer, *batchbuffer;
+		struct drm_i915_error_request {
+			long jiffies;
+			u32 seqno;
+			u32 tail;
+		} *requests;
+		int num_requests;
+	} ring[I915_NUM_RINGS];
+	/** Compact per-object records of active and pinned buffers. */
+	struct drm_i915_error_buffer {
+		u32 size;
+		u32 name;
+		u32 seqno;
+		u32 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+		s32 pinned:2;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+		s32 ring:4;
+		u32 cache_level:2;
+	} *active_bo, *pinned_bo;
+	u32 active_bo_count, pinned_bo_count;
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+};
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage.  This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPU, which differentiate
+ * among each other with the latency required to enter and leave RC6 and
+ * voltage consumed by the GPU in different states.
+ *
+ * The combination of the following flags define which states GPU is allowed
+ * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is deepest RC6. Their support by hardware varies according to the
+ * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE                       (1<<0)
+#define INTEL_RC6p_ENABLE                      (1<<1)
+#define INTEL_RC6pp_ENABLE                     (1<<2)
+
+extern int intel_iommu_enabled;
 extern struct drm_ioctl_desc i915_ioctls[];
-extern int i915_max_ioctl;
+extern struct drm_driver_info i915_driver_info;
+extern struct cdev_pager_ops i915_gem_pager_ops;
+extern int i915_panel_ignore_lid;
+extern unsigned int i915_powersave;
+extern int i915_semaphores;
+extern unsigned int i915_lvds_downclock;
+extern int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern int i915_enable_rc6;
+extern int i915_enable_fbc;
+extern int i915_enable_ppgtt;
+extern int i915_enable_hangcheck;
+
+const struct intel_device_info *i915_get_device_id(int device);
+
+int i915_reset(struct drm_device *dev, u8 flags);
+
+/* i915_debug.c */
+int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid *top);
+void i915_sysctl_cleanup(struct drm_device *dev);
 
                                /* i915_dma.c */
 extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -430,51 +1048,45 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
                         struct drm_clip_rect __user *boxes,
                         int i, int DR1, int DR4);
+int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
+    int DR1, int DR4);
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
 
 /* i915_irq.c */
 extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
-void i915_user_irq_put(struct drm_device *dev);
 
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern void intel_irq_init(struct drm_device *dev);
+
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 g45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
+void intel_enable_asle(struct drm_device *dev);
+void i915_hangcheck_elapsed(void *context);
+void i915_handle_error(struct drm_device *dev, bool wedged);
 
-void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void i915_destroy_error_state(struct drm_device *dev);
 
-
-/* i915_mem.c */
-extern int i915_mem_alloc(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv);
-extern int i915_mem_free(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv);
-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(struct drm_device * dev,
-                            struct drm_file *file_priv, struct mem_block *heap);
-#ifdef I915_HAVE_GEM
 /* i915_gem.c */
+int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
+                       uint32_t *handle_p);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -485,12 +1097,16 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -499,6 +1115,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
@@ -507,98 +1125,237 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-int i915_gem_proc_init(struct drm_minor *minor);
-void i915_gem_proc_cleanup(struct drm_minor *minor);
+void i915_gem_unload(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
+    bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
+
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               dev_priv->fence_regs[obj->fence_reg].pin_count++;
+       }
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               dev_priv->fence_regs[obj->fence_reg].pin_count--;
+       }
+}
+
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+    size_t size);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+    unsigned long mappable_end, unsigned long end);
+uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+    uint32_t size, int tiling_mode);
+int i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+    bool write);
+int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+    u32 alignment, struct intel_ring_buffer *pipelined);
+int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int i915_gem_flush_ring(struct intel_ring_buffer *ring,
+    uint32_t invalidate_domains, uint32_t flush_domains);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+int i915_gem_idle(struct drm_device *dev);
+int i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gpu_idle(struct drm_device *dev, bool do_retire);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+    struct intel_ring_buffer *ring, uint32_t seqno);
+int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
+    struct drm_i915_gem_request *request);
+int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+    struct intel_ring_buffer *pipelined);
+void i915_gem_reset(struct drm_device *dev);
+int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
+    bool do_retire);
+int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
+int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
+    uint64_t *phys);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+    enum i915_cache_level cache_level);
+
+void i915_gem_free_all_phys_object(struct drm_device *dev);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+    struct drm_i915_gem_object *obj);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+    struct drm_i915_gem_object *obj, int id, int align);
+
+int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+    struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+     uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+     uint32_t handle);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
-/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
-                         const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
-#else
-#define i915_verify_inactive(dev, file, line)
-#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
-                         const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
-#endif /* I915_HAVE_GEM */
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+    unsigned alignment, bool mappable);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_opregion.c */
+/* intel_iic.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(device_t idev, int speed);
+extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
+extern void intel_iic_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+int intel_opregion_setup(struct drm_device *dev);
 extern int intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_free(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
 extern void opregion_asle_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
+/* i915_gem_gtt.c */
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+    struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+    enum i915_cache_level cache_level);
+
+/* modesetting */
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
+    struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct sbuf *m,
+    struct intel_overlay_error_state *error);
+extern struct intel_display_error_state *intel_display_capture_error_state(
+    struct drm_device *dev);
+extern void intel_display_print_error_state(struct sbuf *m,
+    struct drm_device *dev, struct intel_display_error_state *error);
+
+static inline void
+trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
+{
+       return;
+}
+
+/* On SNB platform, before reading ring registers the forcewake bit
+ * must be set to prevent the GT core from powering down and stale
+ * values being returned.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
-               LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
-} while (0)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
-#if defined(__FreeBSD__)
-typedef boolean_t bool;
-#endif
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+       (((dev_priv)->info->gen >= 6) && \
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE))
+
+#define __i915_read(x, y) \
+       u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
+
+__i915_read(8, 8)
+__i915_read(16, 16)
+__i915_read(32, 32)
+__i915_read(64, 64)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+       void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
 
-#define I915_READ(reg)         DRM_READ32(dev_priv->mmio_map, (reg))
-#define I915_WRITE(reg,val)    DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg)       DRM_READ16(dev_priv->mmio_map, (reg))
-#define I915_WRITE16(reg,val)  DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
-#define I915_READ8(reg)                DRM_READ8(dev_priv->mmio_map, (reg))
-#define I915_WRITE8(reg,val)   DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
+__i915_write(8, 8)
+__i915_write(16, 16)
+__i915_write(32, 32)
+__i915_write(64, 64)
+#undef __i915_write
+
+#define I915_READ8(reg)                i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val)  i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg)       i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg)       DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+
+#define I915_READ(reg)         i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val)   i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg)         DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE_NOTRACE(reg, val)   DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg)       i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS    unsigned int outring, ringmask, outcount; \
-                        volatile char *virt;
-
-#define BEGIN_LP_RING(n) do {                          \
-       if (I915_VERBOSE)                               \
-               DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));  \
-       if (dev_priv->ring.space < (n)*4)               \
-               i915_wait_ring(dev, (n)*4, __func__);           \
-       outcount = 0;                                   \
-       outring = dev_priv->ring.tail;                  \
-       ringmask = dev_priv->ring.tail_mask;            \
-       virt = dev_priv->ring.virtual_start;            \
-} while (0)
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
 
-#define OUT_RING(n) do {                                       \
-       if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
-       *(volatile unsigned int *)(virt + outring) = (n);       \
-        outcount++;                                            \
-       outring += 4;                                           \
-       outring &= ringmask;                                    \
-} while (0)
+#define BEGIN_LP_RING(n) \
+       intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+       intel_ring_emit(LP_RING(dev_priv), x)
 
-#define ADVANCE_LP_RING() do {                                         \
-       if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
-       dev_priv->ring.tail = outring;                                  \
-       dev_priv->ring.space -= outcount * 4;                           \
-       I915_WRITE(PRB0_TAIL, outring);                 \
-} while(0)
+#define ADVANCE_LP_RING() \
+       intel_ring_advance(LP_RING(dev_priv))
+
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                     \
+       if (LP_RING(dev->dev_private)->obj == NULL)                     \
+               LOCK_TEST_WITH_RETURN(dev, file);                       \
+} while (0)
 
 /**
  * Reads a dword out of the status page, which is written to from the command
@@ -620,19 +1377,30 @@ typedef boolean_t bool;
 #define I915_GEM_HWS_INDEX             0x20
 #define I915_BREADCRUMB_INDEX          0x21
 
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
-                       (dev)->pci_device == 0x27AE)
+#define INTEL_INFO(dev)        (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)           ((dev)->pci_device == 0x3577)
+#define IS_845G(dev)           ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)         ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)          ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
+#define        IS_BROADWATER(dev)      (INTEL_INFO(dev)->is_broadwater)
+#define        IS_CRESTLINE(dev)       (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)           ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)            (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)     ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)     ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)       (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev)     ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
+#define        IS_IVYBRIDGE(dev)       (INTEL_INFO(dev)->is_ivybridge)
+#define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
+
+/* XXXKIB LEGACY */
 #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
                       (dev)->pci_device == 0x2982 || \
                       (dev)->pci_device == 0x2992 || \
@@ -647,32 +1415,65 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
 
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
 
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
-                    (dev)->pci_device == 0x2E12 || \
-                    (dev)->pci_device == 0x2E22 || \
-                    (dev)->pci_device == 0x2E32 || \
-                    IS_GM45(dev))
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+                     IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+/* XXXKIB LEGACY END */
 
-#define IS_IGDG(dev)   ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev)  ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev)    (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_GEN2(dev)   (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)   (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)   (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)   (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev)   (INTEL_INFO(dev)->gen == 7)
 
-#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
-                       (dev)->pci_device == 0x29B2 ||  \
-                       (dev)->pci_device == 0x29D2 ||  \
-                       IS_IGD(dev))
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-                     IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >=6)
 
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-                       IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-                       IS_IGD(dev))
+#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+                                                     IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)  (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)  (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev)              (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+
+       return ((int32_t)(seq1 - seq2) >= 0);
+}
+
+u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+
 #endif
index 952b62e..4ae421f 100644 (file)
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_irq.c,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
-#include "dev/drm/drmP.h"
-#include "dev/drm/drm.h"
+#include <sys/sfbuf.h>
+
+#include <dev/drm/drmP.h>
+#include <dev/drm/drm.h>
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
-#define MAX_NOPID ((u32)~0)
+static void i915_capture_error_state(struct drm_device *dev);
+static u32 ring_last_seqno(struct intel_ring_buffer *ring);
 
 /**
  * Interrupts that are always left unmasked.
  * we leave them always unmasked in IMR and then control enabling them through
  * PIPESTAT alone.
  */
-#define I915_INTERRUPT_ENABLE_FIX      (I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
-                                        I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX                      \
+       (I915_ASLE_INTERRUPT |                          \
+        I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |          \
+        I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |          \
+        I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |  \
+        I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |  \
+        I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR      (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
+
+#define I915_PIPE_VBLANK_STATUS        (PIPE_START_VBLANK_INTERRUPT_STATUS |\
+                                PIPE_VBLANK_INTERRUPT_STATUS)
 
-/** These are all of the interrupts used by the driver */
-#define I915_INTERRUPT_ENABLE_MASK     (I915_INTERRUPT_ENABLE_FIX | \
-                                        I915_INTERRUPT_ENABLE_VAR)
+#define I915_PIPE_VBLANK_ENABLE        (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
+                                PIPE_VBLANK_INTERRUPT_ENABLE)
 
 #define DRM_I915_VBLANK_PIPE_ALL       (DRM_I915_VBLANK_PIPE_A | \
                                         DRM_I915_VBLANK_PIPE_B)
 
-static inline void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+/* For display hotplug interrupt */
+static void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-       mask &= I915_INTERRUPT_ENABLE_VAR;
-       if ((dev_priv->irq_mask_reg & mask) != 0) {
-               dev_priv->irq_mask_reg &= ~mask;
-               I915_WRITE(IMR, dev_priv->irq_mask_reg);
-               (void) I915_READ(IMR);
+       if ((dev_priv->irq_mask & mask) != 0) {
+               dev_priv->irq_mask &= ~mask;
+               I915_WRITE(DEIMR, dev_priv->irq_mask);
+               POSTING_READ(DEIMR);
        }
 }
 
 static inline void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-       mask &= I915_INTERRUPT_ENABLE_VAR;
-       if ((dev_priv->irq_mask_reg & mask) != mask) {
-               dev_priv->irq_mask_reg |= mask;
-               I915_WRITE(IMR, dev_priv->irq_mask_reg);
-               (void) I915_READ(IMR);
+       if ((dev_priv->irq_mask & mask) != mask) {
+               dev_priv->irq_mask |= mask;
+               I915_WRITE(DEIMR, dev_priv->irq_mask);
+               POSTING_READ(DEIMR);
        }
 }
 
-static inline u32
-i915_pipestat(int pipe)
-{
-       if (pipe == 0)
-           return PIPEASTAT;
-       if (pipe == 1)
-           return PIPEBSTAT;
-       return -EINVAL;
-}
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
-               u32 reg = i915_pipestat(pipe);
+               u32 reg = PIPESTAT(pipe);
 
                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-               (void) I915_READ(reg);
+               POSTING_READ(reg);
        }
 }
 
@@ -102,12 +103,34 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
-               u32 reg = i915_pipestat(pipe);
+               u32 reg = PIPESTAT(pipe);
 
                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
-               (void) I915_READ(reg);
+               POSTING_READ(reg);
+       }
+}
+
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+
+       if (HAS_PCH_SPLIT(dev))
+               ironlake_enable_display_irq(dev_priv, DE_GSE);
+       else {
+               i915_enable_pipestat(dev_priv, 1,
+                                    PIPE_LEGACY_BLC_EVENT_ENABLE);
+               if (INTEL_INFO(dev)->gen >= 4)
+                       i915_enable_pipestat(dev_priv, 0,
+                                            PIPE_LEGACY_BLC_EVENT_ENABLE);
        }
+
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
 }
 
 /**
@@ -123,420 +146,2114 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
-       if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-               return 1;
-
-       return 0;
+       return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
 }
 
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32
+i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
-       u32 high1, high2, low, count;
-
-       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+       u32 high1, high2, low;
 
        if (!i915_pipe_enabled(dev, pipe)) {
-               DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+               DRM_DEBUG("trying to get vblank count for disabled "
+                               "pipe %c\n", pipe_name(pipe));
                return 0;
        }
 
+       high_frame = PIPEFRAME(pipe);
+       low_frame = PIPEFRAMEPIXEL(pipe);
+
        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
-               high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
-               low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-                       PIPE_FRAME_LOW_SHIFT);
-               high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
+               high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+               low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+               high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
 
-       count = (high1 << 8) | low;
-
-       return count;
+       high1 >>= PIPE_FRAME_HIGH_SHIFT;
+       low >>= PIPE_FRAME_LOW_SHIFT;
+       return (high1 << 8) | low;
 }
 
-u32 g45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32
+gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+       int reg = PIPE_FRMCOUNT_GM45(pipe);
 
        if (!i915_pipe_enabled(dev, pipe)) {
-               DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+               DRM_DEBUG("i915: trying to get vblank count for disabled "
+                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }
 
        return I915_READ(reg);
 }
 
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static int
+i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+    int *vpos, int *hpos)
 {
-       struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 iir, new_iir;
-       u32 pipea_stats, pipeb_stats;
-       u32 vblank_status;
-       int irq_received;
-
-       iir = I915_READ(IIR);
+       u32 vbl = 0, position = 0;
+       int vbl_start, vbl_end, htotal, vtotal;
+       bool in_vbl = true;
+       int ret = 0;
 
-       if (IS_I965G(dev))
-               vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
-       else
-               vblank_status = I915_VBLANK_INTERRUPT_STATUS;
+       if (!i915_pipe_enabled(dev, pipe)) {
+               DRM_DEBUG("i915: trying to get scanoutpos for disabled "
+                                "pipe %c\n", pipe_name(pipe));
+               return 0;
+       }
 
-       for (;;) {
-               irq_received = iir != 0;
+       /* Get vtotal. */
+       vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
 
-               /* Can't rely on pipestat interrupt bit in iir as it might
-                * have been cleared after the pipestat interrupt was received.
-                * It doesn't set the bit in iir again, but it still produces
-                * interrupts (for non-MSI).
+       if (INTEL_INFO(dev)->gen >= 4) {
+               /* No obvious pixelcount register. Only query vertical
+                * scanout position from Display scan line register.
                 */
-               spin_lock(&dev_priv->user_irq_lock);
-               pipea_stats = I915_READ(PIPEASTAT);
-               pipeb_stats = I915_READ(PIPEBSTAT);
+               position = I915_READ(PIPEDSL(pipe));
 
-               /*
-                * Clear the PIPE(A|B)STAT regs before the IIR
+               /* Decode into vertical scanout position. Don't have
+                * horizontal scanout position.
                 */
-               if (pipea_stats & 0x8000ffff) {
-                       I915_WRITE(PIPEASTAT, pipea_stats);
-                       irq_received = 1;
-               }
+               *vpos = position & 0x1fff;
+               *hpos = 0;
+       } else {
+               /* Have access to pixelcount since start of frame.
+                * We can split this into vertical and horizontal
+                * scanout position.
+                */
+               position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-               if (pipeb_stats & 0x8000ffff) {
-                       I915_WRITE(PIPEBSTAT, pipeb_stats);
-                       irq_received = 1;
-               }
-               spin_unlock(&dev_priv->user_irq_lock);
+               htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+               *vpos = position / htotal;
+               *hpos = position - (*vpos * htotal);
+       }
 
-               if (!irq_received)
-                       break;
+       /* Query vblank area. */
+       vbl = I915_READ(VBLANK(pipe));
 
-               I915_WRITE(IIR, iir);
-               new_iir = I915_READ(IIR); /* Flush posted writes */
+       /* Test position against vblank region. */
+       vbl_start = vbl & 0x1fff;
+       vbl_end = (vbl >> 16) & 0x1fff;
 
-               if (dev_priv->sarea_priv)
-                       dev_priv->sarea_priv->last_dispatch =
-                           READ_BREADCRUMB(dev_priv);
+       if ((*vpos < vbl_start) || (*vpos > vbl_end))
+               in_vbl = false;
 
-               if (iir & I915_USER_INTERRUPT) {
-                       DRM_WAKEUP(&dev_priv->irq_queue);
-               }
+       /* Inside "upper part" of vblank area? Apply corrective offset: */
+       if (in_vbl && (*vpos >= vbl_start))
+               *vpos = *vpos - vtotal;
 
-               if (pipea_stats & vblank_status)
-                       drm_handle_vblank(dev, 0);
+       /* Readouts valid? */
+       if (vbl > 0)
+               ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 
-               if (pipeb_stats & vblank_status)
-                       drm_handle_vblank(dev, 1);
+       /* In vblank? */
+       if (in_vbl)
+               ret |= DRM_SCANOUTPOS_INVBL;
 
-               /* With MSI, interrupts are only generated when iir
-                * transitions from zero to nonzero.  If another bit got
-                * set while we were handling the existing iir bits, then
-                * we would never get another interrupt.
-                *
-                * This is fine on non-MSI as well, as if we hit this path
-                * we avoid exiting the interrupt handler only to generate
-                * another one.
-                *
-                * Note that for MSI this could cause a stray interrupt report
-                * if an interrupt landed in the time between writing IIR and
-                * the posting read.  This should be rare enough to never
-                * trigger the 99% of 100,000 interrupts test for disabling
-                * stray interrupts.
-                */
-               iir = new_iir;
+       return ret;
+}
+
+static int
+i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
+    struct timeval *vblank_time, unsigned flags)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+
+       if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+               DRM_ERROR("Invalid crtc %d\n", pipe);
+               return -EINVAL;
+       }
+
+       /* Get drm_crtc to timestamp: */
+       crtc = intel_get_crtc_for_pipe(dev, pipe);
+       if (crtc == NULL) {
+               DRM_ERROR("Invalid crtc %d\n", pipe);
+               return -EINVAL;
+       }
+
+       if (!crtc->enabled) {
+#if 0
+               DRM_DEBUG("crtc %d is disabled\n", pipe);
+#endif
+               return -EBUSY;
        }
+
+       /* Helper routine in DRM core does all the work: */
+       return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+                                                    vblank_time, flags,
+                                                    crtc);
 }
 
-static int i915_emit_irq(struct drm_device * dev)
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void
+i915_hotplug_work_func(void *context, int pending)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       RING_LOCALS;
+       drm_i915_private_t *dev_priv = context;
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_mode_config *mode_config;
+       struct intel_encoder *encoder;
 
-       i915_kernel_lost_context(dev);
+       DRM_DEBUG("running encoder hotplug functions\n");
+       dev_priv = context;
+       dev = dev_priv->dev;
 
-       if (++dev_priv->counter > 0x7FFFFFFFUL)
-               dev_priv->counter = 0;
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+       mode_config = &dev->mode_config;
 
-       DRM_DEBUG("emitting: %d\n", dev_priv->counter);
+       lockmgr(&mode_config->lock, LK_EXCLUSIVE);
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(dev_priv->counter);
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+               if (encoder->hot_plug)
+                       encoder->hot_plug(encoder);
 
-       return dev_priv->counter;
+       lockmgr(&mode_config->lock, LK_RELEASE);
+
+       /* Just fire off a uevent and let userspace tell us what to do */
+#if 0
+       drm_helper_hpd_irq_event(dev);
+#endif
 }
 
-void i915_user_irq_get(struct drm_device *dev)
+static void i915_handle_rps_change(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 busy_up, busy_down, max_avg, min_avg;
+       u8 new_delay = dev_priv->cur_delay;
+
+       I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+       busy_up = I915_READ(RCPREVBSYTUPAVG);
+       busy_down = I915_READ(RCPREVBSYTDNAVG);
+       max_avg = I915_READ(RCBMAXAVG);
+       min_avg = I915_READ(RCBMINAVG);
+
+       /* Handle RCS change request from hw */
+       if (busy_up > max_avg) {
+               if (dev_priv->cur_delay != dev_priv->max_delay)
+                       new_delay = dev_priv->cur_delay - 1;
+               if (new_delay < dev_priv->max_delay)
+                       new_delay = dev_priv->max_delay;
+       } else if (busy_down < min_avg) {
+               if (dev_priv->cur_delay != dev_priv->min_delay)
+                       new_delay = dev_priv->cur_delay + 1;
+               if (new_delay > dev_priv->min_delay)
+                       new_delay = dev_priv->min_delay;
+       }
 
-       if (dev->irq_enabled == 0)
-               return;
+       if (ironlake_set_drps(dev, new_delay))
+               dev_priv->cur_delay = new_delay;
 
-       DRM_DEBUG("\n");
-       spin_lock(&dev_priv->user_irq_lock);
-       if (++dev_priv->user_irq_refcount == 1)
-               i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-       spin_unlock(&dev_priv->user_irq_lock);
+       return;
 }
 
-void i915_user_irq_put(struct drm_device *dev)
+static void notify_ring(struct drm_device *dev,
+                       struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 seqno;
 
-       if (dev->irq_enabled == 0)
+       if (ring->obj == NULL)
                return;
 
-       spin_lock(&dev_priv->user_irq_lock);
-       KASSERT(dev_priv->user_irq_refcount > 0, ("invalid refcount"));
-       if (--dev_priv->user_irq_refcount == 0)
-               i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-       spin_unlock(&dev_priv->user_irq_lock);
+       seqno = ring->get_seqno(ring);
+
+       lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
+       ring->irq_seqno = seqno;
+       wakeup(ring);
+       lockmgr(&ring->irq_lock, LK_RELEASE);
+
+       if (i915_enable_hangcheck) {
+               dev_priv->hangcheck_count = 0;
+               callout_reset(&dev_priv->hangcheck_timer,
+                   DRM_I915_HANGCHECK_PERIOD, i915_hangcheck_elapsed, dev);
+       }
 }
 
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static void
+gen6_pm_rps_work_func(void *arg, int pending)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       int ret = 0;
+       struct drm_device *dev;
+       drm_i915_private_t *dev_priv;
+       u8 new_delay;
+       u32 pm_iir, pm_imr;
+
+       dev_priv = (drm_i915_private_t *)arg;
+       dev = dev_priv->dev;
+       new_delay = dev_priv->cur_delay;
+
+       lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
+       pm_iir = dev_priv->pm_iir;
+       dev_priv->pm_iir = 0;
+       pm_imr = I915_READ(GEN6_PMIMR);
+       I915_WRITE(GEN6_PMIMR, 0);
+       lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+
+       if (!pm_iir)
+               return;
 
-       if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-               if (dev_priv->sarea_priv) {
-                       dev_priv->sarea_priv->last_dispatch =
-                               READ_BREADCRUMB(dev_priv);
+       DRM_LOCK(dev);
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+               if (dev_priv->cur_delay != dev_priv->max_delay)
+                       new_delay = dev_priv->cur_delay + 1;
+               if (new_delay > dev_priv->max_delay)
+                       new_delay = dev_priv->max_delay;
+       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+               gen6_gt_force_wake_get(dev_priv);
+               if (dev_priv->cur_delay != dev_priv->min_delay)
+                       new_delay = dev_priv->cur_delay - 1;
+               if (new_delay < dev_priv->min_delay) {
+                       new_delay = dev_priv->min_delay;
+                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+                                  ((new_delay << 16) & 0x3f0000));
+               } else {
+                       /* Make sure we continue to get down interrupts
+                        * until we hit the minimum frequency */
+                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
                }
-               return 0;
+               gen6_gt_force_wake_put(dev_priv);
        }
 
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+       gen6_set_rps(dev, new_delay);
+       dev_priv->cur_delay = new_delay;
 
-       DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
-                 READ_BREADCRUMB(dev_priv));
+       /*
+        * rps_lock not held here because clearing is non-destructive. There is
+        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
+        * by holding struct_mutex for the duration of the write.
+        */
+       DRM_UNLOCK(dev);
+}
 
-       i915_user_irq_get(dev);
-       DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
-                   READ_BREADCRUMB(dev_priv) >= irq_nr);
-       i915_user_irq_put(dev);
+static void pch_irq_handler(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 pch_iir;
+       int pipe;
 
-       if (ret == -ERESTART)
-               DRM_DEBUG("restarting syscall\n");
+       pch_iir = I915_READ(SDEIIR);
 
-       if (ret == -EBUSY) {
-               DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-                         READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
-       }
+       if (pch_iir & SDE_AUDIO_POWER_MASK)
+               DRM_DEBUG("i915: PCH audio power change on port %d\n",
+                                (pch_iir & SDE_AUDIO_POWER_MASK) >>
+                                SDE_AUDIO_POWER_SHIFT);
 
-       return ret;
-}
+       if (pch_iir & SDE_GMBUS)
+               DRM_DEBUG("i915: PCH GMBUS interrupt\n");
 
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_irq_emit_t *emit = data;
-       int result;
+       if (pch_iir & SDE_AUDIO_HDCP_MASK)
+               DRM_DEBUG("i915: PCH HDCP audio interrupt\n");
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
-       }
+       if (pch_iir & SDE_AUDIO_TRANS_MASK)
+               DRM_DEBUG("i915: PCH transcoder audio interrupt\n");
 
-       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+       if (pch_iir & SDE_POISON)
+               DRM_ERROR("i915: PCH poison interrupt\n");
 
-       result = i915_emit_irq(dev);
+       if (pch_iir & SDE_FDI_MASK)
+               for_each_pipe(pipe)
+                       DRM_DEBUG("  pipe %c FDI IIR: 0x%08x\n",
+                                        pipe_name(pipe),
+                                        I915_READ(FDI_RX_IIR(pipe)));
 
-       if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
-               DRM_ERROR("copy_to_user\n");
-               return -EFAULT;
-       }
+       if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+               DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");
 
-       return 0;
+       if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+               DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");
+
+       if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+               DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
+       if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+               DRM_DEBUG("PCH transcoder A underrun interrupt\n");
 }
 
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
+static void
+ivybridge_irq_handler(void *arg)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_irq_wait_t *irqwait = data;
+       struct drm_device *dev = (struct drm_device *) arg;
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+#if 0
+       struct drm_i915_master_private *master_priv;
+#endif
+
+       atomic_inc(&dev_priv->irq_received);
+
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       POSTING_READ(DEIER);
+
+       de_iir = I915_READ(DEIIR);
+       gt_iir = I915_READ(GTIIR);
+       pch_iir = I915_READ(SDEIIR);
+       pm_iir = I915_READ(GEN6_PMIIR);
+
+       if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
+               goto done;
+
+#if 0
+       if (dev->primary->master) {
+               master_priv = dev->primary->master->driver_priv;
+               if (master_priv->sarea_priv)
+                       master_priv->sarea_priv->last_dispatch =
+                               READ_BREADCRUMB(dev_priv);
+       }
+#else
+       if (dev_priv->sarea_priv)
+               dev_priv->sarea_priv->last_dispatch =
+                   READ_BREADCRUMB(dev_priv);
+#endif
+
+       if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+               notify_ring(dev, &dev_priv->rings[RCS]);
+       if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->rings[VCS]);
+       if (gt_iir & GT_BLT_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->rings[BCS]);
+
+       if (de_iir & DE_GSE_IVB) {
+#if 1
+               KIB_NOTYET();
+#else
+               intel_opregion_gse_intr(dev);
+#endif
+       }
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
+       if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
+               intel_prepare_page_flip(dev, 0);
+               intel_finish_page_flip_plane(dev, 0);
        }
 
-       return i915_wait_irq(dev, irqwait->irq_seq);
-}
+       if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
+               intel_prepare_page_flip(dev, 1);
+               intel_finish_page_flip_plane(dev, 1);
+       }
 
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       if (de_iir & DE_PIPEA_VBLANK_IVB)
+               drm_handle_vblank(dev, 0);
 
-       if (!i915_pipe_enabled(dev, pipe))
-               return -EINVAL;
+       if (de_iir & DE_PIPEB_VBLANK_IVB)
+               drm_handle_vblank(dev, 1);
 
-       spin_lock(&dev_priv->user_irq_lock);
-       if (IS_I965G(dev))
-               i915_enable_pipestat(dev_priv, pipe,
-                                    PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       else
-               i915_enable_pipestat(dev_priv, pipe,
-                                    PIPE_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock(&dev_priv->user_irq_lock);
-       return 0;
+       /* check event from PCH */
+       if (de_iir & DE_PCH_EVENT_IVB) {
+               if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+                       taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
+               pch_irq_handler(dev);
+       }
+
+       if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+               lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
+               if ((dev_priv->pm_iir & pm_iir) != 0)
+                       kprintf("Missed a PM interrupt\n");
+               dev_priv->pm_iir |= pm_iir;
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+               POSTING_READ(GEN6_PMIMR);
+               lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+               taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+       }
+
+       /* should clear PCH hotplug event before clear CPU irq */
+       I915_WRITE(SDEIIR, pch_iir);
+       I915_WRITE(GTIIR, gt_iir);
+       I915_WRITE(DEIIR, de_iir);
+       I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+       I915_WRITE(DEIER, de_ier);
+       POSTING_READ(DEIER);
 }
 
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void
+ironlake_irq_handler(void *arg)
 {
+       struct drm_device *dev = arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+       u32 hotplug_mask;
+#if 0
+       struct drm_i915_master_private *master_priv;
+#endif
+       u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+       atomic_inc(&dev_priv->irq_received);
+
+       if (IS_GEN6(dev))
+               bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       POSTING_READ(DEIER);
+
+       de_iir = I915_READ(DEIIR);
+       gt_iir = I915_READ(GTIIR);
+       pch_iir = I915_READ(SDEIIR);
+       pm_iir = I915_READ(GEN6_PMIIR);
+
+       if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+           (!IS_GEN6(dev) || pm_iir == 0))
+               goto done;
+
+       if (HAS_PCH_CPT(dev))
+               hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+       else
+               hotplug_mask = SDE_HOTPLUG_MASK;
 
-       spin_lock(&dev_priv->user_irq_lock);
-       i915_disable_pipestat(dev_priv, pipe,
-                             PIPE_VBLANK_INTERRUPT_ENABLE |
-                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock(&dev_priv->user_irq_lock);
-}
+#if 0
+       if (dev->primary->master) {
+               master_priv = dev->primary->master->driver_priv;
+               if (master_priv->sarea_priv)
+                       master_priv->sarea_priv->last_dispatch =
+                               READ_BREADCRUMB(dev_priv);
+       }
+#else
+       if (dev_priv->sarea_priv)
+               dev_priv->sarea_priv->last_dispatch =
+                   READ_BREADCRUMB(dev_priv);
+#endif
+
+       if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+               notify_ring(dev, &dev_priv->rings[RCS]);
+       if (gt_iir & bsd_usr_interrupt)
+               notify_ring(dev, &dev_priv->rings[VCS]);
+       if (gt_iir & GT_BLT_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->rings[BCS]);
+
+       if (de_iir & DE_GSE) {
+#if 1
+               KIB_NOTYET();
+#else
+               intel_opregion_gse_intr(dev);
+#endif
+       }
 
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (de_iir & DE_PLANEA_FLIP_DONE) {
+               intel_prepare_page_flip(dev, 0);
+               intel_finish_page_flip_plane(dev, 0);
+       }
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
+       if (de_iir & DE_PLANEB_FLIP_DONE) {
+               intel_prepare_page_flip(dev, 1);
+               intel_finish_page_flip_plane(dev, 1);
        }
 
-       return 0;
-}
+       if (de_iir & DE_PIPEA_VBLANK)
+               drm_handle_vblank(dev, 0);
 
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_vblank_pipe_t *pipe = data;
+       if (de_iir & DE_PIPEB_VBLANK)
+               drm_handle_vblank(dev, 1);
 
-       if (!dev_priv) {
-               DRM_ERROR("called with no initialization\n");
-               return -EINVAL;
+       /* check event from PCH */
+       if (de_iir & DE_PCH_EVENT) {
+               if (pch_iir & hotplug_mask)
+                       taskqueue_enqueue(dev_priv->tq,
+                           &dev_priv->hotplug_task);
+               pch_irq_handler(dev);
        }
 
-       pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+       if (de_iir & DE_PCU_EVENT) {
+               I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+               i915_handle_rps_change(dev);
+       }
 
-       return 0;
+       if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+               lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
+               if ((dev_priv->pm_iir & pm_iir) != 0)
+                       kprintf("Missed a PM interrupt\n");
+               dev_priv->pm_iir |= pm_iir;
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+               POSTING_READ(GEN6_PMIMR);
+               lockmgr(&dev_priv->rps_lock, LK_RELEASE);
+               taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+       }
+
+       /* should clear PCH hotplug event before clear CPU irq */
+       I915_WRITE(SDEIIR, pch_iir);
+       I915_WRITE(GTIIR, gt_iir);
+       I915_WRITE(DEIIR, de_iir);
+       I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+       I915_WRITE(DEIER, de_ier);
+       POSTING_READ(DEIER);
 }
 
 /**
- * Schedule buffer swap at given vertical blank.
+ * i915_error_work_func - do process context error handling work
+ * @work: work struct
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
  */
-int i915_vblank_swap(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+static void
+i915_error_work_func(void *context, int pending)
 {
-       /* The delayed swap mechanism was fundamentally racy, and has been
-        * removed.  The model was that the client requested a delayed flip/swap
-        * from the kernel, then waited for vblank before continuing to perform
-        * rendering.  The problem was that the kernel might wake the client
-        * up before it dispatched the vblank swap (since the lock has to be
-        * held while touching the ringbuffer), in which case the client would
-        * clear and start the next frame before the swap occurred, and
-        * flicker would occur in addition to likely missing the vblank.
-        *
-        * In the absence of this ioctl, userland falls back to a correct path
-        * of waiting for a vblank, then dispatching the swap on its own.
-        * Context switching to userland and back is plenty fast enough for
-        * meeting the requirements of vblank swapping.
-        */
-       return -EINVAL;
-}
+       drm_i915_private_t *dev_priv = context;
+       struct drm_device *dev = dev_priv->dev;
 
-/* drm_dma.h hooks
-*/
-void i915_driver_irq_preinstall(struct drm_device * dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
 
-       I915_WRITE(HWSTAM, 0xeffe);
-       I915_WRITE(PIPEASTAT, 0);
-       I915_WRITE(PIPEBSTAT, 0);
-       I915_WRITE(IMR, 0xffffffff);
-       I915_WRITE(IER, 0x0);
-       (void) I915_READ(IER);
+       if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
+               DRM_DEBUG("i915: resetting chip\n");
+               /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
+               if (!i915_reset(dev, GRDOM_RENDER)) {
+                       atomic_store_rel_int(&dev_priv->mm.wedged, 0);
+                       /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
+               }
+               lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
+               dev_priv->error_completion++;
+               wakeup(&dev_priv->error_completion);
+               lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
+       }
 }
 
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 eir = I915_READ(EIR);
+       int pipe;
 
-       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+       if (!eir)
+               return;
 
-       /* Unmask the interrupts that we always want on. */
-       dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+       kprintf("i915: render error detected, EIR: 0x%08x\n", eir);
+
+       if (IS_G4X(dev)) {
+               if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+                       u32 ipeir = I915_READ(IPEIR_I965);
+
+                       kprintf("  IPEIR: 0x%08x\n",
+                              I915_READ(IPEIR_I965));
+                       kprintf("  IPEHR: 0x%08x\n",
+                              I915_READ(IPEHR_I965));
+                       kprintf("  INSTDONE: 0x%08x\n",
+                              I915_READ(INSTDONE_I965));
+                       kprintf("  INSTPS: 0x%08x\n",
+                              I915_READ(INSTPS));
+                       kprintf("  INSTDONE1: 0x%08x\n",
+                              I915_READ(INSTDONE1));
+                       kprintf("  ACTHD: 0x%08x\n",
+                              I915_READ(ACTHD_I965));
+                       I915_WRITE(IPEIR_I965, ipeir);
+                       POSTING_READ(IPEIR_I965);
+               }
+               if (eir & GM45_ERROR_PAGE_TABLE) {
+                       u32 pgtbl_err = I915_READ(PGTBL_ER);
+                       kprintf("page table error\n");
+                       kprintf("  PGTBL_ER: 0x%08x\n",
+                              pgtbl_err);
+                       I915_WRITE(PGTBL_ER, pgtbl_err);
+                       POSTING_READ(PGTBL_ER);
+               }
+       }
 
-       /* Disable pipe interrupt enables, clear pending pipe status */
-       I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
-       I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+       if (!IS_GEN2(dev)) {
+               if (eir & I915_ERROR_PAGE_TABLE) {
+                       u32 pgtbl_err = I915_READ(PGTBL_ER);
+                       kprintf("page table error\n");
+                       kprintf("  PGTBL_ER: 0x%08x\n",
+                              pgtbl_err);
+                       I915_WRITE(PGTBL_ER, pgtbl_err);
+                       POSTING_READ(PGTBL_ER);
+               }
+       }
 
-       /* Clear pending interrupt status */
-       I915_WRITE(IIR, I915_READ(IIR));
+       if (eir & I915_ERROR_MEMORY_REFRESH) {
+               kprintf("memory refresh error:\n");
+               for_each_pipe(pipe)
+                       kprintf("pipe %c stat: 0x%08x\n",
+                              pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+               /* pipestat has already been acked */
+       }
+       if (eir & I915_ERROR_INSTRUCTION) {
+               kprintf("instruction error\n");
+               kprintf("  INSTPM: 0x%08x\n",
+                      I915_READ(INSTPM));
+               if (INTEL_INFO(dev)->gen < 4) {
+                       u32 ipeir = I915_READ(IPEIR);
+
+                       kprintf("  IPEIR: 0x%08x\n",
+                              I915_READ(IPEIR));
+                       kprintf("  IPEHR: 0x%08x\n",
+                              I915_READ(IPEHR));
+                       kprintf("  INSTDONE: 0x%08x\n",
+                              I915_READ(INSTDONE));
+                       kprintf("  ACTHD: 0x%08x\n",
+                              I915_READ(ACTHD));
+                       I915_WRITE(IPEIR, ipeir);
+                       POSTING_READ(IPEIR);
+               } else {
+                       u32 ipeir = I915_READ(IPEIR_I965);
+
+                       kprintf("  IPEIR: 0x%08x\n",
+                              I915_READ(IPEIR_I965));
+                       kprintf("  IPEHR: 0x%08x\n",
+                              I915_READ(IPEHR_I965));
+                       kprintf("  INSTDONE: 0x%08x\n",
+                              I915_READ(INSTDONE_I965));
+                       kprintf("  INSTPS: 0x%08x\n",
+                              I915_READ(INSTPS));
+                       kprintf("  INSTDONE1: 0x%08x\n",
+                              I915_READ(INSTDONE1));
+                       kprintf("  ACTHD: 0x%08x\n",
+                              I915_READ(ACTHD_I965));
+                       I915_WRITE(IPEIR_I965, ipeir);
+                       POSTING_READ(IPEIR_I965);
+               }
+       }
+
+       I915_WRITE(EIR, eir);
+       POSTING_READ(EIR);
+       eir = I915_READ(EIR);
+       if (eir) {
+               /*
+                * some errors might have become stuck,
+                * mask them.
+                */
+               DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+               I915_WRITE(EMR, I915_READ(EMR) | eir);
+               I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+       }
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       i915_capture_error_state(dev);
+       i915_report_and_clear_eir(dev);
+
+       if (wedged) {
+               lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
+               dev_priv->error_completion = 0;
+               dev_priv->mm.wedged = 1;
+               /* unlock acts as rel barrier for store to wedged */
+               lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
+
+               /*
+                * Wakeup waiting processes so they don't hang
+                */
+               lockmgr(&dev_priv->rings[RCS].irq_lock, LK_EXCLUSIVE);
+               wakeup(&dev_priv->rings[RCS]);
+               lockmgr(&dev_priv->rings[RCS].irq_lock, LK_RELEASE);
+               if (HAS_BSD(dev)) {
+                       lockmgr(&dev_priv->rings[VCS].irq_lock, LK_EXCLUSIVE);
+                       wakeup(&dev_priv->rings[VCS]);
+                       lockmgr(&dev_priv->rings[VCS].irq_lock, LK_RELEASE);
+               }
+               if (HAS_BLT(dev)) {
+                       lockmgr(&dev_priv->rings[BCS].irq_lock, LK_EXCLUSIVE);
+                       wakeup(&dev_priv->rings[BCS]);
+                       lockmgr(&dev_priv->rings[BCS].irq_lock, LK_RELEASE);
+               }
+       }
+
+       taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
+}
+
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_gem_object *obj;
+       struct intel_unpin_work *work;
+       bool stall_detected;
+
+       /* Ignore early vblank irqs */
+       if (intel_crtc == NULL)
+               return;
+
+       lockmgr(&dev->event_lock, LK_EXCLUSIVE);
+       work = intel_crtc->unpin_work;
+
+       if (work == NULL || work->pending || !work->enable_stall_check) {
+               /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+               lockmgr(&dev->event_lock, LK_RELEASE);
+               return;
+       }
+
+       /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+       obj = work->pending_flip_obj;
+       if (INTEL_INFO(dev)->gen >= 4) {
+               int dspsurf = DSPSURF(intel_crtc->plane);
+               stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+       } else {
+               int dspaddr = DSPADDR(intel_crtc->plane);
+               stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+                                                       crtc->y * crtc->fb->pitches[0] +
+                                                       crtc->x * crtc->fb->bits_per_pixel/8);
+       }
+
+       lockmgr(&dev->event_lock, LK_RELEASE);
+
+       if (stall_detected) {
+               DRM_DEBUG("Pageflip stall detected\n");
+               intel_prepare_page_flip(dev, intel_crtc->plane);
+       }
+}
+
+static void
+i915_driver_irq_handler(void *arg)
+{
+       struct drm_device *dev = (struct drm_device *)arg;
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
+#if 0
+       struct drm_i915_master_private *master_priv;
+#endif
+       u32 iir, new_iir;
+       u32 pipe_stats[I915_MAX_PIPES];
+       u32 vblank_status;
+       int vblank = 0;
+       int irq_received;
+       int pipe;
+       bool blc_event = false;
+
+       atomic_inc(&dev_priv->irq_received);
+
+       iir = I915_READ(IIR);
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+       else
+               vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
+
+       for (;;) {
+               irq_received = iir != 0;
+
+               /* Can't rely on pipestat interrupt bit in iir as it might
+                * have been cleared after the pipestat interrupt was received.
+                * It doesn't set the bit in iir again, but it still produces
+                * interrupts (for non-MSI).
+                */
+               lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+               if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+                       i915_handle_error(dev, false);
+
+               for_each_pipe(pipe) {
+                       int reg = PIPESTAT(pipe);
+                       pipe_stats[pipe] = I915_READ(reg);
+
+                       /*
+                        * Clear the PIPE*STAT regs before the IIR
+                        */
+                       if (pipe_stats[pipe] & 0x8000ffff) {
+                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+                                       DRM_DEBUG("pipe %c underrun\n",
+                                                        pipe_name(pipe));
+                               I915_WRITE(reg, pipe_stats[pipe]);
+                               irq_received = 1;
+                       }
+               }
+               lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+
+               if (!irq_received)
+                       break;
+
+               /* Consume port.  Then clear IIR or we'll miss events */
+               if ((I915_HAS_HOTPLUG(dev)) &&
+                   (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+                       u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+                       DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
+                                 hotplug_status);
+                       if (hotplug_status & dev_priv->hotplug_supported_mask)
+                               taskqueue_enqueue(dev_priv->tq,
+                                   &dev_priv->hotplug_task);
+
+                       I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+                       I915_READ(PORT_HOTPLUG_STAT);
+               }
+
+               I915_WRITE(IIR, iir);
+               new_iir = I915_READ(IIR); /* Flush posted writes */
+
+#if 0
+               if (dev->primary->master) {
+                       master_priv = dev->primary->master->driver_priv;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->last_dispatch =
+                                       READ_BREADCRUMB(dev_priv);
+               }
+#else
+               if (dev_priv->sarea_priv)
+                       dev_priv->sarea_priv->last_dispatch =
+                           READ_BREADCRUMB(dev_priv);
+#endif
+
+               if (iir & I915_USER_INTERRUPT)
+                       notify_ring(dev, &dev_priv->rings[RCS]);
+               if (iir & I915_BSD_USER_INTERRUPT)
+                       notify_ring(dev, &dev_priv->rings[VCS]);
+
+               if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+                       intel_prepare_page_flip(dev, 0);
+                       if (dev_priv->flip_pending_is_done)
+                               intel_finish_page_flip_plane(dev, 0);
+               }
+
+               if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+                       intel_prepare_page_flip(dev, 1);
+                       if (dev_priv->flip_pending_is_done)
+                               intel_finish_page_flip_plane(dev, 1);
+               }
+
+               for_each_pipe(pipe) {
+                       if (pipe_stats[pipe] & vblank_status &&
+                           drm_handle_vblank(dev, pipe)) {
+                               vblank++;
+                               if (!dev_priv->flip_pending_is_done) {
+                                       i915_pageflip_stall_check(dev, pipe);
+                                       intel_finish_page_flip(dev, pipe);
+                               }
+                       }
+
+                       if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+                               blc_event = true;
+               }
+
+
+               if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
+#if 1
+                       KIB_NOTYET();
+#else
+                       intel_opregion_asle_intr(dev);
+#endif
+               }
+
+               /* With MSI, interrupts are only generated when iir
+                * transitions from zero to nonzero.  If another bit got
+                * set while we were handling the existing iir bits, then
+                * we would never get another interrupt.
+                *
+                * This is fine on non-MSI as well, as if we hit this path
+                * we avoid exiting the interrupt handler only to generate
+                * another one.
+                *
+                * Note that for MSI this could cause a stray interrupt report
+                * if an interrupt landed in the time between writing IIR and
+                * the posting read.  This should be rare enough to never
+                * trigger the 99% of 100,000 interrupts test for disabling
+                * stray interrupts.
+                */
+               iir = new_iir;
+       }
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+#if 0
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+#endif
+
+       i915_kernel_lost_context(dev);
+
+       DRM_DEBUG("i915: emit_irq\n");
+
+       dev_priv->counter++;
+       if (dev_priv->counter > 0x7FFFFFFFUL)
+               dev_priv->counter = 1;
+#if 0
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+#else
+       if (dev_priv->sarea_priv)
+               dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+#endif
+
+       if (BEGIN_LP_RING(4) == 0) {
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(dev_priv->counter);
+               OUT_RING(MI_USER_INTERRUPT);
+               ADVANCE_LP_RING();
+       }
+
+       return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+#if 0
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+#endif
+       int ret;
+       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+       DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+                 READ_BREADCRUMB(dev_priv));
+
+#if 0
+       if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+               if (master_priv->sarea_priv)
+                       master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+               return 0;
+       }
+
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+#else
+       if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+               if (dev_priv->sarea_priv) {
+                       dev_priv->sarea_priv->last_dispatch =
+                               READ_BREADCRUMB(dev_priv);
+               }
+               return 0;
+       }
+
+       if (dev_priv->sarea_priv)
+               dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+#endif
+
+       ret = 0;
+       lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
+       if (ring->irq_get(ring)) {
+               DRM_UNLOCK(dev);
+               while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
+                       ret = -lksleep(ring, &ring->irq_lock, PCATCH,
+                           "915wtq", 3 * hz);
+               }
+               ring->irq_put(ring);
+               lockmgr(&ring->irq_lock, LK_RELEASE);
+               DRM_LOCK(dev);
+       } else {
+               lockmgr(&ring->irq_lock, LK_RELEASE);
+               if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
+                    3000, 1, "915wir"))
+                       ret = -EBUSY;
+       }
+
+       if (ret == -EBUSY) {
+               DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+                         READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+       }
+
+       return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_irq_emit_t *emit = data;
+       int result;
+
+       if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+       DRM_LOCK(dev);
+       result = i915_emit_irq(dev);
+       DRM_UNLOCK(dev);
+
+       if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+               DRM_ERROR("copy_to_user\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i915_irq_wait(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_irq_wait_t *irqwait = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
+       return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int
+i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       if (!i915_pipe_enabled(dev, pipe))
+               return -EINVAL;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       if (INTEL_INFO(dev)->gen >= 4)
+               i915_enable_pipestat(dev_priv, pipe,
+                                    PIPE_START_VBLANK_INTERRUPT_ENABLE);
+       else
+               i915_enable_pipestat(dev_priv, pipe,
+                                    PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       /* maintain vblank delivery even in deep C-states */
+       if (dev_priv->info->gen == 3)
+               I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+
+       return 0;
+}
+
+static int
+ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       if (!i915_pipe_enabled(dev, pipe))
+               return -EINVAL;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+           DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+
+       return 0;
+}
+
+static int
+ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       if (!i915_pipe_enabled(dev, pipe))
+               return -EINVAL;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+                                   DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+
+       return 0;
+}
+
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void
+i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       if (dev_priv->info->gen == 3)
+               I915_WRITE(INSTPM,
+                          INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
+       i915_disable_pipestat(dev_priv, pipe,
+           PIPE_VBLANK_INTERRUPT_ENABLE |
+           PIPE_START_VBLANK_INTERRUPT_ENABLE);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+}
+
+static void
+ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+           DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+}
+
+static void
+ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+                                    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+}
+
+/* Set the vblank monitor pipe
+ */
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (!dev_priv) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_vblank_pipe_t *pipe = data;
+
+       if (!dev_priv) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
+       pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+       return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       /* The delayed swap mechanism was fundamentally racy, and has been
+        * removed.  The model was that the client requested a delayed flip/swap
+        * from the kernel, then waited for vblank before continuing to perform
+        * rendering.  The problem was that the kernel might wake the client
+        * up before it dispatched the vblank swap (since the lock has to be
+        * held while touching the ringbuffer), in which case the client would
+        * clear and start the next frame before the swap occurred, and
+        * flicker would occur in addition to likely missing the vblank.
+        *
+        * In the absence of this ioctl, userland falls back to a correct path
+        * of waiting for a vblank, then dispatching the swap on its own.
+        * Context switching to userland and back is plenty fast enough for
+        * meeting the requirements of vblank swapping.
+        */
+       return -EINVAL;
+}
+
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
+{
+
+       if (list_empty(&ring->request_list))
+               return (0);
+       else
+               return (list_entry(ring->request_list.prev,
+                   struct drm_i915_gem_request, list)->seqno);
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+       if (list_empty(&ring->request_list) ||
+           i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+               /* Issue a wake-up to catch stuck h/w. */
+               if (ring->waiting_seqno) {
+                       DRM_ERROR(
+"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+                                 ring->name,
+                                 ring->waiting_seqno,
+                                 ring->get_seqno(ring));
+                       wakeup(ring);
+                       *err = true;
+               }
+               return true;
+       }
+       return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp = I915_READ_CTL(ring);
+       if (tmp & RING_WAIT) {
+               DRM_ERROR("Kicking stuck wait on %s\n",
+                         ring->name);
+               I915_WRITE_CTL(ring, tmp);
+               return true;
+       }
+       return false;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void
+i915_hangcheck_elapsed(void *context)
+{
+       struct drm_device *dev = (struct drm_device *)context;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
+       bool err = false;
+
+       if (!i915_enable_hangcheck)
+               return;
+
+       /* If all work is done then ACTHD clearly hasn't advanced. */
+       if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) &&
+           i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) &&
+           i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) {
+               dev_priv->hangcheck_count = 0;
+               if (err)
+                       goto repeat;
+               return;
+       }
+
+       if (INTEL_INFO(dev)->gen < 4) {
+               instdone = I915_READ(INSTDONE);
+               instdone1 = 0;
+       } else {
+               instdone = I915_READ(INSTDONE_I965);
+               instdone1 = I915_READ(INSTDONE1);
+       }
+       acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]);
+       acthd_bsd = HAS_BSD(dev) ?
+               intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0;
+       acthd_blt = HAS_BLT(dev) ?
+               intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0;
+
+       if (dev_priv->last_acthd == acthd &&
+           dev_priv->last_acthd_bsd == acthd_bsd &&
+           dev_priv->last_acthd_blt == acthd_blt &&
+           dev_priv->last_instdone == instdone &&
+           dev_priv->last_instdone1 == instdone1) {
+               if (dev_priv->hangcheck_count++ > 1) {
+                       DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+                       i915_handle_error(dev, true);
+
+                       if (!IS_GEN2(dev)) {
+                               /* Is the chip hanging on a WAIT_FOR_EVENT?
+                                * If so we can simply poke the RB_WAIT bit
+                                * and break the hang. This should work on
+                                * all but the second generation chipsets.
+                                */
+                               if (kick_ring(&dev_priv->rings[RCS]))
+                                       goto repeat;
+
+                               if (HAS_BSD(dev) &&
+                                   kick_ring(&dev_priv->rings[VCS]))
+                                       goto repeat;
+
+                               if (HAS_BLT(dev) &&
+                                   kick_ring(&dev_priv->rings[BCS]))
+                                       goto repeat;
+                       }
+
+                       return;
+               }
+       } else {
+               dev_priv->hangcheck_count = 0;
+
+               dev_priv->last_acthd = acthd;
+               dev_priv->last_acthd_bsd = acthd_bsd;
+               dev_priv->last_acthd_blt = acthd_blt;
+               dev_priv->last_instdone = instdone;
+               dev_priv->last_instdone1 = instdone1;
+       }
+
+repeat:
+       /* Reset timer in case chip hangs without another request being added */
+       callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
+           i915_hangcheck_elapsed, dev);
+}
+
+/* drm_dma.h hooks
+*/
+static void
+ironlake_irq_preinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       atomic_set(&dev_priv->irq_received, 0);
+
+       TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
+           dev->dev_private);
+       TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
+           dev->dev_private);
+       TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
+           dev->dev_private);
+
+       I915_WRITE(HWSTAM, 0xeffe);
+
+       /* XXX hotplug from PCH */
+
+       I915_WRITE(DEIMR, 0xffffffff);
+       I915_WRITE(DEIER, 0x0);
+       POSTING_READ(DEIER);
+
+       /* and GT */
+       I915_WRITE(GTIMR, 0xffffffff);
+       I915_WRITE(GTIER, 0x0);
+       POSTING_READ(GTIER);
+
+       /* south display irq */
+       I915_WRITE(SDEIMR, 0xffffffff);
+       I915_WRITE(SDEIER, 0x0);
+       POSTING_READ(SDEIER);
+}
+
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+
+static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32     hotplug;
+
+       hotplug = I915_READ(PCH_PORT_HOTPLUG);
+       hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+       hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+       hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+       hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+       I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static int ironlake_irq_postinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       /* enable kind of interrupts always enabled */
+       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+       u32 render_irqs;
+       u32 hotplug_mask;
+
+       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+       dev_priv->irq_mask = ~display_mask;
+
+       /* we should always be able to generate irqs */
+       I915_WRITE(DEIIR, I915_READ(DEIIR));
+       I915_WRITE(DEIMR, dev_priv->irq_mask);
+       I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+       POSTING_READ(DEIER);
+
+       dev_priv->gt_irq_mask = ~0;
+
+       I915_WRITE(GTIIR, I915_READ(GTIIR));
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+       if (IS_GEN6(dev))
+               render_irqs =
+                       GT_USER_INTERRUPT |
+                       GT_GEN6_BSD_USER_INTERRUPT |
+                       GT_BLT_USER_INTERRUPT;
+       else
+               render_irqs =
+                       GT_USER_INTERRUPT |
+                       GT_PIPE_NOTIFY |
+                       GT_BSD_USER_INTERRUPT;
+       I915_WRITE(GTIER, render_irqs);
+       POSTING_READ(GTIER);
+
+       if (HAS_PCH_CPT(dev)) {
+               hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+                               SDE_PORTB_HOTPLUG_CPT |
+                               SDE_PORTC_HOTPLUG_CPT |
+                               SDE_PORTD_HOTPLUG_CPT);
+       } else {
+               hotplug_mask = (SDE_CRT_HOTPLUG |
+                               SDE_PORTB_HOTPLUG |
+                               SDE_PORTC_HOTPLUG |
+                               SDE_PORTD_HOTPLUG |
+                               SDE_AUX_MASK);
+       }
+
+       dev_priv->pch_irq_mask = ~hotplug_mask;
+
+       I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+       I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+       I915_WRITE(SDEIER, hotplug_mask);
+       POSTING_READ(SDEIER);
+
+       ironlake_enable_pch_hotplug(dev);
+
+       if (IS_IRONLAKE_M(dev)) {
+               /* Clear & enable PCU event interrupts */
+               I915_WRITE(DEIIR, DE_PCU_EVENT);
+               I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+       }
+
+       return 0;
+}
+
+static int
+ivybridge_irq_postinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       /* enable kind of interrupts always enabled */
+       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+               DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
+               DE_PLANEB_FLIP_DONE_IVB;
+       u32 render_irqs;
+       u32 hotplug_mask;
+
+       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+       dev_priv->irq_mask = ~display_mask;
+
+       /* should always be able to generate an irq */
+       I915_WRITE(DEIIR, I915_READ(DEIIR));
+       I915_WRITE(DEIMR, dev_priv->irq_mask);
+       I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
+                  DE_PIPEB_VBLANK_IVB);
+       POSTING_READ(DEIER);
+
+       dev_priv->gt_irq_mask = ~0;
+
+       I915_WRITE(GTIIR, I915_READ(GTIIR));
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+       render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
+               GT_BLT_USER_INTERRUPT;
+       I915_WRITE(GTIER, render_irqs);
+       POSTING_READ(GTIER);
+
+       hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+                       SDE_PORTB_HOTPLUG_CPT |
+                       SDE_PORTC_HOTPLUG_CPT |
+                       SDE_PORTD_HOTPLUG_CPT);
+       dev_priv->pch_irq_mask = ~hotplug_mask;
+
+       I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+       I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+       I915_WRITE(SDEIER, hotplug_mask);
+       POSTING_READ(SDEIER);
+
+       ironlake_enable_pch_hotplug(dev);
+
+       return 0;
+}
+
+static void
+i915_driver_irq_preinstall(struct drm_device * dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+
+       atomic_set(&dev_priv->irq_received, 0);
+
+       TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
+           dev->dev_private);
+       TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
+           dev->dev_private);
+       TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
+           dev->dev_private);
+
+       if (I915_HAS_HOTPLUG(dev)) {
+               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+       }
+
+       I915_WRITE(HWSTAM, 0xeffe);
+       for_each_pipe(pipe)
+               I915_WRITE(PIPESTAT(pipe), 0);
+       I915_WRITE(IMR, 0xffffffff);
+       I915_WRITE(IER, 0x0);
+       POSTING_READ(IER);
+}
+
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
+static int
+i915_driver_irq_postinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+       u32 error_mask;
+
+       dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+       /* Unmask the interrupts that we always want on. */
+       dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+
+       dev_priv->pipestat[0] = 0;
+       dev_priv->pipestat[1] = 0;
+
+       if (I915_HAS_HOTPLUG(dev)) {
+               /* Enable in IER... */
+               enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+               /* and unmask in IMR */
+               dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+       }
+
+       /*
+        * Enable some error detection, note the instruction error mask
+        * bit is reserved, so we leave it masked.
+        */
+       if (IS_G4X(dev)) {
+               error_mask = ~(GM45_ERROR_PAGE_TABLE |
+                              GM45_ERROR_MEM_PRIV |
+                              GM45_ERROR_CP_PRIV |
+                              I915_ERROR_MEMORY_REFRESH);
+       } else {
+               error_mask = ~(I915_ERROR_PAGE_TABLE |
+                              I915_ERROR_MEMORY_REFRESH);
+       }
+       I915_WRITE(EMR, error_mask);
+
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       I915_WRITE(IER, enable_mask);
+       POSTING_READ(IER);
+
+       if (I915_HAS_HOTPLUG(dev)) {
+               u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+               /* Note HDMI and DP share bits */
+               if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+                       hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+               if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+                       hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+               if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+                       hotplug_en |= HDMID_HOTPLUG_INT_EN;
+               if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+                       hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+               if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+                       hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+               if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+                       hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+                       /* Programming the CRT detection parameters tends
+                          to generate a spurious hotplug event about three
+                          seconds later.  So just do it once.
+                       */
+                       if (IS_G4X(dev))
+                               hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+                       hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+               }
 
-       I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-       I915_WRITE(IMR, dev_priv->irq_mask_reg);
-       I915_WRITE(PIPEASTAT, dev_priv->pipestat[0] |
-           (dev_priv->pipestat[0] >> 16));
-       I915_WRITE(PIPEBSTAT, dev_priv->pipestat[1] |
-           (dev_priv->pipestat[1] >> 16));
-       (void) I915_READ(IER);
+               /* Ignore TV since it's buggy */
+
+               I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+       }
+
+#if 1
+       KIB_NOTYET();
+#else
+       intel_opregion_enable_asle(dev);
+#endif
 
        return 0;
 }
 
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void
+ironlake_irq_uninstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       if (dev_priv == NULL)
+               return;
+
+       dev_priv->vblank_pipe = 0;
+
+       I915_WRITE(HWSTAM, 0xffffffff);
+
+       I915_WRITE(DEIMR, 0xffffffff);
+       I915_WRITE(DEIER, 0x0);
+       I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+       I915_WRITE(GTIMR, 0xffffffff);
+       I915_WRITE(GTIER, 0x0);
+       I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+       I915_WRITE(SDEIMR, 0xffffffff);
+       I915_WRITE(SDEIER, 0x0);
+       I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+
+       taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
+       taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
+       taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+}
+
+static void i915_driver_irq_uninstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
 
        if (!dev_priv)
                return;
 
        dev_priv->vblank_pipe = 0;
 
+       if (I915_HAS_HOTPLUG(dev)) {
+               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+       }
+
        I915_WRITE(HWSTAM, 0xffffffff);
-       I915_WRITE(PIPEASTAT, 0);
-       I915_WRITE(PIPEBSTAT, 0);
+       for_each_pipe(pipe)
+               I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
 
-       I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
-       I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+       for_each_pipe(pipe)
+               I915_WRITE(PIPESTAT(pipe),
+                          I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
+
+       taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
+       taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
+       taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+}
+
+void
+intel_irq_init(struct drm_device *dev)
+{
+
+       dev->driver->get_vblank_counter = i915_get_vblank_counter;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+       else
+               dev->driver->get_vblank_timestamp = NULL;
+       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+       if (IS_IVYBRIDGE(dev)) {
+               /* Share pre & uninstall handlers with ILK/SNB */
+               dev->driver->irq_handler = ivybridge_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ivybridge_enable_vblank;
+               dev->driver->disable_vblank = ivybridge_disable_vblank;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev->driver->irq_handler = ironlake_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ironlake_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ironlake_enable_vblank;
+               dev->driver->disable_vblank = ironlake_disable_vblank;
+       } else {
+               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+               dev->driver->irq_handler = i915_driver_irq_handler;
+               dev->driver->enable_vblank = i915_enable_vblank;
+               dev->driver->disable_vblank = i915_disable_vblank;
+       }
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_i915_private *dev_priv,
+    struct drm_i915_gem_object *src)
+{
+       struct drm_i915_error_object *dst;
+       struct sf_buf *sf;
+       void *d, *s;
+       int page, page_count;
+       u32 reloc_offset;
+
+       if (src == NULL || src->pages == NULL)
+               return NULL;
+
+       page_count = src->base.size / PAGE_SIZE;
+
+       dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
+           M_NOWAIT);
+       if (dst == NULL)
+               return (NULL);
+
+       reloc_offset = src->gtt_offset;
+       for (page = 0; page < page_count; page++) {
+               d = kmalloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
+               if (d == NULL)
+                       goto unwind;
+
+               if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+                       /* Simply ignore tiling or any overlapping fence.
+                        * It's part of the error state, and this hopefully
+                        * captures what the GPU read.
+                        */
+                       s = pmap_mapdev_attr(src->base.dev->agp->base +
+                           reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
+                       memcpy(d, s, PAGE_SIZE);
+                       pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
+               } else {
+                       drm_clflush_pages(&src->pages[page], 1);
+
+                       sf = sf_buf_alloc(src->pages[page]);
+                       if (sf != NULL) {
+                               s = (void *)(uintptr_t)sf_buf_kva(sf);
+                               memcpy(d, s, PAGE_SIZE);
+                               sf_buf_free(sf);
+                       } else {
+                               bzero(d, PAGE_SIZE);
+                               strcpy(d, "XXXKIB");
+                       }
+
+                       drm_clflush_pages(&src->pages[page], 1);
+               }
+
+               dst->pages[page] = d;
+
+               reloc_offset += PAGE_SIZE;
+       }
+       dst->page_count = page_count;
+       dst->gtt_offset = src->gtt_offset;
+
+       return (dst);
+
+unwind:
+       while (page--)
+               drm_free(dst->pages[page], DRM_I915_GEM);
+       drm_free(dst, DRM_I915_GEM);
+       return (NULL);
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+       int page;
+
+       if (obj == NULL)
+               return;
+
+       for (page = 0; page < obj->page_count; page++)
+               drm_free(obj->pages[page], DRM_I915_GEM);
+
+       drm_free(obj, DRM_I915_GEM);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+                     struct drm_i915_error_state *error)
+{
+       int i;
+
+       for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
+               i915_error_object_free(error->ring[i].batchbuffer);
+               i915_error_object_free(error->ring[i].ringbuffer);
+               drm_free(error->ring[i].requests, DRM_I915_GEM);
+       }
+
+       drm_free(error->active_bo, DRM_I915_GEM);
+       drm_free(error->overlay, DRM_I915_GEM);
+       drm_free(error, DRM_I915_GEM);
+}
+
+static u32
+capture_bo_list(struct drm_i915_error_buffer *err, int count,
+    struct list_head *head)
+{
+       struct drm_i915_gem_object *obj;
+       int i = 0;
+
+       list_for_each_entry(obj, head, mm_list) {
+               err->size = obj->base.size;
+               err->name = obj->base.name;
+               err->seqno = obj->last_rendering_seqno;
+               err->gtt_offset = obj->gtt_offset;
+               err->read_domains = obj->base.read_domains;
+               err->write_domain = obj->base.write_domain;
+               err->fence_reg = obj->fence_reg;
+               err->pinned = 0;
+               if (obj->pin_count > 0)
+                       err->pinned = 1;
+               if (obj->user_pin_count > 0)
+                       err->pinned = -1;
+               err->tiling = obj->tiling_mode;
+               err->dirty = obj->dirty;
+               err->purgeable = obj->madv != I915_MADV_WILLNEED;
+               err->ring = obj->ring ? obj->ring->id : -1;
+               err->cache_level = obj->cache_level;
+
+               if (++i == count)
+                       break;
+
+               err++;
+       }
+
+       return (i);
+}
+
+static void
+i915_gem_record_fences(struct drm_device *dev,
+    struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       /* Fences */
+       switch (INTEL_INFO(dev)->gen) {
+       case 7:
+       case 6:
+               for (i = 0; i < 16; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+               break;
+       case 5:
+       case 4:
+               for (i = 0; i < 16; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_965_0 +
+                           (i * 8));
+               break;
+       case 3:
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
+                                   (i * 4));
+       case 2:
+               for (i = 0; i < 8; i++)
+                       error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+               break;
+
+       }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+                            struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj;
+       u32 seqno;
+
+       if (!ring->get_seqno)
+               return (NULL);
+
+       seqno = ring->get_seqno(ring);
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+               if (obj->ring != ring)
+                       continue;
+
+               if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+                       continue;
+
+               if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+                       continue;
+
+               /* We need to copy these to an anonymous buffer as the simplest
+                * method to avoid being overwritten by userspace.
+                */
+               return (i915_error_object_create(dev_priv, obj));
+       }
+
+       return NULL;
+}
+
+static void
+i915_record_ring_state(struct drm_device *dev,
+    struct drm_i915_error_state *error,
+    struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+               error->semaphore_mboxes[ring->id][0]
+                       = I915_READ(RING_SYNC_0(ring->mmio_base));
+               error->semaphore_mboxes[ring->id][1]
+                       = I915_READ(RING_SYNC_1(ring->mmio_base));
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+               if (ring->id == RCS) {
+                       error->instdone1 = I915_READ(INSTDONE1);
+                       error->bbaddr = I915_READ64(BB_ADDR);
+               }
+       } else {
+               error->ipeir[ring->id] = I915_READ(IPEIR);
+               error->ipehr[ring->id] = I915_READ(IPEHR);
+               error->instdone[ring->id] = I915_READ(INSTDONE);
+       }
+
+       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+       error->seqno[ring->id] = ring->get_seqno(ring);
+       error->acthd[ring->id] = intel_ring_get_active_head(ring);
+       error->head[ring->id] = I915_READ_HEAD(ring);
+       error->tail[ring->id] = I915_READ_TAIL(ring);
+
+       error->cpu_ring_head[ring->id] = ring->head;
+       error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void
+i915_gem_record_rings(struct drm_device *dev,
+    struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *request;
+       int i, count;
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_ring_buffer *ring = &dev_priv->rings[i];
+
+               if (ring->obj == NULL)
+                       continue;
+
+               i915_record_ring_state(dev, error, ring);
+
+               error->ring[i].batchbuffer =
+                       i915_error_first_batchbuffer(dev_priv, ring);
+
+               error->ring[i].ringbuffer =
+                       i915_error_object_create(dev_priv, ring->obj);
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list)
+                       count++;
+
+               error->ring[i].num_requests = count;
+               error->ring[i].requests = kmalloc(count *
+                   sizeof(struct drm_i915_error_request), DRM_I915_GEM,
+                   M_WAITOK);
+               if (error->ring[i].requests == NULL) {
+                       error->ring[i].num_requests = 0;
+                       continue;
+               }
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list) {
+                       struct drm_i915_error_request *erq;
+
+                       erq = &error->ring[i].requests[count++];
+                       erq->seqno = request->seqno;
+                       erq->jiffies = request->emitted_jiffies;
+                       erq->tail = request->tail;
+               }
+       }
+}
+
+static void
+i915_capture_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj;
+       struct drm_i915_error_state *error;
+       int i, pipe;
+
+       lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
+       error = dev_priv->first_error;
+       lockmgr(&dev_priv->error_lock, LK_RELEASE);
+       if (error != NULL)
+               return;
+
+       /* Account for pipe specific data like PIPE*STAT */
+       error = kmalloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
+       if (error == NULL) {
+               DRM_DEBUG("out of memory, not capturing error state\n");
+               return;
+       }
+
+       DRM_INFO("capturing error event; look for more information in "
+           "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
+
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+       for_each_pipe(pipe)
+               error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+
+       i915_gem_record_fences(dev, error);
+       i915_gem_record_rings(dev, error);
+
+       /* Record buffers on the active and pinned lists. */
+       error->active_bo = NULL;
+       error->pinned_bo = NULL;
+
+       i = 0;
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+               i++;
+       error->active_bo_count = i;
+       list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+               i++;
+       error->pinned_bo_count = i - error->active_bo_count;
+
+       error->active_bo = NULL;
+       error->pinned_bo = NULL;
+       if (i) {
+               error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
+                   DRM_I915_GEM, M_NOWAIT);
+               if (error->active_bo)
+                       error->pinned_bo = error->active_bo +
+                           error->active_bo_count;
+       }
+
+       if (error->active_bo)
+               error->active_bo_count = capture_bo_list(error->active_bo,
+                   error->active_bo_count, &dev_priv->mm.active_list);
+
+       if (error->pinned_bo)
+               error->pinned_bo_count = capture_bo_list(error->pinned_bo,
+                   error->pinned_bo_count, &dev_priv->mm.pinned_list);
+
+       microtime(&error->time);
+
+       error->overlay = intel_overlay_capture_error_state(dev);
+       error->display = intel_display_capture_error_state(dev);
+
+       lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
+       if (dev_priv->first_error == NULL) {
+               dev_priv->first_error = error;
+               error = NULL;
+       }
+       lockmgr(&dev_priv->error_lock, LK_RELEASE);
+
+       if (error != NULL)
+               i915_error_state_free(dev, error);
+}
+
+void
+i915_destroy_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+
+       lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
+       error = dev_priv->first_error;
+       dev_priv->first_error = NULL;
+       lockmgr(&dev_priv->error_lock, LK_RELEASE);
+
+       if (error != NULL)
+               i915_error_state_free(dev, error);
 }
index aec84b1..cc28452 100644 (file)
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD: src/sys/dev/drm2/i915/i915_reg.h,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
 #ifndef _I915_REG_H_
 #define _I915_REG_H_
 
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
  */
 #define INTEL_GMCH_CTRL                0x52
-#define INTEL_GMCH_ENABLED     0x4
-#define INTEL_GMCH_MEM_MASK    0x1
-#define INTEL_GMCH_MEM_64M     0x1
-#define INTEL_GMCH_MEM_128M    0
-
-#define INTEL_GMCH_GMS_MASK            (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED    (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M   (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M   (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M   (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M  (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M  (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M     (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M     (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M      (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M     (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M     (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
+#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
 
 /* PCI config space */
 
 #define HPLLCC 0xc0 /* 855 only */
-#define   GC_CLOCK_CONTROL_MASK                (3 << 0)
+#define   GC_CLOCK_CONTROL_MASK                (0xf << 0)
 #define   GC_CLOCK_133_200             (0 << 0)
 #define   GC_CLOCK_100_200             (1 << 0)
 #define   GC_CLOCK_100_133             (2 << 0)
 #define   GC_CLOCK_166_250             (3 << 0)
+#define GCFGC2 0xda
 #define GCFGC  0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE      (1 << 7)
 #define   GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
 #define   GC_DISPLAY_CLOCK_333_MHZ     (4 << 4)
 #define   GC_DISPLAY_CLOCK_MASK                (7 << 4)
+#define   GM45_GC_RENDER_CLOCK_MASK    (0xf << 0)
+#define   GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
+#define   GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0)
+#define   GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0)
+#define   GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0)
+#define   I965_GC_RENDER_CLOCK_MASK    (0xf << 0)
+#define   I965_GC_RENDER_CLOCK_267_MHZ (2 << 0)
+#define   I965_GC_RENDER_CLOCK_333_MHZ (3 << 0)
+#define   I965_GC_RENDER_CLOCK_444_MHZ (4 << 0)
+#define   I965_GC_RENDER_CLOCK_533_MHZ (5 << 0)
+#define   I945_GC_RENDER_CLOCK_MASK    (7 << 0)
+#define   I945_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define   I945_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define   I945_GC_RENDER_CLOCK_250_MHZ (3 << 0)
+#define   I945_GC_RENDER_CLOCK_400_MHZ (5 << 0)
+#define   I915_GC_RENDER_CLOCK_MASK    (7 << 0)
+#define   I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define   I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define   I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
 #define LBB    0xf4
 
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define  GRDOM_FULL    (0<<2)
+#define  GRDOM_RENDER  (1<<2)
+#define  GRDOM_MEDIA   (3<<2)
+
+#define GEN6_MBCUNIT_SNPCR     0x900c /* for LLC config */
+#define   GEN6_MBC_SNPCR_SHIFT 21
+#define   GEN6_MBC_SNPCR_MASK  (3<<21)
+#define   GEN6_MBC_SNPCR_MAX   (0<<21)
+#define   GEN6_MBC_SNPCR_MED   (1<<21)
+#define   GEN6_MBC_SNPCR_LOW   (2<<21)
+#define   GEN6_MBC_SNPCR_MIN   (3<<21) /* only 1/16th of the cache is shared */
+
+#define GEN6_MBCTL             0x0907c
+#define   GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define   GEN6_MBCTL_CTX_FETCH_NEEDED  (1 << 3)
+#define   GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define   GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define   GEN6_MBCTL_BOOT_FETCH_MECH   (1 << 0)
+
+#define GEN6_GDRST     0x941c
+#define  GEN6_GRDOM_FULL               (1 << 0)
+#define  GEN6_GRDOM_RENDER             (1 << 1)
+#define  GEN6_GRDOM_MEDIA              (1 << 2)
+#define  GEN6_GRDOM_BLT                        (1 << 3)
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID                 (1 << 0)
+#define GEN6_PDE_LARGE_PAGE            (2 << 0) /* use 32kb pages */
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID                 (1 << 0)
+#define GEN6_PTE_UNCACHED              (1 << 1)
+#define GEN6_PTE_CACHE_LLC             (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
+#define GEN6_PTE_CACHE_BITS            (3 << 1)
+#define GEN6_PTE_GFDT                  (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+#define RING_PP_DIR_BASE(ring)         ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring)    ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring)         ((ring)->mmio_base+0x220)
+#define   PP_DIR_DCLV_2G               0xffffffff
+
+#define GAM_ECOCHK                     0x4090
+#define   ECOCHK_SNB_BIT               (1<<10)
+#define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
+#define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
 #define MI_NOOP                        MI_INSTR(0, 0)
 #define MI_USER_INTERRUPT      MI_INSTR(0x02, 0)
 #define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP     (1<<16)
 #define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
 #define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
 #define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
 #define   MI_NO_WRITE_FLUSH    (1 << 2)
 #define   MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
 #define   MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
+#define   MI_INVALIDATE_ISP    (1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END    MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH       MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN  (1<<0)
 #define MI_REPORT_HEAD         MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP                MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE  (0x0<<21)
+#define   MI_OVERLAY_ON                (0x1<<21)
+#define   MI_OVERLAY_OFF       (0x2<<21)
 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP                MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915   MI_INSTR(0x14, 1)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_SET_CONTEXT         MI_INSTR(0x18, 0)
+#define   MI_MM_SPACE_GTT              (1<<8)
+#define   MI_MM_SPACE_PHYSICAL         (0<<8)
+#define   MI_SAVE_EXT_STATE_EN         (1<<3)
+#define   MI_RESTORE_EXT_STATE_EN      (1<<2)
+#define   MI_FORCE_RESTORE             (1<<1)
+#define   MI_RESTORE_INHIBIT           (1<<0)
 #define MI_STORE_DWORD_IMM     MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL       (1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX   MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM   MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: Simply issue x
+ *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*x-1)
+#define MI_FLUSH_DW            MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_INVALIDATE_TLB    (1<<18)
+#define   MI_INVALIDATE_BSD    (1<<7)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE  (1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
-
+#define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6+ */
+#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define  MI_SEMAPHORE_UPDATE       (1<<21)
+#define  MI_SEMAPHORE_COMPARE      (1<<20)
+#define  MI_SEMAPHORE_REGISTER     (1<<18)
+#define  MI_SEMAPHORE_SYNC_RV      (2<<16)
+#define  MI_SEMAPHORE_SYNC_RB      (0<<16)
+#define  MI_SEMAPHORE_SYNC_VR      (0<<16)
+#define  MI_SEMAPHORE_SYNC_VB      (2<<16)
+#define  MI_SEMAPHORE_SYNC_BR      (2<<16)
+#define  MI_SEMAPHORE_SYNC_BV      (0<<16)
+#define  MI_SEMAPHORE_SYNC_INVALID  (1<<0)
 /*
  * 3D instructions used by the kernel
  */
 #define   ASYNC_FLIP                (1<<22)
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
+#define GFX_OP_PIPE_CONTROL(len)       ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_CS_STALL                                (1<<20)
+#define   PIPE_CONTROL_QW_WRITE                                (1<<14)
+#define   PIPE_CONTROL_DEPTH_STALL                     (1<<13)
+#define   PIPE_CONTROL_WRITE_FLUSH                     (1<<12)
+#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH       (1<<12) /* gen6+ */
+#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE    (1<<11) /* MBZ on Ironlake */
+#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE                (1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE          (1<<9)
+#define   PIPE_CONTROL_NOTIFY                          (1<<8)
+#define   PIPE_CONTROL_VF_CACHE_INVALIDATE             (1<<4)
+#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE          (1<<3)
+#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE          (1<<2)
+#define   PIPE_CONTROL_STALL_AT_SCOREBOARD             (1<<1)
+#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH               (1<<0)
+#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830               0x6070
+#define  DEBUG_RESET_FULL              (1<<7)
+#define  DEBUG_RESET_RENDER            (1<<8)
+#define  DEBUG_RESET_DISPLAY           (1<<9)
+
 
 /*
  * Fence registers
 #define   I830_FENCE_SIZE_BITS(size)   ((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT       4
 #define   I830_FENCE_REG_VALID         (1<<0)
+#define   I915_FENCE_MAX_PITCH_VAL     4
+#define   I830_FENCE_MAX_PITCH_VAL     6
+#define   I830_FENCE_MAX_SIZE_VAL      (1<<8)
 
 #define   I915_FENCE_START_MASK                0x0ff00000
 #define   I915_FENCE_SIZE_BITS(size)   ((ffs((size) >> 20) - 1) << 8)
 #define   I965_FENCE_PITCH_SHIFT       2
 #define   I965_FENCE_TILING_Y_SHIFT    1
 #define   I965_FENCE_REG_VALID         (1<<0)
+#define   I965_FENCE_MAX_PITCH_VAL     0x0400
+
+#define FENCE_REG_SANDYBRIDGE_0                0x100000
+#define   SANDYBRIDGE_FENCE_PITCH_SHIFT        32
+
+/* control register for cpu gtt access */
+#define TILECTL                                0x101000
+#define   TILECTL_SWZCTL                       (1 << 0)
+#define   TILECTL_TLB_PREFETCH_DIS     (1 << 2)
+#define   TILECTL_BACKSNOOP_DIS                (1 << 3)
 
 /*
  * Instruction and interrupt control regs
  */
-#define PRB0_TAIL      0x02030
-#define PRB0_HEAD      0x02034
-#define PRB0_START     0x02038
-#define PRB0_CTL       0x0203c
+#define PGTBL_ER       0x02024
+#define RENDER_RING_BASE       0x02000
+#define BSD_RING_BASE          0x04000
+#define GEN6_BSD_RING_BASE     0x12000
+#define BLT_RING_BASE          0x22000
+#define RING_TAIL(base)                ((base)+0x30)
+#define RING_HEAD(base)                ((base)+0x34)
+#define RING_START(base)       ((base)+0x38)
+#define RING_CTL(base)         ((base)+0x3c)
+#define RING_SYNC_0(base)      ((base)+0x40)
+#define RING_SYNC_1(base)      ((base)+0x44)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define RING_MAX_IDLE(base)    ((base)+0x54)
+#define RING_HWS_PGA(base)     ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base)        ((base)+0x2080)
+#define ARB_MODE               0x04030
+#define   ARB_MODE_SWIZZLE_SNB (1<<4)
+#define   ARB_MODE_SWIZZLE_IVB (1<<5)
+#define   ARB_MODE_ENABLE(x)   GFX_MODE_ENABLE(x)
+#define   ARB_MODE_DISABLE(x)  GFX_MODE_DISABLE(x)
+#define RENDER_HWS_PGA_GEN7    (0x04080)
+#define RING_FAULT_REG(ring)   (0x4094 + 0x100*(ring)->id)
+#define DONE_REG               0x40b0
+#define BSD_HWS_PGA_GEN7       (0x04180)
+#define BLT_HWS_PGA_GEN7       (0x04280)
+#define RING_ACTHD(base)       ((base)+0x74)
+#define RING_NOPID(base)       ((base)+0x94)
+#define RING_IMR(base)         ((base)+0xa8)
 #define   TAIL_ADDR            0x001FFFF8
 #define   HEAD_WRAP_COUNT      0xFFE00000
 #define   HEAD_WRAP_ONE                0x00200000
 #define   RING_VALID_MASK      0x00000001
 #define   RING_VALID           0x00000001
 #define   RING_INVALID         0x00000000
+#define   RING_WAIT_I8XX       (1<<0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT            (1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE  (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL      0x02030
+#define PRB0_HEAD      0x02034
+#define PRB0_START     0x02038
+#define PRB0_CTL       0x0203c
 #define PRB1_TAIL      0x02040 /* 915+ only */
 #define PRB1_HEAD      0x02044 /* 915+ only */
 #define PRB1_START     0x02048 /* 915+ only */
 #define PRB1_CTL       0x0204c /* 915+ only */
+#endif
+#define IPEIR_I965     0x02064
+#define IPEHR_I965     0x02068
+#define INSTDONE_I965  0x0206c
+#define RING_IPEIR(base)       ((base)+0x64)
+#define RING_IPEHR(base)       ((base)+0x68)
+#define RING_INSTDONE(base)    ((base)+0x6c)
+#define RING_INSTPS(base)      ((base)+0x70)
+#define RING_DMA_FADD(base)    ((base)+0x78)
+#define RING_INSTPM(base)      ((base)+0xc0)
+#define INSTPS         0x02070 /* 965+ only */
+#define INSTDONE1      0x0207c /* 965+ only */
 #define ACTHD_I965     0x02074
 #define HWS_PGA                0x02080
 #define HWS_ADDRESS_MASK       0xfffff000
 #define HWS_START_ADDRESS_SHIFT        4
+#define PWRCTXA                0x2088 /* 965GM+ only */
+#define   PWRCTX_EN    (1<<0)
 #define IPEIR          0x02088
+#define IPEHR          0x0208c
+#define INSTDONE       0x02090
 #define NOPID          0x02094
 #define HWSTAM         0x02098
+
+#define ERROR_GEN6     0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior.  The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN    0x02084
+#define _3D_CHICKEN2   0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
+#define _3D_CHICKEN3   0x02090
+
+#define MI_MODE                0x0209c
+# define VS_TIMER_DISPATCH                             (1 << 6)
+# define MI_FLUSH_ENABLE                               (1 << 12)
+
+#define GFX_MODE       0x02520
+#define GFX_MODE_GEN7  0x0229c
+#define RING_MODE_GEN7(ring)   ((ring)->mmio_base+0x29c)
+#define   GFX_RUN_LIST_ENABLE          (1<<15)
+#define   GFX_TLB_INVALIDATE_ALWAYS    (1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE     (1<<12)
+#define   GFX_REPLAY_MODE              (1<<11)
+#define   GFX_PSMI_GRANULARITY         (1<<10)
+#define   GFX_PPGTT_ENABLE             (1<<9)
+
+#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
+#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
+
 #define SCPD0          0x0209c /* 915+ only */
 #define IER            0x020a0
 #define IIR            0x020a4
 #define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT           (1<<18)
 #define   I915_DISPLAY_PORT_INTERRUPT                  (1<<17)
 #define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT   (1<<15)
-#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT     (1<<14)
+#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT     (1<<14) /* p-state */
 #define   I915_HWB_OOM_INTERRUPT                       (1<<13)
 #define   I915_SYNC_STATUS_INTERRUPT                   (1<<12)
 #define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT  (1<<11)
 #define   I915_DEBUG_INTERRUPT                         (1<<2)
 #define   I915_USER_INTERRUPT                          (1<<1)
 #define   I915_ASLE_INTERRUPT                          (1<<0)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define EIR            0x020b0
 #define EMR            0x020b4
 #define ESR            0x020b8
+#define   GM45_ERROR_PAGE_TABLE                                (1<<5)
+#define   GM45_ERROR_MEM_PRIV                          (1<<4)
+#define   I915_ERROR_PAGE_TABLE                                (1<<4)
+#define   GM45_ERROR_CP_PRIV                           (1<<3)
+#define   I915_ERROR_MEMORY_REFRESH                    (1<<1)
+#define   I915_ERROR_INSTRUCTION                       (1<<0)
 #define INSTPM         0x020c0
+#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define   INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+                                       will not assert AGPBUSY# and will only
+                                       be delivered when out of C3. */
+#define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
 #define ACTHD          0x020c8
 #define FW_BLC         0x020d8
+#define FW_BLC2                0x020dc
 #define FW_BLC_SELF    0x020e0 /* 915+ only */
+#define   FW_BLC_SELF_EN_MASK      (1<<31)
+#define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
+#define   FW_BLC_SELF_EN           (1<<15) /* 945 only */
+#define MM_BURST_LENGTH     0x00700000
+#define MM_FIFO_WATERMARK   0x0001F000
+#define LM_BURST_LENGTH     0x00000700
+#define LM_FIFO_WATERMARK   0x0000001F
 #define MI_ARB_STATE   0x020e4 /* 915+ only */
+#define   MI_ARB_MASK_SHIFT      16    /* shift for enable bits */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ *   fetches. This is not turned on by default
+ */
+#define   MI_ARB_RENDER_TLB_LOW_PRIORITY       (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define   MI_ARB_ISOCH_WAIT_GTT                        (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define   MI_ARB_BLOCK_GRANT_MASK              (3 << 12)
+#define   MI_ARB_BLOCK_GRANT_8                 (0 << 12)       /* for 3 display planes */
+#define   MI_ARB_BLOCK_GRANT_4                 (1 << 12)       /* for 2 display planes */
+#define   MI_ARB_BLOCK_GRANT_2                 (2 << 12)       /* for 1 display plane */
+#define   MI_ARB_BLOCK_GRANT_0                 (3 << 12)       /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define   MI_ARB_C3_LP_WRITE_ENABLE            (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2TLB fetches.
+ */
+#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE      (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE       (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define   MI_ARB_CACHE_SNOOP_DISABLE           (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define   MI_ARB_TIME_SLICE_MASK               (7 << 5)
+#define   MI_ARB_TIME_SLICE_1                  (0 << 5)
+#define   MI_ARB_TIME_SLICE_2                  (1 << 5)
+#define   MI_ARB_TIME_SLICE_4                  (2 << 5)
+#define   MI_ARB_TIME_SLICE_6                  (3 << 5)
+#define   MI_ARB_TIME_SLICE_8                  (4 << 5)
+#define   MI_ARB_TIME_SLICE_10                 (5 << 5)
+#define   MI_ARB_TIME_SLICE_14                 (6 << 5)
+#define   MI_ARB_TIME_SLICE_16                 (7 << 5)
+
+/* Low priority grace period page size */
+#define   MI_ARB_LOW_PRIORITY_GRACE_4KB                (0 << 4)        /* default */
+#define   MI_ARB_LOW_PRIORITY_GRACE_8KB                (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE  (1 << 2)
+
+/* Set display plane priority */
+#define   MI_ARB_DISPLAY_PRIORITY_A_B          (0 << 0)        /* display A > display B */
+#define   MI_ARB_DISPLAY_PRIORITY_B_A          (1 << 0)        /* display B > display A */
+
 #define CACHE_MODE_0   0x02120 /* 915+ only */
 #define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define          CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
 #define   CM0_DEPTH_EVICT_DISABLE (1<<4)
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR                0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL  0x02170 /* 915+ only */
-
+#define ECOSKPD                0x021d0
+#define   ECO_GATING_CX_ONLY   (1<<3)
+#define   ECO_FLIP_DONE                (1<<0)
+
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM     0x2098
+#define GEN6_RENDER_IMR                0x20a8
+#define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT         (1 << 8)
+#define   GEN6_RENDER_PPGTT_PAGE_FAULT                 (1 << 7)
+#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED          (1 << 6)
+#define   GEN6_RENDER_L3_PARITY_ERROR                  (1 << 5)
+#define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT    (1 << 4)
+#define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR      (1 << 3)
+#define   GEN6_RENDER_SYNC_STATUS                      (1 << 2)
+#define   GEN6_RENDER_DEBUG_INTERRUPT                  (1 << 1)
+#define   GEN6_RENDER_USER_INTERRUPT                   (1 << 0)
+
+#define GEN6_BLITTER_HWSTAM    0x22098
+#define GEN6_BLITTER_IMR       0x220a8
+#define   GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT    (1 << 26)
+#define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR     (1 << 25)
+#define   GEN6_BLITTER_SYNC_STATUS                     (1 << 24)
+#define   GEN6_BLITTER_USER_INTERRUPT                  (1 << 22)
+
+#define GEN6_BLITTER_ECOSKPD   0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT                      16
+#define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL    0x12050
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK      (1 << 16)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE          (1 << 0)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE           0
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR                   (1 << 3)
+
+#define GEN6_BSD_HWSTAM                        0x12098
+#define GEN6_BSD_IMR                   0x120a8
+#define   GEN6_BSD_USER_INTERRUPT      (1 << 12)
+
+#define GEN6_BSD_RNCID                 0x12198
 
 /*
  * Framebuffer compression (915+ only)
 #define   FBC_CTL_PERIODIC     (1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_C3_IDLE      (1<<13)
 #define   FBC_CTL_STRIDE_SHIFT (5)
 #define   FBC_CTL_FENCENO      (1<<0)
 #define FBC_COMMAND            0x0320c
 #define   FBC_CTL_PLANEA       (0<<0)
 #define   FBC_CTL_PLANEB       (1<<0)
 #define FBC_FENCE_OFF          0x0321b
+#define FBC_TAG                        0x03300
 
 #define FBC_LL_SIZE            (1536)
 
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE           0x3200
+#define DPFC_CONTROL           0x3208
+#define   DPFC_CTL_EN          (1<<31)
+#define   DPFC_CTL_PLANEA      (0<<30)
+#define   DPFC_CTL_PLANEB      (1<<30)
+#define   DPFC_CTL_FENCE_EN    (1<<29)
+#define   DPFC_CTL_PERSISTENT_MODE     (1<<25)
+#define   DPFC_SR_EN           (1<<10)
+#define   DPFC_CTL_LIMIT_1X    (0<<6)
+#define   DPFC_CTL_LIMIT_2X    (1<<6)
+#define   DPFC_CTL_LIMIT_4X    (2<<6)
+#define DPFC_RECOMP_CTL                0x320c
+#define   DPFC_RECOMP_STALL_EN (1<<27)
+#define   DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define   DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define   DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define   DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS            0x3210
+#define   DPFC_INVAL_SEG_SHIFT  (16)
+#define   DPFC_INVAL_SEG_MASK  (0x07ff0000)
+#define   DPFC_COMP_SEG_SHIFT  (0)
+#define   DPFC_COMP_SEG_MASK   (0x000003ff)
+#define DPFC_STATUS2           0x3214
+#define DPFC_FENCE_YOFF                0x3218
+#define DPFC_CHICKEN           0x3224
+#define   DPFC_HT_MODIFY       (1<<31)
+
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE       0x43200
+#define ILK_DPFC_CONTROL       0x43208
+/* The bit 28-8 is reserved */
+#define   DPFC_RESERVED                (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL    0x4320c
+#define ILK_DPFC_STATUS                0x43210
+#define ILK_DPFC_FENCE_YOFF    0x43218
+#define ILK_DPFC_CHICKEN       0x43224
+#define ILK_FBC_RT_BASE                0x2128
+#define   ILK_FBC_RT_VALID     (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1   0x42000
+#define   ILK_FBCQ_DIS         (1<<22)
+#define          ILK_PABSTRETCH_DIS    (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA                0x100100
+#define   SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET  0x100104
+
+
 /*
  * GPIO regs
  */
 # define GPIO_DATA_VAL_IN              (1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
 
+#define GMBUS0                 0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ    (0<<8)
+#define   GMBUS_RATE_50KHZ     (1<<8)
+#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED  0
+#define   GMBUS_PORT_SSC       1
+#define   GMBUS_PORT_VGADDC    2
+#define   GMBUS_PORT_PANEL     3
+#define   GMBUS_PORT_DPC       4 /* HDMIC */
+#define   GMBUS_PORT_DPB       5 /* SDVO, HDMIB */
+                                 /* 6 reserved */
+#define   GMBUS_PORT_DPD       7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1                 0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT     (1<<31)
+#define   GMBUS_SW_RDY         (1<<30)
+#define   GMBUS_ENT            (1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0<<25)
+#define   GMBUS_CYCLE_WAIT     (1<<25)
+#define   GMBUS_CYCLE_INDEX    (2<<25)
+#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ     (1<<0)
+#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define GMBUS2                 0x5108 /* status */
+#define   GMBUS_INUSE          (1<<15)
+#define   GMBUS_HW_WAIT_PHASE  (1<<14)
+#define   GMBUS_STALL_TIMEOUT  (1<<13)
+#define   GMBUS_INT            (1<<12)
+#define   GMBUS_HW_RDY         (1<<11)
+#define   GMBUS_SATOER         (1<<10)
+#define   GMBUS_ACTIVE         (1<<9)
+#define GMBUS3                 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4                 0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN         (1<<3)
+#define   GMBUS_IDLE_EN                (1<<2)
+#define   GMBUS_HW_WAIT_EN     (1<<1)
+#define   GMBUS_HW_RDY_EN      (1<<0)
+#define GMBUS5                 0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN (1<<31)
+
 /*
  * Clock control & power management
  */
 #define   VGA1_PD_P1_DIV_2     (1 << 13)
 #define   VGA1_PD_P1_SHIFT     8
 #define   VGA1_PD_P1_MASK      (0x1f << 8)
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
+#define _DPLL_A        0x06014
+#define _DPLL_B        0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE              (1 << 31)
 #define   DPLL_DVO_HIGH_SPEED          (1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE         (1 << 29)
 #define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
 #define   DPLLB_LVDS_P2_CLOCK_DIV_14   (0 << 24) /* i915 */
 #define   DPLLB_LVDS_P2_CLOCK_DIV_7    (1 << 24) /* i915 */
-#define   DPLL_P2_CLOCK_DIV_MASK               0x03000000 /* i915 */
-#define   DPLL_FPA01_P1_POST_DIV_MASK          0x00ff0000 /* i915 */
-#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD      0x00ff8000 /* IGD */
-
-#define I915_FIFO_UNDERRUN_STATUS              (1UL<<31)
-#define I915_CRC_ERROR_ENABLE                  (1UL<<29)
-#define I915_CRC_DONE_ENABLE                   (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE                        (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE            (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE       (1UL<<24)
-#define I915_DPST_EVENT_ENABLE                 (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE           (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE                (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE       (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE     (1UL<<18)       /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE           (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE            (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS                (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS         (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS            (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS            (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS       (1UL<<8)
-#define I915_DPST_EVENT_STATUS                 (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS           (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS                (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS       (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS     (1UL<<2)        /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS           (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS            (1UL<<0)
+#define   DPLL_P2_CLOCK_DIV_MASK       0x03000000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK  0x00ff0000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
 
 #define SRX_INDEX              0x3c4
 #define SRX_DATA               0x3c5
 #define LVDS                   0x61180
 #define LVDS_ON                        (1<<31)
 
-#define ADPA                   0x61100
-#define ADPA_DPMS_MASK         (~(3<<10))
-#define ADPA_DPMS_ON           (0<<10)
-#define ADPA_DPMS_SUSPEND      (1<<10)
-#define ADPA_DPMS_STANDBY      (2<<10)
-#define ADPA_DPMS_OFF          (3<<10)
-
-#define RING_TAIL              0x00
-#define TAIL_ADDR              0x001FFFF8
-#define RING_HEAD              0x04
-#define HEAD_WRAP_COUNT                0xFFE00000
-#define HEAD_WRAP_ONE          0x00200000
-#define HEAD_ADDR              0x001FFFFC
-#define RING_START             0x08
-#define START_ADDR             0xFFFFF000
-#define RING_LEN               0x0C
-#define RING_NR_PAGES          0x001FF000
-#define RING_REPORT_MASK       0x00000006
-#define RING_REPORT_64K                0x00000002
-#define RING_REPORT_128K       0x00000004
-#define RING_NO_REPORT         0x00000000
-#define RING_VALID_MASK                0x00000001
-#define RING_VALID             0x00000001
-#define RING_INVALID           0x00000000
-
 /* Scratch pad debug 0 reg:
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830     0x001f0000
  * this field (only one bit may be set).
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS        0x003f0000
-#define   DPLL_FPA01_P1_POST_DIV_SHIFT         16
-#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD     15
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
 /* i830, required in DVO non-gang */
 #define   PLL_P2_DIVIDE_BY_4           (1 << 23)
 #define   PLL_P1_DIVIDE_BY_TWO         (1 << 21) /* i830 */
 #define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
 #define   PLL_REF_INPUT_MASK           (3 << 13)
 #define   PLL_LOAD_PULSE_PHASE_SHIFT           9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)       (((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT            0
+# define DPLL_FPA1_P1_POST_DIV_MASK             0xff
+
 /*
  * Parallel to Serial Load Pulse phase selection.
  * Selects the phase for the 10X DPLL clock for the PCIe
 #define   SDVO_MULTIPLIER_MASK                 0x000000ff
 #define   SDVO_MULTIPLIER_SHIFT_HIRES          4
 #define   SDVO_MULTIPLIER_SHIFT_VGA            0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD 0x0601c /* 965+ only */
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
  *
  */
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK      0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT     0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define FPA0   0x06040
-#define FPA1   0x06044
-#define FPB0   0x06048
-#define FPB1   0x0604c
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0  0x06040
+#define _FPA1  0x06044
+#define _FPB0  0x06048
+#define _FPB1  0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
 #define   FP_N_DIV_MASK                0x003f0000
-#define   FP_N_IGD_DIV_MASK    0x00ff0000
+#define   FP_N_PINEVIEW_DIV_MASK       0x00ff0000
 #define   FP_N_DIV_SHIFT               16
 #define   FP_M1_DIV_MASK       0x00003f00
 #define   FP_M1_DIV_SHIFT               8
 #define   FP_M2_DIV_MASK       0x0000003f
-#define   FP_M2_IGD_DIV_MASK   0x000000ff
+#define   FP_M2_PINEVIEW_DIV_MASK      0x000000ff
 #define   FP_M2_DIV_SHIFT               0
 #define DPLL_TEST      0x606c
 #define   DPLLB_TEST_SDVO_DIV_1                (0 << 22)
 #define   DPLLA_TEST_M_BYPASS          (1 << 2)
 #define   DPLLA_INPUT_BUFFER_ENABLE    (1 << 0)
 #define D_STATE                0x6104
-#define CG_2D_DIS      0x6200
-#define CG_3D_DIS      0x6204
+#define  DSTATE_GFX_RESET_I830                 (1<<6)
+#define  DSTATE_PLL_D3_OFF                     (1<<3)
+#define  DSTATE_GFX_CLOCK_GATING               (1<<1)
+#define  DSTATE_DOT_CLOCK_GATING               (1<<0)
+#define DSPCLK_GATE_D          0x6200
+# define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE            (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE            (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE            (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE           (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE            (1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE            (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE            (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE            (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE            (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE            (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE            (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE            (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE            (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE            (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE           (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE           (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE            (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE            (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE            (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE             (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE             (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE             (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE            (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE           (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE            (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE            (1 << 4)
+/**
+ * This bit must be set on the 830 to&