1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include <drm/i915_drm.h>
32 #include "intel_drv.h"
33 #include "intel_ringbuffer.h"
34 #include <linux/workqueue.h>
36 extern struct drm_i915_private *i915_mch_dev;
38 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
40 #define BEGIN_LP_RING(n) \
41 intel_ring_begin(LP_RING(dev_priv), (n))
44 intel_ring_emit(LP_RING(dev_priv), x)
46 #define ADVANCE_LP_RING() \
47 intel_ring_advance(LP_RING(dev_priv))
50 * Lock test for when it's just for synchronization of ring access.
52 * In that case, we don't need to do it when GEM is initialized as nobody else
53 * has access to the ring.
55 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
56 if (LP_RING(dev->dev_private)->obj == NULL) \
57 LOCK_TEST_WITH_RETURN(dev, file); \
61 intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
63 if (I915_NEED_GFX_HWS(dev_priv->dev))
64 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
66 return intel_read_status_page(LP_RING(dev_priv), reg);
69 #define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
70 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
71 #define I915_BREADCRUMB_INDEX 0x21
/*
 * i915_update_dri1_breadcrumb - legacy DRI1 breadcrumb update hook.
 * Deliberately does nothing under kernel modesetting (see comment below);
 * the non-KMS path is not visible in this (truncated) view.
 */
73 void i915_update_dri1_breadcrumb(struct drm_device *dev)
76 * The dri breadcrumb update races against the drm master disappearing.
77 * Instead of trying to fix this (this is by far not the only ums issue)
78 * just don't do the update in kms mode.
80 if (drm_core_check_feature(dev, DRIVER_MODESET))
83 /* XXX: don't do it at all actually */
/*
 * i915_write_hws_pga - program the physical address of the hardware
 * status page into the HWS_PGA register.  On gen4+ the bits above bit 32
 * of the bus address are folded into bits 7:4 of the register value
 * (presumably the gen4 HWS_PGA high-address field — confirm against the
 * hardware documentation).
 */
87 static void i915_write_hws_pga(struct drm_device *dev)
89 drm_i915_private_t *dev_priv = dev->dev_private;
92 addr = dev_priv->status_page_dmah->busaddr;
93 if (INTEL_INFO(dev)->gen >= 4)
/* fold address bits 35:32 into register bits 7:4 */
94 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
95 I915_WRITE(HWS_PGA, addr);
99 * Sets up the hardware status page for devices that need a physical address
/*
 * i915_init_phys_hws - allocate and install a physically-addressed
 * hardware status page (used on hardware without a GFX HWS).  Allocates
 * one DMA-coherent page constrained below 4GB, records both CPU and bus
 * addresses, zeroes the page and programs HWS_PGA.  Error handling on
 * allocation failure is present but partly outside this truncated view.
 */
102 static int i915_init_phys_hws(struct drm_device *dev)
104 drm_i915_private_t *dev_priv = dev->dev_private;
105 struct intel_ring_buffer *ring = LP_RING(dev_priv);
108 * Program Hardware Status Page
109 * XXXKIB Keep 4GB limit for allocation for now. This method
110 * of allocation is used on <= 965 hardware, that has several
111 * erratas regarding the use of physical memory > 4 GB.
/* 0xffffffff: restrict the DMA allocation to the low 4GB (see errata note) */
114 dev_priv->status_page_dmah =
115 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
117 if (!dev_priv->status_page_dmah) {
118 DRM_ERROR("Can not allocate hardware status page\n");
/* Publish both the kernel virtual and bus addresses of the page. */
121 ring->status_page.page_addr = dev_priv->hw_status_page =
122 dev_priv->status_page_dmah->vaddr;
123 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
125 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
127 i915_write_hws_pga(dev);
128 DRM_DEBUG("Enabled hardware status page, phys %jx\n",
129 (uintmax_t)dev_priv->dma_status_page);
134 * Frees the hardware status page, whether it's a physical address or a virtual
135 * address set up by the X Server.
/*
 * i915_free_hws - tear down the hardware status page: release the
 * physically-allocated DMA page if present, and/or drop the GFX-address
 * mapping set up by i915_set_status_page(), then point HWS_PGA at a
 * benign dummy address.
 */
137 static void i915_free_hws(struct drm_device *dev)
139 drm_i915_private_t *dev_priv = dev->dev_private;
140 struct intel_ring_buffer *ring = LP_RING(dev_priv);
142 if (dev_priv->status_page_dmah) {
143 drm_pci_free(dev, dev_priv->status_page_dmah);
144 dev_priv->status_page_dmah = NULL;
/* X-server-provided status page (GFX address) case. */
147 if (dev_priv->status_gfx_addr) {
148 dev_priv->status_gfx_addr = 0;
149 ring->status_page.gfx_addr = 0;
150 drm_core_ioremapfree(&dev_priv->hws_map, dev);
153 /* Need to rewrite hardware status page */
154 I915_WRITE(HWS_PGA, 0x1ffff000);
/*
 * i915_kernel_lost_context - resynchronize the software ring state
 * (head/tail/space) with the hardware ring registers after userspace may
 * have touched the ring behind our back (legacy UMS only; a no-op under
 * KMS).  Also flags the ring-empty condition in the shared sarea.
 */
157 void i915_kernel_lost_context(struct drm_device * dev)
159 drm_i915_private_t *dev_priv = dev->dev_private;
160 struct intel_ring_buffer *ring = LP_RING(dev_priv);
163 * We should never lose context on the ring with modesetting
164 * as we don't expose it to userspace
166 if (drm_core_check_feature(dev, DRIVER_MODESET))
169 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
170 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
171 ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
/* space went negative: the free span wraps around the ring buffer. */
173 ring->space += ring->size;
178 if (!dev->primary->master)
182 if (ring->head == ring->tail && dev_priv->sarea_priv)
183 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
/*
 * i915_dma_cleanup - tear down legacy DMA state: disable interrupts,
 * destroy all ring buffers, and clear the HWS virtual address on
 * GFX-HWS hardware.
 */
186 static int i915_dma_cleanup(struct drm_device * dev)
188 drm_i915_private_t *dev_priv = dev->dev_private;
192 /* Make sure interrupts are disabled here because the uninstall ioctl
193 * may not have been called from userspace and after dev_private
194 * is freed, it's too late.
196 if (dev->irq_enabled)
197 drm_irq_uninstall(dev);
198 for (i = 0; i < I915_NUM_RINGS; i++)
201 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
204 /* Clear the HWS virtual address at teardown */
205 if (I915_NEED_GFX_HWS(dev))
/*
 * i915_initialize - legacy (DRI1) DMA initialization: locate the sarea,
 * derive the per-driver sarea area from the client-supplied offset,
 * optionally create the render ring, and record front/back buffer layout
 * parameters from the init request.  On any failure the partially-built
 * state is undone via i915_dma_cleanup().
 * NOTE(review): init->sarea_priv_offset comes from userspace and is not
 * visibly validated here — the check may live in a line elided from this
 * truncated view; confirm before relying on it.
 */
211 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
213 drm_i915_private_t *dev_priv = dev->dev_private;
216 dev_priv->sarea = drm_getsarea(dev);
217 if (!dev_priv->sarea) {
218 DRM_ERROR("can not find sarea!\n");
219 i915_dma_cleanup(dev);
223 dev_priv->sarea_priv = (drm_i915_sarea_t *)
224 ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
226 if (init->ring_size != 0) {
/* A ring object already exists: refuse double initialization. */
227 if (LP_RING(dev_priv)->obj != NULL) {
228 i915_dma_cleanup(dev);
229 DRM_ERROR("Client tried to initialize ringbuffer in "
234 ret = intel_render_ring_init_dri(dev,
238 i915_dma_cleanup(dev);
/* Cache the framebuffer layout the client described. */
243 dev_priv->cpp = init->cpp;
244 dev_priv->back_offset = init->back_offset;
245 dev_priv->front_offset = init->front_offset;
246 dev_priv->current_page = 0;
247 dev_priv->sarea_priv->pf_current_page = 0;
249 /* Allow hardware batchbuffers unless told otherwise.
251 dev_priv->dri1.allow_batchbuffer = 1;
/*
 * i915_dma_resume - re-validate legacy DMA state after suspend: check
 * that the ring mapping and hardware status page still exist, then
 * reprogram the status page address (GFX address or physical, as
 * appropriate).
 */
256 static int i915_dma_resume(struct drm_device * dev)
258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
259 struct intel_ring_buffer *ring = LP_RING(dev_priv);
261 DRM_DEBUG_DRIVER("%s\n", __func__);
263 if (ring->virtual_start == NULL) {
264 DRM_ERROR("can not ioremap virtual address for"
269 /* Program Hardware Status Page */
270 if (!ring->status_page.page_addr) {
271 DRM_ERROR("Can not find hardware status page\n");
274 DRM_DEBUG_DRIVER("hw status page @ %p\n",
275 ring->status_page.page_addr);
/* gfx_addr != 0 means an X-server-provided (GFX HWS) page; else physical. */
276 if (ring->status_page.gfx_addr != 0)
277 intel_ring_setup_status_page(ring);
279 i915_write_hws_pga(dev);
281 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
/*
 * i915_dma_init - DRM_IOCTL_I915_INIT handler.  Rejected under KMS;
 * otherwise dispatches on init->func to initialize, clean up, or resume
 * the legacy DMA machinery.
 */
286 static int i915_dma_init(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
289 drm_i915_init_t *init = data;
292 if (drm_core_check_feature(dev, DRIVER_MODESET))
295 switch (init->func) {
297 retcode = i915_initialize(dev, init);
299 case I915_CLEANUP_DMA:
300 retcode = i915_dma_cleanup(dev);
302 case I915_RESUME_DMA:
303 retcode = i915_dma_resume(dev);
313 /* Implement basically the same security restrictions as hardware does
314 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
316 * Most of the calculations below involve calculating the size of a
317 * particular instruction. It's important to get the size right as
318 * that tells us where the next instruction to check is. Any illegal
319 * instruction detected will be given a size of zero, which is a
320 * signal to abort the rest of the buffer.
/*
 * validate_cmd - compute the length in dwords of a single GPU command,
 * dispatching on its opcode class (bits 31:29).  A return of 0 marks the
 * command as illegal/unparseable and aborts validation of the rest of
 * the buffer (see the security comment above this function in the full
 * file).  Several case labels fall between the lines visible here.
 */
322 static int validate_cmd(int cmd)
324 switch (((cmd >> 29) & 0x7)) {
/* MI commands: only NOOP and FLUSH are permitted. */
326 switch ((cmd >> 23) & 0x3f) {
328 return 1; /* MI_NOOP */
330 return 1; /* MI_FLUSH */
332 return 0; /* disallow everything else */
336 return 0; /* reserved */
338 return (cmd & 0xff) + 2; /* 2d commands */
340 if (((cmd >> 24) & 0x1f) <= 0x18)
343 switch ((cmd >> 24) & 0x1f) {
347 switch ((cmd >> 16) & 0xff) {
349 return (cmd & 0x1f) + 2;
351 return (cmd & 0xf) + 2;
353 return (cmd & 0xffff) + 2;
357 return (cmd & 0xffff) + 1;
361 if ((cmd & (1 << 23)) == 0) /* inline vertices */
362 return (cmd & 0x1ffff) + 2;
363 else if (cmd & (1 << 17)) /* indirect random */
364 if ((cmd & 0xffff) == 0)
365 return 0; /* unknown length, too hard */
/* indirect random with a count: one dword per two vertices, rounded up */
367 return (((cmd & 0xffff) + 1) / 2) + 1;
369 return 2; /* indirect sequential */
/*
 * i915_emit_cmds - validate a user command buffer with validate_cmd()
 * and, if every command is legal and fits, copy it into the ring
 * (padded to an even dword count by BEGIN_LP_RING((dwords+1)&~1)).
 * Rejects buffers that would not leave slack in the ring.
 */
380 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
382 drm_i915_private_t *dev_priv = dev->dev_private;
385 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
/* First pass: validate each command and step by its computed size. */
388 for (i = 0; i < dwords;) {
389 int sz = validate_cmd(buffer[i]);
/* sz == 0 means illegal command; i + sz > dwords means it overruns the buffer. */
390 if (sz == 0 || i + sz > dwords)
395 ret = BEGIN_LP_RING((dwords+1)&~1);
/* Second pass: emit the now-validated dwords into the ring. */
399 for (i = 0; i < dwords; i++)
/*
 * i915_emit_box - emit a DRAWRECT (clip rectangle) command for one box.
 * Rejects degenerate or negative boxes, then uses the gen4+ or the
 * older command form as appropriate.  The hardware takes inclusive
 * coordinates, hence the "- 1" on x2/y2.
 */
410 i915_emit_box(struct drm_device *dev,
411 struct drm_clip_rect *box,
414 struct drm_i915_private *dev_priv = dev->dev_private;
417 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
418 box->y2 <= 0 || box->x2 <= 0) {
419 DRM_ERROR("Bad box %d,%d..%d,%d\n",
420 box->x1, box->y1, box->x2, box->y2);
424 if (INTEL_INFO(dev)->gen >= 4) {
425 ret = BEGIN_LP_RING(4);
429 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
430 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
431 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
/* pre-gen4 variant uses the longer 6-dword form. */
434 ret = BEGIN_LP_RING(6);
438 OUT_RING(GFX_OP_DRAWRECT_INFO);
440 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
441 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
450 /* XXX: Emitting the counter should really be moved to part of the IRQ
451 * emit. For now, do it in both places:
/*
 * i915_emit_breadcrumb - bump the DRI1 sequence counter (wrapping at
 * 0x7FFFFFFF back to 0), mirror it into the sarea, and emit a
 * MI_STORE_DWORD_INDEX that writes the counter into the hardware status
 * page at I915_BREADCRUMB_INDEX.  Note: the ring write is silently
 * skipped if BEGIN_LP_RING() fails.
 */
454 static void i915_emit_breadcrumb(struct drm_device *dev)
456 drm_i915_private_t *dev_priv = dev->dev_private;
458 dev_priv->dri1.counter++;
459 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
460 dev_priv->dri1.counter = 0;
461 if (dev_priv->sarea_priv)
462 dev_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
464 if (BEGIN_LP_RING(4) == 0) {
465 OUT_RING(MI_STORE_DWORD_INDEX);
466 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
467 OUT_RING(dev_priv->dri1.counter);
/*
 * i915_dispatch_cmdbuffer - dispatch a validated user command buffer,
 * once per clip rectangle (or once with no cliprects), then emit a
 * breadcrumb.  Alignment checking precedes dispatch; the exact condition
 * checked falls outside this truncated view.
 */
473 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
474 drm_i915_cmdbuffer_t *cmd,
475 struct drm_clip_rect *cliprects,
478 int nbox = cmd->num_cliprects;
479 int i = 0, count, ret;
482 DRM_ERROR("alignment");
486 i915_kernel_lost_context(dev);
/* Zero cliprects still means one dispatch pass. */
488 count = nbox ? nbox : 1;
490 for (i = 0; i < count; i++) {
492 ret = i915_emit_box(dev, &cliprects[i],
/* cmd->sz is in bytes; i915_emit_cmds takes dwords. */
498 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
503 i915_emit_breadcrumb(dev);
/*
 * i915_dispatch_batchbuffer - start a non-secure client batch buffer,
 * once per clip rectangle.  Requires 8-byte alignment of both start and
 * length.  Uses MI_BATCH_BUFFER_START on most hardware but the explicit
 * start/end MI_BATCH_BUFFER form on i830/845G; on G4X/gen5 an extra
 * invalidating flush is emitted afterwards.  Finishes with a breadcrumb.
 */
507 static int i915_dispatch_batchbuffer(struct drm_device * dev,
508 drm_i915_batchbuffer_t * batch,
509 struct drm_clip_rect *cliprects)
511 struct drm_i915_private *dev_priv = dev->dev_private;
512 int nbox = batch->num_cliprects;
515 if ((batch->start | batch->used) & 0x7) {
516 DRM_ERROR("alignment");
520 i915_kernel_lost_context(dev);
522 count = nbox ? nbox : 1;
523 for (i = 0; i < count; i++) {
525 ret = i915_emit_box(dev, &cliprects[i],
526 batch->DR1, batch->DR4);
531 if (!IS_I830(dev) && !IS_845G(dev)) {
532 ret = BEGIN_LP_RING(2);
536 if (INTEL_INFO(dev)->gen >= 4) {
/* gen4+: the non-secure bit lives in the command dword itself. */
537 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
538 OUT_RING(batch->start);
/* pre-gen4: the non-secure bit is OR'd into the start address. */
540 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
541 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
/* i830/845G path: give explicit start and inclusive end addresses. */
544 ret = BEGIN_LP_RING(4);
548 OUT_RING(MI_BATCH_BUFFER);
549 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
550 OUT_RING(batch->start + batch->used - 4);
557 if (IS_G4X(dev) || IS_GEN5(dev)) {
558 if (BEGIN_LP_RING(2) == 0) {
559 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
565 i915_emit_breadcrumb(dev);
/*
 * i915_dispatch_flip - emit a legacy page flip: flush, async display
 * buffer switch to whichever of front/back is not current, a wait for
 * the plane-A flip event, then a breadcrumb store recording the bumped
 * counter.  Updates the shared sarea's notion of the current page.
 * Requires a sarea to be mapped.
 */
569 static int i915_dispatch_flip(struct drm_device * dev)
571 drm_i915_private_t *dev_priv = dev->dev_private;
574 if (!dev_priv->sarea_priv)
577 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
579 dev_priv->dri1.current_page,
580 dev_priv->sarea_priv->pf_current_page);
582 i915_kernel_lost_context(dev);
584 ret = BEGIN_LP_RING(10);
588 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
591 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
/* Toggle between the two buffers tracked by current_page. */
593 if (dev_priv->dri1.current_page == 0) {
594 OUT_RING(dev_priv->dri1.back_offset);
595 dev_priv->dri1.current_page = 1;
597 OUT_RING(dev_priv->dri1.front_offset);
598 dev_priv->dri1.current_page = 0;
602 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
607 dev_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
609 if (BEGIN_LP_RING(4) == 0) {
610 OUT_RING(MI_STORE_DWORD_INDEX);
611 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
612 OUT_RING(dev_priv->dri1.counter);
617 dev_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
/*
 * i915_quiescent - resync ring state, then block until the render ring
 * is idle.  Returns the result of intel_ring_idle().
 */
621 static int i915_quiescent(struct drm_device *dev)
623 i915_kernel_lost_context(dev);
624 return intel_ring_idle(LP_RING(dev->dev_private));
/*
 * i915_flush_ioctl - DRM flush ioctl: rejected under KMS; otherwise
 * requires the hardware lock (when GEM is not managing the ring) and
 * waits for the ring to go idle.
 */
627 static int i915_flush_ioctl(struct drm_device *dev, void *data,
628 struct drm_file *file_priv)
632 if (drm_core_check_feature(dev, DRIVER_MODESET))
635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
638 ret = i915_quiescent(dev);
/*
 * i915_batchbuffer - DRM_IOCTL_I915_BATCHBUFFER handler.  Rejected under
 * KMS or when batchbuffers are administratively disabled.  Copies the
 * user's cliprect array into a kernel buffer, dispatches the batch, and
 * records the latest breadcrumb in the sarea.  The cliprect buffer is
 * freed on exit.
 * NOTE(review): batch->num_cliprects is multiplied by
 * sizeof(struct drm_clip_rect) for the allocation; any overflow guard is
 * not visible in this truncated view — confirm it exists.
 */
644 static int i915_batchbuffer(struct drm_device *dev, void *data,
645 struct drm_file *file_priv)
647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
648 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
649 drm_i915_batchbuffer_t *batch = data;
651 struct drm_clip_rect *cliprects = NULL;
653 if (drm_core_check_feature(dev, DRIVER_MODESET))
656 if (!dev_priv->dri1.allow_batchbuffer) {
657 DRM_ERROR("Batchbuffer ioctl disabled\n");
661 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
662 batch->start, batch->used, batch->num_cliprects);
664 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
666 if (batch->num_cliprects < 0)
669 if (batch->num_cliprects) {
670 cliprects = kmalloc(batch->num_cliprects *
671 sizeof(struct drm_clip_rect), DRM_MEM_DMA,
673 if (cliprects == NULL)
676 ret = copy_from_user(cliprects, batch->cliprects,
677 batch->num_cliprects *
678 sizeof(struct drm_clip_rect));
686 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
690 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
693 kfree(cliprects, DRM_MEM_DMA);
/*
 * i915_cmdbuffer - DRM_IOCTL_I915_CMDBUFFER handler.  Copies the user
 * command buffer and optional cliprect array into kernel memory,
 * dispatches via i915_dispatch_cmdbuffer(), records the breadcrumb in
 * the sarea, and frees both temporary buffers via the fail_* paths.
 * NOTE(review): as with i915_batchbuffer, the num_cliprects * sizeof()
 * multiplication has no visible overflow guard in this truncated view.
 */
697 static int i915_cmdbuffer(struct drm_device *dev, void *data,
698 struct drm_file *file_priv)
700 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
701 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
702 drm_i915_cmdbuffer_t *cmdbuf = data;
703 struct drm_clip_rect *cliprects = NULL;
707 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
708 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
710 if (drm_core_check_feature(dev, DRIVER_MODESET))
713 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
715 if (cmdbuf->num_cliprects < 0)
718 batch_data = kmalloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
719 if (batch_data == NULL)
/* copyin() returns a positive errno; negate to the kernel convention. */
722 ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
725 goto fail_batch_free;
728 if (cmdbuf->num_cliprects) {
729 cliprects = kmalloc(cmdbuf->num_cliprects *
730 sizeof(struct drm_clip_rect), DRM_MEM_DMA,
732 if (cliprects == NULL) {
734 goto fail_batch_free;
737 ret = copy_from_user(cliprects, cmdbuf->cliprects,
738 cmdbuf->num_cliprects *
739 sizeof(struct drm_clip_rect));
747 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
750 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
755 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
758 drm_free(cliprects, DRM_MEM_DMA);
760 drm_free(batch_data, DRM_MEM_DMA);
/*
 * i915_emit_irq - bump the DRI1 sequence counter (wrapping to 1, so 0
 * can stay a "nothing emitted" sentinel), store it into the status page
 * and emit MI_USER_INTERRUPT.  Returns the new counter value so the
 * caller can later wait for it.  The ring write is skipped (but the
 * counter still returned) if BEGIN_LP_RING() fails.
 */
764 static int i915_emit_irq(struct drm_device * dev)
766 drm_i915_private_t *dev_priv = dev->dev_private;
768 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
771 i915_kernel_lost_context(dev);
773 DRM_DEBUG_DRIVER("\n");
775 dev_priv->dri1.counter++;
776 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
777 dev_priv->dri1.counter = 1;
778 if (dev_priv->sarea_priv)
779 dev_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
781 if (BEGIN_LP_RING(4) == 0) {
782 OUT_RING(MI_STORE_DWORD_INDEX);
783 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
784 OUT_RING(dev_priv->dri1.counter);
785 OUT_RING(MI_USER_INTERRUPT);
789 return dev_priv->dri1.counter;
/*
 * i915_wait_irq - wait until the breadcrumb in the status page reaches
 * irq_nr.  Fast-paths the already-complete case, flags I915_BOX_WAIT in
 * the sarea, then either sleeps on the ring's irq wait queue (3s
 * timeout) or falls back to polling via wait_for() when the interrupt
 * cannot be acquired.  Logs EBUSY with received vs. emitted counters on
 * timeout.  (This view interleaves two variants of the fast path — one
 * through master_priv->sarea_priv, one through dev_priv->sarea_priv;
 * the surrounding control flow is partly elided.)
 */
792 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
796 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
799 struct intel_ring_buffer *ring = LP_RING(dev_priv);
801 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
802 READ_BREADCRUMB(dev_priv));
805 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
806 if (master_priv->sarea_priv)
807 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
811 if (master_priv->sarea_priv)
812 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
814 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
815 if (dev_priv->sarea_priv) {
816 dev_priv->sarea_priv->last_dispatch =
817 READ_BREADCRUMB(dev_priv);
822 if (dev_priv->sarea_priv)
823 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
826 if (ring->irq_get(ring)) {
827 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
828 READ_BREADCRUMB(dev_priv) >= irq_nr);
830 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
834 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
835 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
841 /* Needs the lock as it touches the ring.
/*
 * i915_irq_emit - DRM_IOCTL_I915_IRQ_EMIT handler (needs the hardware
 * lock: it touches the ring).  Emits an interrupt-generating breadcrumb
 * and copies the resulting sequence number out to userspace.
 */
843 static int i915_irq_emit(struct drm_device *dev, void *data,
844 struct drm_file *file_priv)
846 drm_i915_private_t *dev_priv = dev->dev_private;
847 drm_i915_irq_emit_t *emit = data;
850 if (drm_core_check_feature(dev, DRIVER_MODESET))
853 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
854 DRM_ERROR("called with no initialization\n");
858 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
861 result = i915_emit_irq(dev);
864 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
865 DRM_ERROR("copy_to_user\n");
872 /* Doesn't need the hardware lock.
/*
 * i915_irq_wait - DRM_IOCTL_I915_IRQ_WAIT handler (no hardware lock
 * needed).  Waits for the user-supplied sequence number via
 * i915_wait_irq().
 */
874 static int i915_irq_wait(struct drm_device *dev, void *data,
875 struct drm_file *file_priv)
877 drm_i915_private_t *dev_priv = dev->dev_private;
878 drm_i915_irq_wait_t *irqwait = data;
880 if (drm_core_check_feature(dev, DRIVER_MODESET))
884 DRM_ERROR("called with no initialization\n");
888 return i915_wait_irq(dev, irqwait->irq_seq);
/*
 * i915_vblank_pipe_get - legacy ioctl reporting which pipes support
 * vblank; always claims pipes A and B.
 */
891 static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
892 struct drm_file *file_priv)
894 drm_i915_private_t *dev_priv = dev->dev_private;
895 drm_i915_vblank_pipe_t *pipe = data;
897 if (drm_core_check_feature(dev, DRIVER_MODESET))
901 DRM_ERROR("called with no initialization\n");
905 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
911 * Schedule buffer swap at given vertical blank.
/*
 * i915_vblank_swap - intentionally unimplemented; the in-body comment
 * explains why the delayed-swap mechanism was removed.
 */
913 static int i915_vblank_swap(struct drm_device *dev, void *data,
914 struct drm_file *file_priv)
916 /* The delayed swap mechanism was fundamentally racy, and has been
917 * removed. The model was that the client requested a delayed flip/swap
918 * from the kernel, then waited for vblank before continuing to perform
919 * rendering. The problem was that the kernel might wake the client
920 * up before it dispatched the vblank swap (since the lock has to be
921 * held while touching the ringbuffer), in which case the client would
922 * clear and start the next frame before the swap occurred, and
923 * flicker would occur in addition to likely missing the vblank.
925 * In the absence of this ioctl, userland falls back to a correct path
926 * of waiting for a vblank, then dispatching the swap on its own.
927 * Context switching to userland and back is plenty fast enough for
928 * meeting the requirements of vblank swapping.
/*
 * i915_flip_bufs - DRM_IOCTL_I915_FLIP handler: rejected under KMS;
 * otherwise takes the ring lock and performs a legacy page flip.
 */
933 static int i915_flip_bufs(struct drm_device *dev, void *data,
934 struct drm_file *file_priv)
938 if (drm_core_check_feature(dev, DRIVER_MODESET))
941 DRM_DEBUG_DRIVER("%s\n", __func__);
943 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
946 ret = i915_dispatch_flip(dev);
/*
 * i915_getparam - DRM_IOCTL_I915_GETPARAM handler: look up a driver
 * capability/state value by param->param and copy it to userspace.
 * Several case values report constants whose literal lines fall outside
 * this truncated view.
 */
952 static int i915_getparam(struct drm_device *dev, void *data,
953 struct drm_file *file_priv)
955 drm_i915_private_t *dev_priv = dev->dev_private;
956 drm_i915_getparam_t *param = data;
960 DRM_ERROR("called with no initialization\n");
964 switch (param->param) {
965 case I915_PARAM_IRQ_ACTIVE:
966 value = dev->irq_enabled ? 1 : 0;
968 case I915_PARAM_ALLOW_BATCHBUFFER:
969 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
971 case I915_PARAM_LAST_DISPATCH:
972 value = READ_BREADCRUMB(dev_priv);
974 case I915_PARAM_CHIPSET_ID:
975 value = dev->pci_device;
977 case I915_PARAM_HAS_GEM:
980 case I915_PARAM_NUM_FENCES_AVAIL:
981 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
983 case I915_PARAM_HAS_OVERLAY:
984 value = dev_priv->overlay ? 1 : 0;
986 case I915_PARAM_HAS_PAGEFLIPPING:
989 case I915_PARAM_HAS_EXECBUF2:
993 case I915_PARAM_HAS_BSD:
994 value = intel_ring_initialized(&dev_priv->ring[VCS]);
996 case I915_PARAM_HAS_BLT:
997 value = intel_ring_initialized(&dev_priv->ring[BCS]);
999 case I915_PARAM_HAS_RELAXED_FENCING:
1002 case I915_PARAM_HAS_COHERENT_RINGS:
1005 case I915_PARAM_HAS_EXEC_CONSTANTS:
1006 value = INTEL_INFO(dev)->gen >= 4;
1008 case I915_PARAM_HAS_RELAXED_DELTA:
1011 case I915_PARAM_HAS_GEN7_SOL_RESET:
1014 case I915_PARAM_HAS_LLC:
1015 value = HAS_LLC(dev);
1017 case I915_PARAM_HAS_ALIASING_PPGTT:
1018 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1020 case I915_PARAM_HAS_WAIT_TIMEOUT:
1023 case I915_PARAM_HAS_PINNED_BATCHES:
1027 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1032 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1033 DRM_ERROR("DRM_COPY_TO_USER failed\n");
/*
 * i915_setparam - DRM_IOCTL_I915_SETPARAM handler: allows userspace to
 * toggle batchbuffer permission and reserve the first N fence registers
 * for its own use; the two legacy TEX/BATCHBUFFER_START params are
 * accepted as no-ops.
 */
1040 static int i915_setparam(struct drm_device *dev, void *data,
1041 struct drm_file *file_priv)
1043 drm_i915_private_t *dev_priv = dev->dev_private;
1044 drm_i915_setparam_t *param = data;
1047 DRM_ERROR("called with no initialization\n");
1051 switch (param->param) {
1052 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1054 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1056 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1057 dev_priv->dri1.allow_batchbuffer = param->value;
1059 case I915_SETPARAM_NUM_USED_FENCES:
1060 if (param->value > dev_priv->num_fence_regs ||
1063 /* Userspace can use first N regs */
1064 dev_priv->fence_reg_start = param->value;
1067 DRM_DEBUG_DRIVER("unknown parameter %d\n",
/*
 * i915_set_status_page - DRM_IOCTL_I915_HWS_ADDR handler: accept a
 * status page at a GFX (GTT) address supplied by the X server (only on
 * hardware that needs a GFX HWS).  Maps 4KB at agp->base + hws->addr
 * write-combined, installs the mapping as the status page, zeroes it,
 * and programs HWS_PGA with the GFX address.  On ioremap failure the
 * whole DMA state is torn down.
 */
1075 static int i915_set_status_page(struct drm_device *dev, void *data,
1076 struct drm_file *file_priv)
1078 drm_i915_private_t *dev_priv = dev->dev_private;
1079 drm_i915_hws_addr_t *hws = data;
1080 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1082 if (drm_core_check_feature(dev, DRIVER_MODESET))
1085 if (!I915_NEED_GFX_HWS(dev))
1089 DRM_ERROR("called with no initialization\n");
1093 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1094 WARN(1, "tried to set status page when mode setting active\n");
/* Keep only the page-aligned GFX address bits (up to 29 bits). */
1098 ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1099 hws->addr & (0x1ffff<<12);
1101 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
1102 dev_priv->hws_map.size = 4*1024;
1103 dev_priv->hws_map.type = 0;
1104 dev_priv->hws_map.flags = 0;
1105 dev_priv->hws_map.mtrr = 0;
1107 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
1108 if (dev_priv->hws_map.virtual == NULL) {
1109 i915_dma_cleanup(dev);
1110 ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1111 DRM_ERROR("can not ioremap virtual address for"
1112 " G33 hw status page\n");
1115 ring->status_page.page_addr = dev_priv->hw_status_page =
1116 dev_priv->hws_map.virtual;
1118 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
1119 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1121 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1122 ring->status_page.gfx_addr);
1123 DRM_DEBUG_DRIVER("load hws at %p\n",
1124 ring->status_page.page_addr);
/*
 * i915_get_bridge_dev - locate the host bridge (PCI 0:0:0.0) whose
 * config space holds the MCHBAR registers; cache it in dev_priv.
 */
1128 static int i915_get_bridge_dev(struct drm_device *dev)
1130 struct drm_i915_private *dev_priv = dev->dev_private;
1132 dev_priv->bridge_dev = pci_find_dbsf(0, 0, 0, 0);
1133 if (!dev_priv->bridge_dev) {
1134 DRM_ERROR("bridge device not found\n");
1140 #define MCHBAR_I915 0x44
1141 #define MCHBAR_I965 0x48
1142 #define MCHBAR_SIZE (4*4096)
1144 #define DEVEN_REG 0x54
1145 #define DEVEN_MCHBAR_EN (1 << 28)
1147 /* Allocate space for the MCH regs if needed, return nonzero on error */
/*
 * intel_alloc_mchbar_resource - if the BIOS/ACPI did not already assign
 * an MCHBAR address, allocate MCHBAR_SIZE of memory space from the PCI
 * bus (via the VGA device's grandparent bus) and write the resulting
 * base back into the bridge's MCHBAR config register (64-bit register
 * on gen4+, 32-bit before).  Returns nonzero on allocation failure.
 */
1149 intel_alloc_mchbar_resource(struct drm_device *dev)
1151 drm_i915_private_t *dev_priv = dev->dev_private;
1152 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1154 u32 temp_lo, temp_hi;
1155 u64 mchbar_addr, temp;
/* gen4+ MCHBAR is a 64-bit register: read the high dword too. */
1157 if (INTEL_INFO(dev)->gen >= 4)
1158 temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1161 temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1162 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1164 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
1167 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1171 /* Get some space for it */
1172 vga = device_get_parent(dev->dev);
1173 dev_priv->mch_res_rid = 0x100;
1174 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1175 dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1176 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
1177 if (dev_priv->mch_res == NULL) {
1178 DRM_ERROR("failed mchbar resource alloc\n");
1182 if (INTEL_INFO(dev)->gen >= 4) {
1183 temp = rman_get_start(dev_priv->mch_res);
1185 pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1187 pci_write_config(dev_priv->bridge_dev, reg,
1188 rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1192 /* Setup MCHBAR if possible, return true if we should disable it again */
/*
 * intel_setup_mchbar - make sure MCHBAR is enabled: on i915G/GM the
 * enable bit lives in DEVEN, otherwise in bit 0 of the MCHBAR register
 * itself.  If not already enabled, allocate the address space and set
 * the enable bit, remembering (mchbar_need_disable) to undo this at
 * teardown.
 */
1194 intel_setup_mchbar(struct drm_device *dev)
1196 drm_i915_private_t *dev_priv = dev->dev_private;
1197 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1201 dev_priv->mchbar_need_disable = false;
1203 if (IS_I915G(dev) || IS_I915GM(dev)) {
1204 temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1205 enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1207 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1211 /* If it's already enabled, don't have to do anything */
1215 if (intel_alloc_mchbar_resource(dev))
1218 dev_priv->mchbar_need_disable = true;
1220 /* Space is allocated or reserved, so enable it. */
1221 if (IS_I915G(dev) || IS_I915GM(dev)) {
1222 pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1223 temp | DEVEN_MCHBAR_EN, 4);
1225 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1226 pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
/*
 * intel_teardown_mchbar - undo intel_setup_mchbar(): clear the enable
 * bit we set (DEVEN on i915G/GM, MCHBAR bit 0 otherwise) and release
 * the bus resource we allocated, if any.
 */
1231 intel_teardown_mchbar(struct drm_device *dev)
1233 drm_i915_private_t *dev_priv = dev->dev_private;
1234 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1238 if (dev_priv->mchbar_need_disable) {
1239 if (IS_I915G(dev) || IS_I915GM(dev)) {
1240 temp = pci_read_config(dev_priv->bridge_dev,
1242 temp &= ~DEVEN_MCHBAR_EN;
1243 pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1246 temp = pci_read_config(dev_priv->bridge_dev,
1249 pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1254 if (dev_priv->mch_res != NULL) {
1255 vga = device_get_parent(dev->dev);
1256 BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1257 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1258 BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1259 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1260 dev_priv->mch_res = NULL;
/*
 * i915_load_modeset_init - KMS-path driver bring-up: parse the VBIOS,
 * register with the VGA arbiter and vga_switcheroo, initialize stolen
 * memory, modesetting, GEM, interrupts and fbdev, then start KMS
 * polling.  Resources are unwound in reverse order via the cleanup_*
 * labels at the bottom on any failure.
 */
1264 static int i915_load_modeset_init(struct drm_device *dev)
1266 struct drm_i915_private *dev_priv = dev->dev_private;
1269 ret = intel_parse_bios(dev);
/* A missing VBIOS is non-fatal; just note it. */
1271 DRM_INFO("failed to find VBIOS tables\n");
1274 /* If we have > 1 VGA cards, then we need to arbitrate access
1275 * to the common VGA resources.
1277 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1278 * then we do not take part in VGA arbitration and the
1279 * vga_client_register() fails with -ENODEV.
1281 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1282 if (ret && ret != -ENODEV)
1285 intel_register_dsm_handler();
1287 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
1289 goto cleanup_vga_client;
1291 /* Initialise stolen first so that we may reserve preallocated
1292 * objects for the BIOS to KMS transition.
1294 ret = i915_gem_init_stolen(dev);
1296 goto cleanup_vga_switcheroo;
1299 intel_modeset_init(dev);
1301 ret = i915_gem_init(dev);
1303 goto cleanup_gem_stolen;
1305 intel_modeset_gem_init(dev);
1307 ret = drm_irq_install(dev);
1311 /* Always safe in the mode setting case. */
1312 /* FIXME: do pre/post-mode set stuff in core KMS code */
1313 dev->vblank_disable_allowed = 1;
1315 ret = intel_fbdev_init(dev);
1319 drm_kms_helper_poll_init(dev);
1321 /* We're off and running w/KMS */
1322 dev_priv->mm.suspended = 0;
/* Error unwind: reverse order of the setup steps above. */
1327 drm_irq_uninstall(dev);
1330 i915_gem_cleanup_ringbuffer(dev);
1332 i915_gem_cleanup_aliasing_ppgtt(dev);
1335 i915_gem_cleanup_stolen(dev);
1336 cleanup_vga_switcheroo:
1337 vga_switcheroo_unregister_client(dev->pdev);
1339 vga_client_register(dev->pdev, NULL, NULL, NULL);
1346 * i915_driver_load - setup chip and create an initial config
1348 * @flags: startup flags
1350 * The driver load routine has to do several things:
1351 * - drive output discovery via intel_modeset_init()
1352 * - initialize the memory manager
1353 * - allocate initial config memory
1354 * - setup the DRM framebuffer with the allocated memory
/*
 * i915_driver_load - top-level driver load: allocate dev_priv, find the
 * host bridge, map the MMIO BAR, create the ordered driver workqueue,
 * detect the PCH, set up interrupts/MCHBAR/GMBUS/opregion/BIOS, create
 * locks, install the physical status page on pre-GFX-HWS hardware,
 * initialize vblank for the detected pipe count, and (under KMS) run
 * the full modeset init.  Error paths unwind through out_gem_unload and
 * the teardown calls near the bottom.
 */
1356 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1358 struct drm_i915_private *dev_priv = dev->dev_private;
1359 unsigned long base, size;
1364 /* i915 has 4 more counters */
1366 dev->types[6] = _DRM_STAT_IRQ;
1367 dev->types[7] = _DRM_STAT_PRIMARY;
1368 dev->types[8] = _DRM_STAT_SECONDARY;
1369 dev->types[9] = _DRM_STAT_DMA;
1371 dev_priv = kmalloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1373 if (dev_priv == NULL)
1376 dev->dev_private = (void *)dev_priv;
1377 dev_priv->dev = dev;
1378 dev_priv->info = i915_get_device_id(dev->pci_device);
1380 if (i915_get_bridge_dev(dev)) {
1381 drm_free(dev_priv, DRM_MEM_DRIVER);
1384 dev_priv->mm.gtt = intel_gtt_get();
1386 /* Add register map (needed for suspend/resume) */
/* gen2 exposes its registers in BAR 1, later generations in BAR 0. */
1387 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1388 base = drm_get_resource_start(dev, mmio_bar);
1389 size = drm_get_resource_len(dev, mmio_bar);
1391 ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1392 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1394 /* The i915 workqueue is primarily used for batched retirement of
1395 * requests (and thus managing bo) once the task has been completed
1396 * by the GPU. i915_gem_retire_requests() is called directly when we
1397 * need high-priority retirement, such as waiting for an explicit
1400 * It is also used for periodic low-priority events, such as
1401 * idle-timers and recording error state.
1403 * All tasks on the workqueue are expected to acquire the dev mutex
1404 * so there is no point in running more than one instance of the
1405 * workqueue at any time. Use an ordered one.
1407 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1408 if (dev_priv->wq == NULL) {
1409 DRM_ERROR("Failed to create our workqueue.\n");
1414 /* This must be called before any calls to HAS_PCH_* */
1415 intel_detect_pch(dev);
1417 intel_irq_init(dev);
1420 /* Try to make sure MCHBAR is enabled before poking at it */
1421 intel_setup_mchbar(dev);
1422 intel_setup_gmbus(dev);
1423 intel_opregion_setup(dev);
1425 intel_setup_bios(dev);
1429 /* On the 945G/GM, the chipset reports the MSI capability on the
1430 * integrated graphics even though the support isn't actually there
1431 * according to the published specs. It doesn't appear to function
1432 * correctly in testing on 945G.
1433 * This may be a side effect of MSI having been made available for PEG
1434 * and the registers being closely associated.
1436 * According to chipset errata, on the 965GM, MSI interrupts may
1437 * be lost or delayed, but we use them anyways to avoid
1438 * stuck interrupts on some machines.
1441 lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
1442 lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
1443 spin_init(&dev_priv->rps.lock, "i915initrps");
1444 spin_init(&dev_priv->dpio_lock, "i915initdpio");
1446 lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
/* Hardware without a GFX HWS needs a physically-addressed status page. */
1449 if (!I915_NEED_GFX_HWS(dev)) {
1450 ret = i915_init_phys_hws(dev);
1452 drm_rmmap(dev, dev_priv->mmio_map);
1453 drm_free(dev_priv, DRM_MEM_DRIVER);
/* Pipe count: 3 on IVB/HSW, 2 on mobile or gen3+, 1 otherwise. */
1458 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1459 dev_priv->num_pipe = 3;
1460 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1461 dev_priv->num_pipe = 2;
1463 dev_priv->num_pipe = 1;
1465 ret = drm_vblank_init(dev, dev_priv->num_pipe);
1467 goto out_gem_unload;
1469 /* Start out suspended */
1470 dev_priv->mm.suspended = 1;
1472 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1473 ret = i915_load_modeset_init(dev);
1475 DRM_ERROR("failed to init modeset\n");
1476 goto out_gem_unload;
1480 /* Must be done after probing outputs */
1481 intel_opregion_init(dev);
1483 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1484 (unsigned long) dev);
1487 intel_gpu_ips_init(dev_priv);
/* Error unwind: tear down in reverse order of setup. */
1492 intel_teardown_gmbus(dev);
1493 intel_teardown_mchbar(dev);
1494 destroy_workqueue(dev_priv->wq);
/*
 * i915_driver_unload - tear down everything i915_driver_load() set up.
 * @dev: DRM device being unloaded
 *
 * Quiesces the GPU, cancels deferred work, shuts down modeset/fbdev
 * state, releases GEM objects and rings, unmaps the register BAR and
 * frees the per-device soft state. Return value lines are not visible
 * in this extract.
 *
 * NOTE(review): interior lines (braces, some error handling) are missing
 * from this extract; comments cover only the visible code.
 */
1499 int i915_driver_unload(struct drm_device *dev)
1501 struct drm_i915_private *dev_priv = dev->dev_private;
1504 intel_gpu_ips_teardown();
/* Wait for the GPU to go idle, then retire what it completed; a failed
 * idle is logged but teardown continues. */
1507 ret = i915_gpu_idle(dev);
1509 DRM_ERROR("failed to idle hardware: %d\n", ret);
1510 i915_gem_retire_requests(dev);
1513 /* Cancel the retire work handler, which should be idle now. */
1514 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1518 intel_teardown_mchbar(dev);
/* KMS-only display teardown. */
1520 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1521 intel_fbdev_fini(dev);
1522 intel_modeset_cleanup(dev);
1525 /* Free error state after interrupts are fully disabled. */
1526 del_timer_sync(&dev_priv->hangcheck_timer);
1527 cancel_work_sync(&dev_priv->error_work);
1528 i915_destroy_error_state(dev);
1530 intel_opregion_fini(dev);
1532 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1533 /* Flush any outstanding unpin_work. */
1534 flush_workqueue(dev_priv->wq);
/* Release GEM objects, rings, the aliasing PPGTT and the stolen-memory
 * allocator. */
1537 i915_gem_free_all_phys_object(dev);
1538 i915_gem_cleanup_ringbuffer(dev);
1540 i915_gem_cleanup_aliasing_ppgtt(dev);
1541 drm_mm_takedown(&dev_priv->mm.stolen);
1543 intel_cleanup_overlay(dev);
1545 if (!I915_NEED_GFX_HWS(dev))
1549 i915_gem_unload(dev);
/* Detach children, unmap registers, destroy the workqueue, and free the
 * soft state last — nothing may touch dev_priv after this. */
1551 bus_generic_detach(dev->dev);
1552 drm_rmmap(dev, dev_priv->mmio_map);
1553 intel_teardown_gmbus(dev);
1555 destroy_workqueue(dev_priv->wq);
1557 drm_free(dev->dev_private, DRM_MEM_DRIVER);
/*
 * i915_driver_open - per-client (file descriptor) initialization.
 * @dev: DRM device being opened
 * @file: the new DRM file handle
 *
 * Allocates the file-private state holding this client's GEM request
 * list and its hardware-context idr, and attaches it to @file.
 * Error-handling/return lines are not visible in this extract.
 */
1562 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1564 struct drm_i915_file_private *file_priv;
1566 DRM_DEBUG_DRIVER("\n");
1567 file_priv = kmalloc(sizeof(*file_priv), DRM_MEM_FILES,
1572 file->driver_priv = file_priv;
/* Per-client request tracking, freed again in i915_driver_postclose(). */
1574 spin_init(&file_priv->mm.lock, "i915_priv");
1575 INIT_LIST_HEAD(&file_priv->mm.request_list);
/* idr mapping context ids to this client's hardware contexts. */
1577 idr_init(&file_priv->context_idr);
1583 * i915_driver_lastclose - clean up after all DRM clients have exited
1586 * Take care of cleaning up after all DRM clients have exited. In the
1587 * mode setting case, we want to restore the kernel's initial mode (just
1588 * in case the last client left us in a bad state).
1590 * Additionally, in the non-mode setting case, we'll tear down the GTT
1591 * and DMA structures, since the kernel won't be using them, and clean
1594 void i915_driver_lastclose(struct drm_device * dev)
1596 drm_i915_private_t *dev_priv = dev->dev_private;
/* KMS case (or load never completed): just restore the fbcon mode and
 * let vga_switcheroo finish any deferred GPU switch; the UMS teardown
 * below is skipped (the early return is not visible in this extract). */
1598 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1602 drm_fb_helper_restore();
1603 vga_switcheroo_process_delayed_switch();
/* UMS path: drop all GEM state and the legacy DMA/ring structures. */
1607 i915_gem_lastclose(dev);
1608 i915_dma_cleanup(dev);
/*
 * i915_driver_preclose - called by the DRM core before a client's file
 * handle is closed; releases the GEM requests still attributed to this
 * client. Further body lines are not visible in this extract.
 */
1611 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1614 i915_gem_release(dev, file_priv);
/*
 * i915_driver_postclose - final per-client teardown: destroy the request
 * lock and free the file-private state allocated in i915_driver_open().
 */
1617 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1619 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1621 spin_uninit(&i915_file_priv->mm.lock);
1622 drm_free(i915_file_priv, DRM_MEM_FILES);
/*
 * i915_ioctls - the driver's ioctl dispatch table.
 *
 * Permission flags: DRM_AUTH = authenticated fd required, DRM_MASTER =
 * DRM master only, DRM_ROOT_ONLY = root only, DRM_UNLOCKED = handler runs
 * without the global DRM lock, DRM_CONTROL_ALLOW = permitted on control
 * nodes. Entries mapped to drm_noop are retired legacy ioctls kept for
 * ABI compatibility.
 */
1625 struct drm_ioctl_desc i915_ioctls[] = {
/* Legacy (pre-GEM / UMS) DMA, irq and heap interface. */
1626 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1627 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1628 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1629 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1630 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1631 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1632 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
1633 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1634 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1635 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1636 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1637 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1638 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1639 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1640 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1641 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1642 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
/* GEM object, execbuffer and tiling interface. */
1643 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1644 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1645 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
1646 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1647 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1648 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1649 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
1650 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
1651 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1652 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1653 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1654 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
1655 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1656 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1657 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
1658 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1659 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1660 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1661 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
1662 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
1663 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1664 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1665 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
/* Display overlay and sprite plane interface (master/control-node only). */
1666 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1667 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1668 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1669 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1670 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
/* Number of entries in i915_ioctls, exported to the DRM core. */
1673 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1676 * This is really ugly: Because old userspace abused the linux agp interface to
1677 * manage the gtt, we need to claim that all intel devices are agp;
1678 * otherwise the drm core refuses to initialize the agp support code.
1680 int i915_driver_device_is_agp(struct drm_device * dev)