1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * $FreeBSD: src/sys/dev/drm2/i915/i915_dma.c,v 1.1 2012/05/22 11:07:44 kib Exp $
31 #include <drm/i915_drm.h>
33 #include "intel_drv.h"
34 #include "intel_ringbuffer.h"
35 #include <linux/workqueue.h>
37 extern struct drm_i915_private *i915_mch_dev;
39 static int i915_driver_unload_int(struct drm_device *dev, bool locked);
41 void i915_update_dri1_breadcrumb(struct drm_device *dev)
44 * The dri breadcrumb update races against the drm master disappearing.
45	 * Instead of trying to fix this (it is far from the only ums issue)
46 * just don't do the update in kms mode.
48 if (drm_core_check_feature(dev, DRIVER_MODESET))
51 /* XXX: don't do it at all actually */
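/*
 * Point HWS_PGA at the physically allocated hardware status page; on gen4+
 * parts the upper address bits (35:32) are folded into register bits 7:4.
 */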
55 static void i915_write_hws_pga(struct drm_device *dev)
57 drm_i915_private_t *dev_priv = dev->dev_private;
60 addr = dev_priv->status_page_dmah->busaddr;
61 if (INTEL_INFO(dev)->gen >= 4)
62 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
63 I915_WRITE(HWS_PGA, addr);
67 * Sets up the hardware status page for devices that need a physical address
70 static int i915_init_phys_hws(struct drm_device *dev)
72 drm_i915_private_t *dev_priv = dev->dev_private;
73 struct intel_ring_buffer *ring = LP_RING(dev_priv);
76 * Program Hardware Status Page
77 * XXXKIB Keep 4GB limit for allocation for now. This method
78	 * of allocation is used on <= 965 hardware, which has several
79	 * errata regarding the use of physical memory above 4 GB.
82 dev_priv->status_page_dmah =
83 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
85 if (!dev_priv->status_page_dmah) {
86 DRM_ERROR("Can not allocate hardware status page\n");
89 ring->status_page.page_addr = dev_priv->hw_status_page =
90 dev_priv->status_page_dmah->vaddr;
91 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
93 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
95 i915_write_hws_pga(dev);
96 DRM_DEBUG("Enabled hardware status page, phys %jx\n",
97 (uintmax_t)dev_priv->dma_status_page);
102 * Frees the hardware status page, whether it's a physical address or a virtual
103 * address set up by the X Server.
105 static void i915_free_hws(struct drm_device *dev)
107 drm_i915_private_t *dev_priv = dev->dev_private;
108 struct intel_ring_buffer *ring = LP_RING(dev_priv);
110 if (dev_priv->status_page_dmah) {
111 drm_pci_free(dev, dev_priv->status_page_dmah);
112 dev_priv->status_page_dmah = NULL;
115 if (dev_priv->status_gfx_addr) {
116 dev_priv->status_gfx_addr = 0;
117 ring->status_page.gfx_addr = 0;
118 drm_core_ioremapfree(&dev_priv->hws_map, dev);
121 /* Need to rewrite hardware status page */
122 I915_WRITE(HWS_PGA, 0x1ffff000);
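/*
 * Resynchronize the software copy of the ring head/tail/space with the
 * hardware after userspace (UMS) may have touched the ring behind our back.
 */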
125 void i915_kernel_lost_context(struct drm_device * dev)
127 drm_i915_private_t *dev_priv = dev->dev_private;
128 struct intel_ring_buffer *ring = LP_RING(dev_priv);
131 * We should never lose context on the ring with modesetting
132 * as we don't expose it to userspace
134 if (drm_core_check_feature(dev, DRIVER_MODESET))
137 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
138 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
139 ring->space = ring->head - (ring->tail + 8);
141 ring->space += ring->size;
146 if (!dev->primary->master)
150 if (ring->head == ring->tail && dev_priv->sarea_priv)
151 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
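/*
 * Tear down the legacy DMA state: disable interrupts, clean up every ring
 * and release the GFX hardware status page if one was set up.
 */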
154 static int i915_dma_cleanup(struct drm_device * dev)
156 drm_i915_private_t *dev_priv = dev->dev_private;
160 /* Make sure interrupts are disabled here because the uninstall ioctl
161 * may not have been called from userspace and after dev_private
162 * is freed, it's too late.
164 if (dev->irq_enabled)
165 drm_irq_uninstall(dev);
167 for (i = 0; i < I915_NUM_RINGS; i++)
168 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
170 /* Clear the HWS virtual address at teardown */
171 if (I915_NEED_GFX_HWS(dev))
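/*
 * Legacy (UMS) initialization: locate the SAREA, set up the render ring from
 * the parameters supplied by userspace and record the buffer layout.
 */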
177 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
179 drm_i915_private_t *dev_priv = dev->dev_private;
182 dev_priv->sarea = drm_getsarea(dev);
183 if (!dev_priv->sarea) {
184 DRM_ERROR("can not find sarea!\n");
185 i915_dma_cleanup(dev);
189 dev_priv->sarea_priv = (drm_i915_sarea_t *)
190 ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
192 if (init->ring_size != 0) {
193 if (LP_RING(dev_priv)->obj != NULL) {
194 i915_dma_cleanup(dev);
195 DRM_ERROR("Client tried to initialize ringbuffer in "
200 ret = intel_render_ring_init_dri(dev,
204 i915_dma_cleanup(dev);
209 dev_priv->cpp = init->cpp;
210 dev_priv->back_offset = init->back_offset;
211 dev_priv->front_offset = init->front_offset;
212 dev_priv->current_page = 0;
213 dev_priv->sarea_priv->pf_current_page = 0;
215 /* Allow hardware batchbuffers unless told otherwise.
217 dev_priv->allow_batchbuffer = 1;
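/*
 * Resume the legacy DMA engine: check that the ring mapping and hardware
 * status page are still present and rewrite HWS_PGA.
 */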
222 static int i915_dma_resume(struct drm_device * dev)
224 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
225 struct intel_ring_buffer *ring = LP_RING(dev_priv);
229 if (ring->map.handle == NULL) {
230 DRM_ERROR("can not ioremap virtual address for"
235 /* Program Hardware Status Page */
236 if (!ring->status_page.page_addr) {
237 DRM_ERROR("Can not find hardware status page\n");
240 DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
241 if (ring->status_page.gfx_addr != 0)
242 intel_ring_setup_status_page(ring);
244 i915_write_hws_pga(dev);
246 DRM_DEBUG("Enabled hardware status page\n");
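/* DRM_I915_INIT ioctl: dispatch to init, cleanup or resume of legacy DMA. */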
251 static int i915_dma_init(struct drm_device *dev, void *data,
252 struct drm_file *file_priv)
254 drm_i915_init_t *init = data;
257 switch (init->func) {
259 retcode = i915_initialize(dev, init);
261 case I915_CLEANUP_DMA:
262 retcode = i915_dma_cleanup(dev);
264 case I915_RESUME_DMA:
265 retcode = i915_dma_resume(dev);
275 /* Implement basically the same security restrictions as hardware does
276 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
278 * Most of the calculations below involve calculating the size of a
279 * particular instruction. It's important to get the size right as
280 * that tells us where the next instruction to check is. Any illegal
281 * instruction detected will be given a size of zero, which is a
282 * signal to abort the rest of the buffer.
284 static int do_validate_cmd(int cmd)
286 switch (((cmd >> 29) & 0x7)) {
288 switch ((cmd >> 23) & 0x3f) {
290 return 1; /* MI_NOOP */
292 return 1; /* MI_FLUSH */
294 return 0; /* disallow everything else */
298 return 0; /* reserved */
300 return (cmd & 0xff) + 2; /* 2d commands */
302 if (((cmd >> 24) & 0x1f) <= 0x18)
305 switch ((cmd >> 24) & 0x1f) {
309 switch ((cmd >> 16) & 0xff) {
311 return (cmd & 0x1f) + 2;
313 return (cmd & 0xf) + 2;
315 return (cmd & 0xffff) + 2;
319 return (cmd & 0xffff) + 1;
323 if ((cmd & (1 << 23)) == 0) /* inline vertices */
324 return (cmd & 0x1ffff) + 2;
325 else if (cmd & (1 << 17)) /* indirect random */
326 if ((cmd & 0xffff) == 0)
327 return 0; /* unknown length, too hard */
329 return (((cmd & 0xffff) + 1) / 2) + 1;
331 return 2; /* indirect sequential */
342 static int validate_cmd(int cmd)
344 int ret = do_validate_cmd(cmd);
346 /* printk("validate_cmd( %x ): %d\n", cmd, ret); */
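/*
 * Copy a user-supplied command buffer into the ring, validating each command
 * word before it is emitted.
 */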
351 static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
354 drm_i915_private_t *dev_priv = dev->dev_private;
357 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
360 ret = BEGIN_LP_RING((dwords+1)&~1);
364 for (i = 0; i < dwords;) {
367 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
370 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
376 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
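/* Copy a single cliprect in from userspace and emit it as a drawing rectangle. */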
392 int i915_emit_box(struct drm_device * dev,
393 struct drm_clip_rect *boxes,
394 int i, int DR1, int DR4)
396 struct drm_clip_rect box;
398 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
402 return (i915_emit_box_p(dev, &box, DR1, DR4));
406 i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
409 drm_i915_private_t *dev_priv = dev->dev_private;
412 if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
414 DRM_ERROR("Bad box %d,%d..%d,%d\n",
415 box->x1, box->y1, box->x2, box->y2);
419 if (INTEL_INFO(dev)->gen >= 4) {
420 ret = BEGIN_LP_RING(4);
424 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
425 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
426 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
429 ret = BEGIN_LP_RING(6);
433 OUT_RING(GFX_OP_DRAWRECT_INFO);
435 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
436 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
445 /* XXX: Emitting the counter should really be moved to part of the IRQ
446 * emit. For now, do it in both places:
449 static void i915_emit_breadcrumb(struct drm_device *dev)
451 drm_i915_private_t *dev_priv = dev->dev_private;
453 if (++dev_priv->counter > 0x7FFFFFFFUL)
454 dev_priv->counter = 0;
455 if (dev_priv->sarea_priv)
456 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
458 if (BEGIN_LP_RING(4) == 0) {
459 OUT_RING(MI_STORE_DWORD_INDEX);
460 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
461 OUT_RING(dev_priv->counter);
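/*
 * Emit a user command buffer once per cliprect (or once if none were given),
 * followed by a breadcrumb.
 */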
467 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
468 drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
470 int nbox = cmd->num_cliprects;
471 int i = 0, count, ret;
474 DRM_ERROR("alignment\n");
478 i915_kernel_lost_context(dev);
480 count = nbox ? nbox : 1;
482 for (i = 0; i < count; i++) {
484 ret = i915_emit_box_p(dev, &cmd->cliprects[i],
490 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
495 i915_emit_breadcrumb(dev);
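/*
 * Dispatch a user batchbuffer via MI_BATCH_BUFFER_START (MI_BATCH_BUFFER on
 * 830/845G), once per cliprect.
 */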
500 i915_dispatch_batchbuffer(struct drm_device * dev,
501 drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
503 drm_i915_private_t *dev_priv = dev->dev_private;
504 int nbox = batch->num_cliprects;
507 if ((batch->start | batch->used) & 0x7) {
508 DRM_ERROR("alignment\n");
512 i915_kernel_lost_context(dev);
514 count = nbox ? nbox : 1;
516 for (i = 0; i < count; i++) {
518 int ret = i915_emit_box_p(dev, &cliprects[i],
519 batch->DR1, batch->DR4);
524 if (!IS_I830(dev) && !IS_845G(dev)) {
525 ret = BEGIN_LP_RING(2);
529 if (INTEL_INFO(dev)->gen >= 4) {
530 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
531 MI_BATCH_NON_SECURE_I965);
532 OUT_RING(batch->start);
534 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
535 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
538 ret = BEGIN_LP_RING(4);
542 OUT_RING(MI_BATCH_BUFFER);
543 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
544 OUT_RING(batch->start + batch->used - 4);
550 i915_emit_breadcrumb(dev);
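/* Emit a front/back buffer flip on the ring and update the SAREA bookkeeping. */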
555 static int i915_dispatch_flip(struct drm_device * dev)
557 drm_i915_private_t *dev_priv = dev->dev_private;
560 if (!dev_priv->sarea_priv)
563 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
565 dev_priv->current_page,
566 dev_priv->sarea_priv->pf_current_page);
568 i915_kernel_lost_context(dev);
570 ret = BEGIN_LP_RING(10);
573 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
576 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
578 if (dev_priv->current_page == 0) {
579 OUT_RING(dev_priv->back_offset);
580 dev_priv->current_page = 1;
582 OUT_RING(dev_priv->front_offset);
583 dev_priv->current_page = 0;
587 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
592 if (++dev_priv->counter > 0x7FFFFFFFUL)
593 dev_priv->counter = 0;
594 if (dev_priv->sarea_priv)
595 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
597 if (BEGIN_LP_RING(4) == 0) {
598 OUT_RING(MI_STORE_DWORD_INDEX);
599 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
600 OUT_RING(dev_priv->counter);
605 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
609 static int i915_quiescent(struct drm_device *dev)
611 i915_kernel_lost_context(dev);
612 return intel_ring_idle(LP_RING(dev->dev_private));
616 i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
620 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
623 ret = i915_quiescent(dev);
629 static int i915_batchbuffer(struct drm_device *dev, void *data,
630 struct drm_file *file_priv)
632 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
633 drm_i915_sarea_t *sarea_priv;
634 drm_i915_batchbuffer_t *batch = data;
635 struct drm_clip_rect *cliprects;
639 if (!dev_priv->allow_batchbuffer) {
640 DRM_ERROR("Batchbuffer ioctl disabled\n");
645 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
646 batch->start, batch->used, batch->num_cliprects);
648 cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
649 if (batch->num_cliprects < 0)
651 if (batch->num_cliprects != 0) {
652 cliprects = kmalloc(batch->num_cliprects *
653 sizeof(struct drm_clip_rect), DRM_MEM_DMA,
656 ret = -copyin(batch->cliprects, cliprects,
657 batch->num_cliprects * sizeof(struct drm_clip_rect));
666 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
667 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
669 sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
671 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
674 drm_free(cliprects, DRM_MEM_DMA);
678 static int i915_cmdbuffer(struct drm_device *dev, void *data,
679 struct drm_file *file_priv)
681 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
682 drm_i915_sarea_t *sarea_priv;
683 drm_i915_cmdbuffer_t *cmdbuf = data;
684 struct drm_clip_rect *cliprects = NULL;
688 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
689 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
691 if (cmdbuf->num_cliprects < 0)
696 batch_data = kmalloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
698 ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
701 goto fail_batch_free;
704 if (cmdbuf->num_cliprects) {
705 cliprects = kmalloc(cmdbuf->num_cliprects *
706 sizeof(struct drm_clip_rect), DRM_MEM_DMA,
708 ret = -copyin(cmdbuf->cliprects, cliprects,
709 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
717 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
718 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
720 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
724 sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
726 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
729 drm_free(cliprects, DRM_MEM_DMA);
731 drm_free(batch_data, DRM_MEM_DMA);
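/*
 * Emit a breadcrumb store followed by MI_USER_INTERRUPT and return the
 * sequence number that will be written.
 */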
735 static int i915_emit_irq(struct drm_device * dev)
737 drm_i915_private_t *dev_priv = dev->dev_private;
739 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
742 i915_kernel_lost_context(dev);
744 DRM_DEBUG("i915: emit_irq\n");
747 if (dev_priv->counter > 0x7FFFFFFFUL)
748 dev_priv->counter = 1;
750 if (master_priv->sarea_priv)
751 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
753 if (dev_priv->sarea_priv)
754 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
757 if (BEGIN_LP_RING(4) == 0) {
758 OUT_RING(MI_STORE_DWORD_INDEX);
759 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
760 OUT_RING(dev_priv->counter);
761 OUT_RING(MI_USER_INTERRUPT);
765 return dev_priv->counter;
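/*
 * Wait until the breadcrumb in the status page reaches irq_nr, using the ring
 * interrupt when available and falling back to polling otherwise.
 */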
768 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
770 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
772 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
775 struct intel_ring_buffer *ring = LP_RING(dev_priv);
777 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
778 READ_BREADCRUMB(dev_priv));
781 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
782 if (master_priv->sarea_priv)
783 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
787 if (master_priv->sarea_priv)
788 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
790 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
791 if (dev_priv->sarea_priv) {
792 dev_priv->sarea_priv->last_dispatch =
793 READ_BREADCRUMB(dev_priv);
798 if (dev_priv->sarea_priv)
799 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
802 if (ring->irq_get(ring)) {
803 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
804 READ_BREADCRUMB(dev_priv) >= irq_nr);
806 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
810 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
811 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
817 /* Needs the lock as it touches the ring.
819 int i915_irq_emit(struct drm_device *dev, void *data,
820 struct drm_file *file_priv)
822 drm_i915_private_t *dev_priv = dev->dev_private;
823 drm_i915_irq_emit_t *emit = data;
826 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
827 DRM_ERROR("called with no initialization\n");
831 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
834 result = i915_emit_irq(dev);
837 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
838 DRM_ERROR("copy_to_user\n");
845 /* Doesn't need the hardware lock.
847 int i915_irq_wait(struct drm_device *dev, void *data,
848 struct drm_file *file_priv)
850 drm_i915_private_t *dev_priv = dev->dev_private;
851 drm_i915_irq_wait_t *irqwait = data;
854 DRM_ERROR("called with no initialization\n");
858 return i915_wait_irq(dev, irqwait->irq_seq);
861 static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
862 struct drm_file *file_priv)
864 drm_i915_private_t *dev_priv = dev->dev_private;
865 drm_i915_vblank_pipe_t *pipe = data;
867 if (drm_core_check_feature(dev, DRIVER_MODESET))
871 DRM_ERROR("called with no initialization\n");
875 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
881 * Schedule buffer swap at given vertical blank.
883 static int i915_vblank_swap(struct drm_device *dev, void *data,
884 struct drm_file *file_priv)
886 /* The delayed swap mechanism was fundamentally racy, and has been
887 * removed. The model was that the client requested a delayed flip/swap
888 * from the kernel, then waited for vblank before continuing to perform
889 * rendering. The problem was that the kernel might wake the client
890 * up before it dispatched the vblank swap (since the lock has to be
891 * held while touching the ringbuffer), in which case the client would
892 * clear and start the next frame before the swap occurred, and
893 * flicker would occur in addition to likely missing the vblank.
895 * In the absence of this ioctl, userland falls back to a correct path
896 * of waiting for a vblank, then dispatching the swap on its own.
897 * Context switching to userland and back is plenty fast enough for
898 * meeting the requirements of vblank swapping.
903 static int i915_flip_bufs(struct drm_device *dev, void *data,
904 struct drm_file *file_priv)
908 DRM_DEBUG("%s\n", __func__);
910 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
912 ret = i915_dispatch_flip(dev);
917 static int i915_getparam(struct drm_device *dev, void *data,
918 struct drm_file *file_priv)
920 drm_i915_private_t *dev_priv = dev->dev_private;
921 drm_i915_getparam_t *param = data;
925 DRM_ERROR("called with no initialization\n");
929 switch (param->param) {
930 case I915_PARAM_IRQ_ACTIVE:
931 value = dev->irq_enabled ? 1 : 0;
933 case I915_PARAM_ALLOW_BATCHBUFFER:
934 value = dev_priv->allow_batchbuffer ? 1 : 0;
936 case I915_PARAM_LAST_DISPATCH:
937 value = READ_BREADCRUMB(dev_priv);
939 case I915_PARAM_CHIPSET_ID:
940 value = dev->pci_device;
942 case I915_PARAM_HAS_GEM:
945 case I915_PARAM_NUM_FENCES_AVAIL:
946 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
948 case I915_PARAM_HAS_OVERLAY:
949 value = dev_priv->overlay ? 1 : 0;
951 case I915_PARAM_HAS_PAGEFLIPPING:
954 case I915_PARAM_HAS_EXECBUF2:
957 case I915_PARAM_HAS_BSD:
958 value = HAS_BSD(dev);
960 case I915_PARAM_HAS_BLT:
961 value = HAS_BLT(dev);
963 case I915_PARAM_HAS_RELAXED_FENCING:
966 case I915_PARAM_HAS_COHERENT_RINGS:
969 case I915_PARAM_HAS_EXEC_CONSTANTS:
970 value = INTEL_INFO(dev)->gen >= 4;
972 case I915_PARAM_HAS_RELAXED_DELTA:
975 case I915_PARAM_HAS_GEN7_SOL_RESET:
978 case I915_PARAM_HAS_LLC:
979 value = HAS_LLC(dev);
982 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
987 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
988 DRM_ERROR("DRM_COPY_TO_USER failed\n");
995 static int i915_setparam(struct drm_device *dev, void *data,
996 struct drm_file *file_priv)
998 drm_i915_private_t *dev_priv = dev->dev_private;
999 drm_i915_setparam_t *param = data;
1002 DRM_ERROR("called with no initialization\n");
1006 switch (param->param) {
1007 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1009 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1010 dev_priv->tex_lru_log_granularity = param->value;
1012 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1013 dev_priv->allow_batchbuffer = param->value;
1015 case I915_SETPARAM_NUM_USED_FENCES:
1016 if (param->value > dev_priv->num_fence_regs ||
1019 /* Userspace can use first N regs */
1020 dev_priv->fence_reg_start = param->value;
1023 DRM_DEBUG("unknown parameter %d\n", param->param);
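/*
 * DRM_I915_HWS_ADDR ioctl: map a hardware status page located in graphics
 * memory at the offset supplied by userspace.
 */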
1030 static int i915_set_status_page(struct drm_device *dev, void *data,
1031 struct drm_file *file_priv)
1033 drm_i915_private_t *dev_priv = dev->dev_private;
1034 drm_i915_hws_addr_t *hws = data;
1035 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1037 if (!I915_NEED_GFX_HWS(dev))
1041 DRM_ERROR("called with no initialization\n");
1045 DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
1046 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1047 DRM_ERROR("tried to set status page when mode setting active\n");
1051 ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
1052 hws->addr & (0x1ffff<<12);
1054 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
1055 dev_priv->hws_map.size = 4*1024;
1056 dev_priv->hws_map.type = 0;
1057 dev_priv->hws_map.flags = 0;
1058 dev_priv->hws_map.mtrr = 0;
1060 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
1061 if (dev_priv->hws_map.virtual == NULL) {
1062 i915_dma_cleanup(dev);
1063 ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
1064 DRM_ERROR("can not ioremap virtual address for"
1065 " G33 hw status page\n");
1068 ring->status_page.page_addr = dev_priv->hw_status_page =
1069 dev_priv->hws_map.virtual;
1071 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
1072 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
1073 DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
1074 dev_priv->status_gfx_addr);
1075 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
1080 intel_enable_ppgtt(struct drm_device *dev)
1082 if (i915_enable_ppgtt >= 0)
1083 return i915_enable_ppgtt;
1085 /* Disable ppgtt on SNB if VT-d is on. */
1086 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
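/*
 * Set up the GEM memory manager: carve out stolen memory, size the GTT
 * aperture (reserving room for aliasing PPGTT when enabled) and bring up
 * the rings.
 */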
1093 i915_load_gem_init(struct drm_device *dev)
1095 struct drm_i915_private *dev_priv = dev->dev_private;
1096 unsigned long prealloc_size, gtt_size, mappable_size;
1099 prealloc_size = dev_priv->mm.gtt->stolen_size;
1100 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1101 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1103 /* Basic memrange allocator for stolen space */
1104 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1107 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
1108 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1109 * aperture accordingly when using aliasing ppgtt. */
1110 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
1111 /* For paranoia keep the guard page in between. */
1112 gtt_size -= PAGE_SIZE;
1114 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
1116 ret = i915_gem_init_aliasing_ppgtt(dev);
1122	/* Let GEM manage all of the aperture.
1124 * However, leave one page at the end still bound to the scratch
1125 * page. There are a number of places where the hardware
1126 * apparently prefetches past the end of the object, and we've
1127 * seen multiple hangs with the GPU head pointer stuck in a
1128 * batchbuffer bound at the last page of the aperture. One page
1129 * should be enough to keep any prefetching inside of the
1132 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1135 ret = i915_gem_init_hw(dev);
1138 i915_gem_cleanup_aliasing_ppgtt(dev);
1143 /* Try to set up FBC with a reasonable compressed buffer size */
1144 if (I915_HAS_FBC(dev) && i915_powersave) {
1147 /* Leave 1M for line length buffer & misc. */
1149 /* Try to get a 32M buffer... */
1150 if (prealloc_size > (36*1024*1024))
1151 cfb_size = 32*1024*1024;
1152 else /* fall back to 7/8 of the stolen space */
1153 cfb_size = prealloc_size * 7 / 8;
1154 i915_setup_compression(dev, cfb_size);
1158 /* Allow hardware batchbuffers unless told otherwise. */
1159 dev_priv->allow_batchbuffer = 1;
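/*
 * KMS load path: parse the video BIOS, initialize modesetting and GEM,
 * install the interrupt handler and start fbdev.
 */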
1164 i915_load_modeset_init(struct drm_device *dev)
1166 struct drm_i915_private *dev_priv = dev->dev_private;
1169 ret = intel_parse_bios(dev);
1171 DRM_INFO("failed to find VBIOS tables\n");
1174 intel_register_dsm_handler();
1177 /* IIR "flip pending" bit means done if this bit is set */
1178 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1179 dev_priv->flip_pending_is_done = true;
1181 intel_modeset_init(dev);
1183 ret = i915_load_gem_init(dev);
1187 intel_modeset_gem_init(dev);
1189 ret = drm_irq_install(dev);
1193 dev->vblank_disable_allowed = 1;
1195 ret = intel_fbdev_init(dev);
1199 drm_kms_helper_poll_init(dev);
1201 /* We're off and running w/KMS */
1202 dev_priv->mm.suspended = 0;
1208 i915_gem_cleanup_ringbuffer(dev);
1210 i915_gem_cleanup_aliasing_ppgtt(dev);
1215 i915_get_bridge_dev(struct drm_device *dev)
1217 struct drm_i915_private *dev_priv;
1219 dev_priv = dev->dev_private;
1221 dev_priv->bridge_dev = intel_gtt_get_bridge_device();
1222 if (dev_priv->bridge_dev == NULL) {
1223 DRM_ERROR("bridge device not found\n");
1229 #define MCHBAR_I915 0x44
1230 #define MCHBAR_I965 0x48
1231 #define MCHBAR_SIZE (4*4096)
1233 #define DEVEN_REG 0x54
1234 #define DEVEN_MCHBAR_EN (1 << 28)
1236 /* Allocate space for the MCH regs if needed, return nonzero on error */
1238 intel_alloc_mchbar_resource(struct drm_device *dev)
1240 drm_i915_private_t *dev_priv;
1243 u32 temp_lo, temp_hi;
1244 u64 mchbar_addr, temp;
1246 dev_priv = dev->dev_private;
1247 reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1249 if (INTEL_INFO(dev)->gen >= 4)
1250 temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
1253 temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
1254 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1256 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
1257 #ifdef XXX_CONFIG_PNP
1259 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1263 /* Get some space for it */
1264 vga = device_get_parent(dev->dev);
1265 dev_priv->mch_res_rid = 0x100;
1266 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
1267 dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
1268 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
1269 if (dev_priv->mch_res == NULL) {
1270 DRM_ERROR("failed mchbar resource alloc\n");
1274 if (INTEL_INFO(dev)->gen >= 4) {
1275 temp = rman_get_start(dev_priv->mch_res);
1277 pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
1279 pci_write_config(dev_priv->bridge_dev, reg,
1280 rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
1285 intel_setup_mchbar(struct drm_device *dev)
1287 drm_i915_private_t *dev_priv;
1292 dev_priv = dev->dev_private;
1293 mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1295 dev_priv->mchbar_need_disable = false;
1297 if (IS_I915G(dev) || IS_I915GM(dev)) {
1298 temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
1299 enabled = (temp & DEVEN_MCHBAR_EN) != 0;
1301 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1305	/* If it's already enabled, we don't have to do anything */
1307 DRM_DEBUG("mchbar already enabled\n");
1311 if (intel_alloc_mchbar_resource(dev))
1314 dev_priv->mchbar_need_disable = true;
1316 /* Space is allocated or reserved, so enable it. */
1317 if (IS_I915G(dev) || IS_I915GM(dev)) {
1318 pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1319 temp | DEVEN_MCHBAR_EN, 4);
1321 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
1322 pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
1327 intel_teardown_mchbar(struct drm_device *dev)
1329 drm_i915_private_t *dev_priv;
1334 dev_priv = dev->dev_private;
1335 mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1337 if (dev_priv->mchbar_need_disable) {
1338 if (IS_I915G(dev) || IS_I915GM(dev)) {
1339 temp = pci_read_config(dev_priv->bridge_dev,
1341 temp &= ~DEVEN_MCHBAR_EN;
1342 pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
1345 temp = pci_read_config(dev_priv->bridge_dev,
1348 pci_write_config(dev_priv->bridge_dev, mchbar_reg,
1353 if (dev_priv->mch_res != NULL) {
1354 vga = device_get_parent(dev->dev);
1355 BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
1356 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1357 BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
1358 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
1359 dev_priv->mch_res = NULL;
1364 * i915_driver_load - setup chip and create an initial config
1366 * @flags: startup flags
1368 * The driver load routine has to do several things:
1369 * - drive output discovery via intel_modeset_init()
1370 * - initialize the memory manager
1371 * - allocate initial config memory
1372 * - setup the DRM framebuffer with the allocated memory
1374 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 unsigned long base, size;
1382 /* i915 has 4 more counters */
1384 dev->types[6] = _DRM_STAT_IRQ;
1385 dev->types[7] = _DRM_STAT_PRIMARY;
1386 dev->types[8] = _DRM_STAT_SECONDARY;
1387 dev->types[9] = _DRM_STAT_DMA;
1389 dev_priv = kmalloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
1391 if (dev_priv == NULL)
1394 dev->dev_private = (void *)dev_priv;
1395 dev_priv->dev = dev;
1396 dev_priv->info = i915_get_device_id(dev->pci_device);
1398 if (i915_get_bridge_dev(dev)) {
1399 drm_free(dev_priv, DRM_MEM_DRIVER);
1402 dev_priv->mm.gtt = intel_gtt_get();
1404 /* Add register map (needed for suspend/resume) */
1405 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1406 base = drm_get_resource_start(dev, mmio_bar);
1407 size = drm_get_resource_len(dev, mmio_bar);
1409 ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
1410 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
1412 /* The i915 workqueue is primarily used for batched retirement of
1413 * requests (and thus managing bo) once the task has been completed
1414 * by the GPU. i915_gem_retire_requests() is called directly when we
1415 * need high-priority retirement, such as waiting for an explicit
1418 * It is also used for periodic low-priority events, such as
1419 * idle-timers and recording error state.
1421 * All tasks on the workqueue are expected to acquire the dev mutex
1422 * so there is no point in running more than one instance of the
1423 * workqueue at any time. Use an ordered one.
1425 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1426 if (dev_priv->wq == NULL) {
1427 DRM_ERROR("Failed to create our workqueue.\n");
1432 /* This must be called before any calls to HAS_PCH_* */
1433 intel_detect_pch(dev);
1435 intel_irq_init(dev);
1438 /* Try to make sure MCHBAR is enabled before poking at it */
1439 intel_setup_mchbar(dev);
1440 intel_setup_gmbus(dev);
1441 intel_opregion_setup(dev);
1443 intel_setup_bios(dev);
1447 /* On the 945G/GM, the chipset reports the MSI capability on the
1448 * integrated graphics even though the support isn't actually there
1449 * according to the published specs. It doesn't appear to function
1450 * correctly in testing on 945G.
1451 * This may be a side effect of MSI having been made available for PEG
1452 * and the registers being closely associated.
1454 * According to chipset errata, on the 965GM, MSI interrupts may
1455	 * be lost or delayed, but we use them anyway to avoid
1456 * stuck interrupts on some machines.
1459 lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
1460 lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
1461 spin_init(&dev_priv->rps.lock);
1463 lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
1466 if (!I915_NEED_GFX_HWS(dev)) {
1467 ret = i915_init_phys_hws(dev);
1469 drm_rmmap(dev, dev_priv->mmio_map);
1470 drm_free(dev_priv, DRM_MEM_DRIVER);
1475 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1476 dev_priv->num_pipe = 3;
1477 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1478 dev_priv->num_pipe = 2;
1480 dev_priv->num_pipe = 1;
1482 ret = drm_vblank_init(dev, dev_priv->num_pipe);
1484 goto out_gem_unload;
1486 /* Start out suspended */
1487 dev_priv->mm.suspended = 1;
1489 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1490 ret = i915_load_modeset_init(dev);
1492 DRM_ERROR("failed to init modeset\n");
1493 goto out_gem_unload;
1497 /* Must be done after probing outputs */
1498 intel_opregion_init(dev);
1500 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1501 (unsigned long) dev);
1504 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
1505 i915_mch_dev = dev_priv;
1506 dev_priv->mchdev_lock = &mchdev_lock;
1507 lockmgr(&mchdev_lock, LK_RELEASE);
1514 (void) i915_driver_unload_int(dev, true);
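/*
 * Common teardown shared by the normal unload path and the driver-load error
 * path; 'locked' indicates whether the caller already holds the device lock.
 */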
1521 i915_driver_unload_int(struct drm_device *dev, bool locked)
1523 struct drm_i915_private *dev_priv = dev->dev_private;
1528 ret = i915_gpu_idle(dev);
1530 DRM_ERROR("failed to idle hardware: %d\n", ret);
1536 intel_teardown_mchbar(dev);
1540 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1541 intel_fbdev_fini(dev);
1542 intel_modeset_cleanup(dev);
1545 /* Free error state after interrupts are fully disabled. */
1546 del_timer_sync(&dev_priv->hangcheck_timer);
1548 i915_destroy_error_state(dev);
1550 intel_opregion_fini(dev);
1555 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1558 i915_gem_free_all_phys_object(dev);
1559 i915_gem_cleanup_ringbuffer(dev);
1562 i915_gem_cleanup_aliasing_ppgtt(dev);
1566 if (I915_HAS_FBC(dev) && i915_powersave)
1567 i915_cleanup_compression(dev);
1569 drm_mm_takedown(&dev_priv->mm.stolen);
1571 intel_cleanup_overlay(dev);
1573 if (!I915_NEED_GFX_HWS(dev))
1577 i915_gem_unload(dev);
1579 lockuninit(&dev_priv->irq_lock);
1581 if (dev_priv->wq != NULL)
1582 destroy_workqueue(dev_priv->wq);
1584 bus_generic_detach(dev->dev);
1585 drm_rmmap(dev, dev_priv->mmio_map);
1586 intel_teardown_gmbus(dev);
1588 lockuninit(&dev_priv->error_lock);
1589 drm_free(dev->dev_private, DRM_MEM_DRIVER);
1595 i915_driver_unload(struct drm_device *dev)
1598 return (i915_driver_unload_int(dev, true));
1602 i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1604 struct drm_i915_file_private *i915_file_priv;
1606 i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1609 spin_init(&i915_file_priv->mm.lock);
1610 INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1611 file_priv->driver_priv = i915_file_priv;
1617 i915_driver_lastclose(struct drm_device * dev)
1619 drm_i915_private_t *dev_priv = dev->dev_private;
1621 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1625 drm_fb_helper_restore();
1626 vga_switcheroo_process_delayed_switch();
1630 i915_gem_lastclose(dev);
1631 i915_dma_cleanup(dev);
1634 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1637 i915_gem_release(dev, file_priv);
1640 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1642 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1644 spin_uninit(&i915_file_priv->mm.lock);
1645 drm_free(i915_file_priv, DRM_MEM_FILES);
1648 struct drm_ioctl_desc i915_ioctls[] = {
1649 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1650 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1651 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
1652 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1653 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1654 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1655 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
1656 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1657 DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
1658 DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
1659 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1660 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1661 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1662 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
1663 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
1664 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1665 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1666 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1667 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
1668 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
1669 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1670 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1671 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1672 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
1673 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1674 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1675 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
1676 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1677 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1678 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
1679 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1680 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1681 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1682 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
1683 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
1684 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1685 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1686 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1687 DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1688 DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1689 DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1690 DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1693 struct drm_driver i915_driver_info = {
1694 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1695 DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
1696 DRIVER_GEM /*| DRIVER_MODESET*/,
1698 .buf_priv_size = sizeof(drm_i915_private_t),
1699 .load = i915_driver_load,
1700 .open = i915_driver_open,
1701 .unload = i915_driver_unload,
1702 .preclose = i915_driver_preclose,
1703 .lastclose = i915_driver_lastclose,
1704 .postclose = i915_driver_postclose,
1705 .device_is_agp = i915_driver_device_is_agp,
1706 .gem_init_object = i915_gem_init_object,
1707 .gem_free_object = i915_gem_free_object,
1708 .gem_pager_ops = &i915_gem_pager_ops,
1709 .dumb_create = i915_gem_dumb_create,
1710 .dumb_map_offset = i915_gem_mmap_gtt,
1711 .dumb_destroy = i915_gem_dumb_destroy,
1712 .sysctl_init = i915_sysctl_init,
1713 .sysctl_cleanup = i915_sysctl_cleanup,
1715 .ioctls = i915_ioctls,
1716 .max_ioctl = DRM_ARRAY_SIZE(i915_ioctls),
1718 .name = DRIVER_NAME,
1719 .desc = DRIVER_DESC,
1720 .date = DRIVER_DATE,
1721 .major = DRIVER_MAJOR,
1722 .minor = DRIVER_MINOR,
1723 .patchlevel = DRIVER_PATCHLEVEL,
1727 * Determine if the device really is AGP or not.
1729 * All Intel graphics chipsets are treated as AGP, even if they are really
1732 * \param dev The device to be tested.
1735 * A value of 1 is always returned to indicate every i9x5 is AGP.
1737 int i915_driver_device_is_agp(struct drm_device * dev)