kernel/drm: Fix -Winit-self.
[dragonfly.git] sys/dev/drm/i915/i915_dma.c
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_dma.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

extern struct drm_i915_private *i915_mch_dev;

extern void i915_pineview_get_mem_freq(struct drm_device *dev);
extern void i915_ironlake_get_mem_freq(struct drm_device *dev);
static int i915_driver_unload_int(struct drm_device *dev, bool locked);

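/*
 * Point HWS_PGA at the DMA-allocated status page.  Gen4+ parts take a
 * wider physical address: the (busaddr >> 28) & 0xf0 term folds address
 * bits 32-35 into bits 4-7 of the register.
 */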
static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * Program Hardware Status Page
         * XXXKIB Keep 4GB limit for allocation for now.  This method
         * of allocation is used on <= 965 hardware, which has several
         * errata regarding the use of physical memory > 4 GB.
         */
        DRM_UNLOCK(dev);
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
        DRM_LOCK(dev);
        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hw_status_page =
            dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

        i915_write_hws_pga(dev);
        DRM_DEBUG("Enabled hardware status page, phys %jx\n",
            (uintmax_t)dev_priv->dma_status_page);
        return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
                ring->status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

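/*
 * Resynchronize the driver's cached ring state (head, tail, free space)
 * with the hardware, since in the non-KMS DRI1 path another client such
 * as the X server may have emitted to the ring behind our back.
 */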
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

#if 1
        KIB_NOTYET();
#else
        if (!dev->primary->master)
                return;
#endif

        if (ring->head == ring->tail && dev_priv->sarea_priv)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                i915_dma_cleanup(dev);
                return -EINVAL;
        }

        dev_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG("\n");

        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
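        /* Bits 31:29 of the dword select the command type (client). */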
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}

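/*
 * Copy a stream of command dwords from userspace into the render ring,
 * validating each instruction's opcode and length first.  The ring is
 * always advanced by an even number of dwords, so odd-sized streams are
 * padded with a trailing zero (MI_NOOP).
 */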
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
                          int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return -EINVAL;

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return -EINVAL;
                        }
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

int i915_emit_box(struct drm_device * dev,
                  struct drm_clip_rect *boxes,
                  int i, int DR1, int DR4)
{
        struct drm_clip_rect box;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return -EFAULT;
        }

        return (i915_emit_box_p(dev, &box, DR1, DR4));
}

int
i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
            box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret != 0)
                        return (ret);

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret != 0)
                        return (ret);

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

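/*
 * The DRI1 dispatch paths below replay the same commands once per
 * cliprect, emitting a drawing rectangle before each pass, and finish
 * by stamping a breadcrumb so userspace can tell when the work retired.
 */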
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box_p(dev, &cmd->cliprects[i],
                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int
i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box_p(dev, &cliprects[i],
                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret != 0)
                                return (ret);

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
                                    MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret != 0)
                                return (ret);

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        i915_emit_breadcrumb(dev);

        return 0;
}

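/*
 * Flip between the front and back buffers by emitting an asynchronous
 * display-buffer-info packet, then wait on the plane A flip event and
 * record a breadcrumb (the same sequence i915_emit_breadcrumb() emits).
 */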
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (!dev_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}

static int
i915_quiescent(struct drm_device *dev)
{
        struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

        i915_kernel_lost_context(dev);
        return (intel_wait_ring_idle(ring));
}

static int
i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        int ret;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        DRM_LOCK(dev);
        ret = i915_quiescent(dev);
        DRM_UNLOCK(dev);

        return (ret);
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        struct drm_clip_rect *cliprects;
        size_t cliplen;
        int ret;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }
        DRM_UNLOCK(dev);

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);

        if (batch->num_cliprects < 0) {
                DRM_LOCK(dev);  /* keep the lock balanced for the caller */
                return -EFAULT;
        }
        cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
        if (batch->num_cliprects != 0) {
                cliprects = kmalloc(cliplen, DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);

                ret = -copyin(batch->cliprects, cliprects, cliplen);
                if (ret != 0) {
                        DRM_LOCK(dev);
                        goto fail_free;
                }
        } else
                cliprects = NULL;

        DRM_LOCK(dev);
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);

        sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        drm_free(cliprects, DRM_MEM_DMA);
        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        DRM_UNLOCK(dev);

        batch_data = kmalloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

        ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
        if (ret != 0) {
                DRM_LOCK(dev);
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kmalloc(cmdbuf->num_cliprects *
                    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);
                ret = -copyin(cmdbuf->cliprects, cliprects,
                    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        DRM_LOCK(dev);
                        goto fail_clip_free;
                }
        }

        DRM_LOCK(dev);
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        drm_free(cliprects, DRM_MEM_DMA);
fail_batch_free:
        drm_free(batch_data, DRM_MEM_DMA);
        return ret;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        DRM_DEBUG("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        ret = i915_dispatch_flip(dev);

        return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq_enabled ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
        case I915_PARAM_HAS_BLT:
                value = HAS_BLT(dev);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG("unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        return 0;
}

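/*
 * For hardware that keeps its status page in graphics memory (the
 * I915_NEED_GFX_HWS() case, e.g. G33), this ioctl lets the X server
 * place the page in the GTT and maps it through the AGP aperture.
 */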
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_ERROR("tried to set status page when mode setting active\n");
                return 0;
        }

        ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
            hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.virtual == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hw_status_page =
            dev_priv->hws_map.virtual;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
                        dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
}

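/*
 * Decide whether to use the per-process GTT.  The i915_enable_ppgtt
 * tunable overrides the heuristic when set; otherwise PPGTT is avoided
 * on gen6 (Sandy Bridge) with the IOMMU active, a combination known to
 * cause trouble.
 */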
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
                return false;

        return true;
}

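/*
 * Carve up graphics memory for GEM: a drm_mm allocator over the stolen
 * region, plus the global GTT address-space split, which is shrunk by
 * the PPGTT page-directory entries and a guard page when aliasing
 * PPGTT is in use.
 */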
static int
i915_load_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long prealloc_size, gtt_size, mappable_size;
        int ret;

        prealloc_size = dev_priv->mm.gtt->stolen_size;
        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

        DRM_LOCK(dev);
        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                 * aperture accordingly when using aliasing ppgtt. */
                gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
                /* For paranoia keep the guard page in between. */
                gtt_size -= PAGE_SIZE;

                i915_gem_do_init(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (ret) {
                        DRM_UNLOCK(dev);
                        return ret;
                }
        } else {
                /* Let GEM Manage all of the aperture.
                 *
                 * However, leave one page at the end still bound to the scratch
                 * page.  There are a number of places where the hardware
                 * apparently prefetches past the end of the object, and we've
                 * seen multiple hangs with the GPU head pointer stuck in a
                 * batchbuffer bound at the last page of the aperture.  One page
                 * should be enough to keep any prefetching inside of the
                 * aperture.
                 */
                i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
        }

        ret = i915_gem_init_hw(dev);
        DRM_UNLOCK(dev);
        if (ret != 0) {
                i915_gem_cleanup_aliasing_ppgtt(dev);
                return (ret);
        }

#if 0
        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
                int cfb_size;

                /* Leave 1M for line length buffer & misc. */

                /* Try to get a 32M buffer... */
                if (prealloc_size > (36*1024*1024))
                        cfb_size = 32*1024*1024;
                else /* fall back to 7/8 of the stolen space */
                        cfb_size = prealloc_size * 7 / 8;
                i915_setup_compression(dev, cfb_size);
        }
#endif

        /* Allow hardware batchbuffers unless told otherwise. */
        dev_priv->allow_batchbuffer = 1;
        return 0;
}

static int
i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

#if 0
        intel_register_dsm_handler();
#endif

        /* IIR "flip pending" bit means done if this bit is set */
        if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
                dev_priv->flip_pending_is_done = true;

        intel_modeset_init(dev);

        ret = i915_load_gem_init(dev);
        if (ret != 0)
                goto cleanup_gem;

        intel_modeset_gem_init(dev);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;

        dev->vblank_disable_allowed = 1;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        drm_kms_helper_poll_init(dev);

        /* We're off and running w/KMS */
        dev_priv->mm.suspended = 0;

        return (0);

cleanup_gem:
        DRM_LOCK(dev);
        i915_gem_cleanup_ringbuffer(dev);
        DRM_UNLOCK(dev);
        i915_gem_cleanup_aliasing_ppgtt(dev);
        return (ret);
}

static int
i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv;

        dev_priv = dev->dev_private;

        dev_priv->bridge_dev = intel_gtt_get_bridge_device();
        if (dev_priv->bridge_dev == NULL) {
                DRM_ERROR("bridge device not found\n");
                return (-1);
        }
        return (0);
}

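/*
 * MCHBAR is the memory controller hub's register window.  The BAR sits
 * at config offset 0x44 on pre-gen4 bridges and 0x48 on gen4+; on
 * i915G/GM it is gated by the DEVEN_MCHBAR_EN bit instead of bit 0.
 */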
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int reg;
        u32 temp_lo, temp_hi;
        u64 mchbar_addr, temp;

        dev_priv = dev->dev_private;
        reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (INTEL_INFO(dev)->gen >= 4)
                temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
        else
                temp_hi = 0;
        temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        vga = device_get_parent(dev->dev);
        dev_priv->mch_res_rid = 0x100;
        dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
            dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
            MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
        if (dev_priv->mch_res == NULL) {
                DRM_ERROR("failed mchbar resource alloc\n");
                return (-ENOMEM);
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                temp = rman_get_start(dev_priv->mch_res);
                temp >>= 32;
                pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
        }
        pci_write_config(dev_priv->bridge_dev, reg,
            rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
        return (0);
}

static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        int mchbar_reg;
        u32 temp;
        bool enabled;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
                enabled = (temp & DEVEN_MCHBAR_EN) != 0;
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled) {
                DRM_DEBUG("mchbar already enabled\n");
                return;
        }

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                    temp | DEVEN_MCHBAR_EN, 4);
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int mchbar_reg;
        u32 temp;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            DEVEN_REG, 4);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                            temp, 4);
                } else {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            mchbar_reg, 4);
                        temp &= ~1;
                        pci_write_config(dev_priv->bridge_dev, mchbar_reg,
                            temp, 4);
                }
        }

        if (dev_priv->mch_res != NULL) {
                vga = device_get_parent(dev->dev);
                BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                dev_priv->mch_res = NULL;
        }
}

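/*
 * Main attach path: allocate the softc, map the MMIO BAR, initialize
 * locks and the task queue, set up MCHBAR/GMBUS/opregion and GEM, then
 * hand off to i915_load_modeset_init() when KMS is enabled.
 */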
int
i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long base, size;
        int mmio_bar, ret;

        ret = 0;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = kmalloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
            M_ZERO | M_WAITOK);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = i915_get_device_id(dev->pci_device);

        if (i915_get_bridge_dev(dev)) {
                drm_free(dev_priv, DRM_MEM_DRIVER);
                return (-EIO);
        }
        dev_priv->mm.gtt = intel_gtt_get();

        /* Add register map (needed for suspend/resume) */
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);

        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

        dev_priv->tq = taskqueue_create("915", M_WAITOK,
            taskqueue_thread_enqueue, &dev_priv->tq);
        taskqueue_start_threads(&dev_priv->tq, 1, 0, -1, "i915 taskq");
        lockinit(&dev_priv->gt_lock, "915gt", 0, LK_CANRECURSE);
        lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
        lockinit(&dev_priv->error_completion_lock, "915cmp", 0, LK_CANRECURSE);
        lockinit(&dev_priv->rps_lock, "915rps", 0, LK_CANRECURSE);

        dev_priv->has_gem = 1;
        intel_irq_init(dev);

        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret != 0) {
                        drm_rmmap(dev, dev_priv->mmio_map);
                        drm_free(dev_priv, DRM_MEM_DRIVER);
                        return ret;
                }
        }

        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);

        lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);

        if (IS_IVYBRIDGE(dev))
                dev_priv->num_pipe = 3;
        else if (IS_MOBILE(dev) || !IS_GEN2(dev))
                dev_priv->num_pipe = 2;
        else
                dev_priv->num_pipe = 1;

        ret = drm_vblank_init(dev, dev_priv->num_pipe);
        if (ret)
                goto out_gem_unload;

        /* Start out suspended */
        dev_priv->mm.suspended = 1;

        intel_detect_pch(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_UNLOCK(dev);
                ret = i915_load_modeset_init(dev);
                DRM_LOCK(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
        }

        intel_opregion_init(dev);

        callout_init_mp(&dev_priv->hangcheck_timer);
        callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
            i915_hangcheck_elapsed, dev);

        if (IS_GEN5(dev)) {
                lockmgr(&mchdev_lock, LK_EXCLUSIVE);
                i915_mch_dev = dev_priv;
                dev_priv->mchdev_lock = &mchdev_lock;
                lockmgr(&mchdev_lock, LK_RELEASE);
        }

        return (0);

out_gem_unload:
        /* XXXKIB */
        (void) i915_driver_unload_int(dev, true);
        return (ret);
}

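/*
 * Common detach path.  'locked' tells whether the caller already holds
 * the DRM lock; it is dropped around the modeset/fbdev teardown steps
 * that must run unlocked and retaken for the GEM cleanup.
 */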
static int
i915_driver_unload_int(struct drm_device *dev, bool locked)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (!locked)
                DRM_LOCK(dev);
        ret = i915_gpu_idle(dev, true);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        if (!locked)
                DRM_UNLOCK(dev);

        i915_free_hws(dev);

        intel_teardown_mchbar(dev);

        if (locked)
                DRM_UNLOCK(dev);
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);
        }

        /* Free error state after interrupts are fully disabled. */
        callout_stop(&dev_priv->hangcheck_timer);

        i915_destroy_error_state(dev);

        intel_opregion_fini(dev);

        if (locked)
                DRM_LOCK(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                if (!locked)
                        DRM_LOCK(dev);
                i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                if (!locked)
                        DRM_UNLOCK(dev);
                i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
                KIB_NOTYET();
#else
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
#endif
                drm_mm_takedown(&dev_priv->mm.stolen);

                intel_cleanup_overlay(dev);

                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        }

        i915_gem_unload(dev);

        lockuninit(&dev_priv->irq_lock);

        if (dev_priv->tq != NULL)
                taskqueue_free(dev_priv->tq);

        bus_generic_detach(dev->dev);
        drm_rmmap(dev, dev_priv->mmio_map);
        intel_teardown_gmbus(dev);

        lockuninit(&dev_priv->error_lock);
        lockuninit(&dev_priv->error_completion_lock);
        lockuninit(&dev_priv->rps_lock);
        drm_free(dev->dev_private, DRM_MEM_DRIVER);

        return (0);
}

int
i915_driver_unload(struct drm_device *dev)
{

        return (i915_driver_unload_int(dev, true));
}

int
i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_i915_file_private *i915_file_priv;

        i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
            M_WAITOK | M_ZERO);

        spin_init(&i915_file_priv->mm.lock);
        INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
        file_priv->driver_priv = i915_file_priv;

        return (0);
}

void
i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
#if 1
                KIB_NOTYET();
#else
                drm_fb_helper_restore();
                vga_switcheroo_process_delayed_switch();
#endif
                return;
        }
        i915_gem_lastclose(dev);
        i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{

        i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

        spin_uninit(&i915_file_priv->mm.lock);
        drm_free(i915_file_priv, DRM_MEM_FILES);
}

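/*
 * Ioctl dispatch table.  DRM_AUTH entries require an authenticated
 * client, DRM_MASTER/DRM_ROOT_ONLY restrict callers further, and
 * DRM_UNLOCKED entries run without the global DRM lock.
 */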
struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

struct drm_driver i915_driver_info = {
        .driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
            DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
            DRIVER_GEM /*| DRIVER_MODESET*/,

        .buf_priv_size  = sizeof(drm_i915_private_t),
        .load           = i915_driver_load,
        .open           = i915_driver_open,
        .unload         = i915_driver_unload,
        .preclose       = i915_driver_preclose,
        .lastclose      = i915_driver_lastclose,
        .postclose      = i915_driver_postclose,
        .device_is_agp  = i915_driver_device_is_agp,
        .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
        .gem_pager_ops  = &i915_gem_pager_ops,
        .dumb_create    = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy   = i915_gem_dumb_destroy,
        .sysctl_init    = i915_sysctl_init,
        .sysctl_cleanup = i915_sysctl_cleanup,

        .ioctls         = i915_ioctls,
        .max_ioctl      = DRM_ARRAY_SIZE(i915_ioctls),

        .name           = DRIVER_NAME,
        .desc           = DRIVER_DESC,
        .date           = DRIVER_DATE,
        .major          = DRIVER_MAJOR,
        .minor          = DRIVER_MINOR,
        .patchlevel     = DRIVER_PATCHLEVEL,
};

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * built-in.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}