/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_dma.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <linux/workqueue.h>

extern struct drm_i915_private *i915_mch_dev;

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        /*
         * The dri breadcrumb update races against the drm master disappearing.
         * Instead of trying to fix this (this is by far not the only ums issue)
         * just don't do the update in kms mode.
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        /* XXX: don't do it at all actually */
        return;
}

static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}
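
/*
 * For reference: HWS_PGA takes the page-aligned bus address in bits
 * [31:12]; on gen4+ the shift above folds physical address bits [35:32]
 * into register bits [7:4].  With a hypothetical busaddr of 0x1_2345_6000,
 * the low dword truncates to 0x23456000 and (busaddr >> 28) & 0xf0 is
 * 0x10, so 0x23456010 is written.
 */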

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * Program Hardware Status Page
         * XXXKIB Keep 4GB limit for allocation for now.  This method
         * of allocation is used on <= 965 hardware, which has several
         * errata regarding the use of physical memory > 4 GB.
         */
        DRM_UNLOCK(dev);
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
        DRM_LOCK(dev);
        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hw_status_page =
            dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

        i915_write_hws_pga(dev);
        DRM_DEBUG("Enabled hardware status page, phys %jx\n",
            (uintmax_t)dev_priv->dma_status_page);
        return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
                ring->status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}
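
/*
 * 0x1ffff000 appears to be a benign parking address for HWS_PGA, so the
 * hardware is not left writing status into a page that has just been
 * freed; the exact value is historical.
 */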

void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

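        /*
         * Free space is head minus (tail + 8), wrapped by the ring size:
         * e.g. with a 64 KiB ring, head 0x100 and tail 0x200 give
         * 0x100 - 0x208 = -0x108, plus 0x10000 = 0xfef8 bytes free.  The
         * 8 bytes of slack keep tail from butting up against head.
         */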
        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

#if 1
        KIB_NOTYET();
#else
        if (!dev->primary->master)
                return;
#endif

        if (ring->head == ring->tail && dev_priv->sarea_priv)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                i915_dma_cleanup(dev);
                return -EINVAL;
        }

        dev_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG("\n");

        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}
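
/*
 * Worked example (values are illustrative): for a dword of 0x40000003,
 * (cmd >> 29) & 0x7 is 0x2, i.e. a 2D command, so its length is
 * (cmd & 0xff) + 2 == 5 dwords; the scanner then skips 5 dwords to find
 * the next opcode to check.  A returned size of 0 aborts the buffer.
 */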

static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
                          int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return -EINVAL;

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return -EINVAL;
                        }
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

int i915_emit_box(struct drm_device * dev,
                  struct drm_clip_rect *boxes,
                  int i, int DR1, int DR4)
{
        struct drm_clip_rect box;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return -EFAULT;
        }

        return (i915_emit_box_p(dev, &box, DR1, DR4));
}

int
i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
            box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret != 0)
                        return (ret);

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret != 0)
                        return (ret);

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}
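
/*
 * MI_STORE_DWORD_INDEX above makes the GPU write dev_priv->counter into
 * dword I915_BREADCRUMB_INDEX of the hardware status page when the ring
 * reaches this point, so READ_BREADCRUMB(dev_priv) tells the CPU which
 * enqueued buffer has completed.
 */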

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
    drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box_p(dev, &cmd->cliprects[i],
                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int
i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box_p(dev, &cliprects[i],
                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret != 0)
                                return (ret);

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
                                    MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret != 0)
                                return (ret);

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        i915_emit_breadcrumb(dev);

        return 0;
}
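
/*
 * MI_BATCH_NON_SECURE marks the batch as unprivileged so the hardware
 * applies its own command restrictions while executing it; this is the
 * hardware-side counterpart of the software screening in do_validate_cmd().
 */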

static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (!dev_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        i915_kernel_lost_context(dev);
        return intel_ring_idle(LP_RING(dev->dev_private));
}

static int
i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        int ret;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        DRM_LOCK(dev);
        ret = i915_quiescent(dev);
        DRM_UNLOCK(dev);

        return (ret);
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        struct drm_clip_rect *cliprects;
        size_t cliplen;
        int ret;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }
        /* Reject a bogus cliprect count before dropping the lock. */
        if (batch->num_cliprects < 0)
                return -EINVAL;
        DRM_UNLOCK(dev);

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);

        cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
        if (batch->num_cliprects != 0) {
                cliprects = kmalloc(cliplen, DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);

                ret = -copyin(batch->cliprects, cliprects, cliplen);
                if (ret != 0) {
                        DRM_LOCK(dev);
                        goto fail_free;
                }
        } else
                cliprects = NULL;

        DRM_LOCK(dev);
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);

        sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        drm_free(cliprects, DRM_MEM_DMA);
        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        DRM_UNLOCK(dev);

        batch_data = kmalloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

        ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
        if (ret != 0) {
                DRM_LOCK(dev);
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kmalloc(cmdbuf->num_cliprects *
                    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);
                ret = -copyin(cmdbuf->cliprects, cliprects,
                    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        DRM_LOCK(dev);
                        goto fail_clip_free;
                }
        }

        DRM_LOCK(dev);
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        drm_free(cliprects, DRM_MEM_DMA);
fail_batch_free:
        drm_free(batch_data, DRM_MEM_DMA);
        return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif

        i915_kernel_lost_context(dev);

        DRM_DEBUG("i915: emit_irq\n");

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
#if 0
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;
#else
        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
#endif

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->counter;
}
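
/*
 * The sequence number returned above is what userspace later passes to
 * the IRQ_WAIT ioctl; i915_wait_irq() below blocks until
 * READ_BREADCRUMB(dev_priv) catches up to it, with MI_USER_INTERRUPT
 * waking the waiters.
 */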

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
#if 0
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

#if 0
        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#else
        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (dev_priv->sarea_priv) {
                        dev_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
                }
                return 0;
        }

        if (dev_priv->sarea_priv)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        DRM_LOCK(dev);
        result = i915_emit_irq(dev);
        DRM_UNLOCK(dev);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        DRM_DEBUG("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        ret = i915_dispatch_flip(dev);

        return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq_enabled ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
        case I915_PARAM_HAS_BLT:
                value = HAS_BLT(dev);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG("unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_ERROR("tried to set status page when mode setting active\n");
                return 0;
        }

        ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
            hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.virtual == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hw_status_page =
            dev_priv->hws_map.virtual;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
                        dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
                return false;

        return true;
}
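
/*
 * i915_enable_ppgtt is a driver tunable: a non-negative value forces
 * aliasing PPGTT on or off, while a negative value means "auto" and falls
 * through to the gen6/VT-d heuristic in the function above.
 */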

static int
i915_load_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long prealloc_size, gtt_size, mappable_size;
        int ret;

        prealloc_size = dev_priv->mm.gtt->stolen_size;
        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

        DRM_LOCK(dev);
        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                 * aperture accordingly when using aliasing ppgtt. */
                gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
                /* For paranoia keep the guard page in between. */
                gtt_size -= PAGE_SIZE;

                i915_gem_do_init(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (ret) {
                        DRM_UNLOCK(dev);
                        return ret;
                }
        } else {
                /* Let GEM Manage all of the aperture.
                 *
                 * However, leave one page at the end still bound to the scratch
                 * page.  There are a number of places where the hardware
                 * apparently prefetches past the end of the object, and we've
                 * seen multiple hangs with the GPU head pointer stuck in a
                 * batchbuffer bound at the last page of the aperture.  One page
                 * should be enough to keep any prefetching inside of the
                 * aperture.
                 */
                i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
        }

        ret = i915_gem_init_hw(dev);
        DRM_UNLOCK(dev);
        if (ret != 0) {
                i915_gem_cleanup_aliasing_ppgtt(dev);
                return (ret);
        }

#if 0
        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
                int cfb_size;

                /* Leave 1M for line length buffer & misc. */

                /* Try to get a 32M buffer... */
                if (prealloc_size > (36*1024*1024))
                        cfb_size = 32*1024*1024;
                else /* fall back to 7/8 of the stolen space */
                        cfb_size = prealloc_size * 7 / 8;
                i915_setup_compression(dev, cfb_size);
        }
#endif

        /* Allow hardware batchbuffers unless told otherwise. */
        dev_priv->allow_batchbuffer = 1;
        return 0;
}

static int
i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

#if 0
        intel_register_dsm_handler();
#endif

        intel_modeset_init(dev);

        ret = i915_load_gem_init(dev);
        if (ret != 0)
                goto cleanup_gem;

        intel_modeset_gem_init(dev);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;

        dev->vblank_disable_allowed = 1;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        drm_kms_helper_poll_init(dev);

        /* We're off and running w/KMS */
        dev_priv->mm.suspended = 0;

        return (0);

cleanup_gem:
        DRM_LOCK(dev);
        i915_gem_cleanup_ringbuffer(dev);
        DRM_UNLOCK(dev);
        i915_gem_cleanup_aliasing_ppgtt(dev);
        return (ret);
}

static int
i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv;

        dev_priv = dev->dev_private;

        dev_priv->bridge_dev = intel_gtt_get_bridge_device();
        if (dev_priv->bridge_dev == NULL) {
                DRM_ERROR("bridge device not found\n");
                return (-1);
        }
        return (0);
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)
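
/*
 * MCHBAR is the memory controller's register window, exposed through the
 * host bridge's PCI config space: offset 0x44 on i915-class parts, 0x48
 * (as a 64-bit value) on gen4+.  On 915G/GM it is gated by bit 28 of the
 * DEVEN register; elsewhere by bit 0 of the BAR itself, which is what the
 * helpers below toggle.
 */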

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int reg;
        u32 temp_lo, temp_hi;
        u64 mchbar_addr, temp;

        dev_priv = dev->dev_private;
        reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (INTEL_INFO(dev)->gen >= 4)
                temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
        else
                temp_hi = 0;
        temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        vga = device_get_parent(dev->dev);
        dev_priv->mch_res_rid = 0x100;
        dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
            dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
            MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
        if (dev_priv->mch_res == NULL) {
                DRM_ERROR("failed mchbar resource alloc\n");
                return (-ENOMEM);
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                temp = rman_get_start(dev_priv->mch_res);
                temp >>= 32;
                pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
        }
        pci_write_config(dev_priv->bridge_dev, reg,
            rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
        return (0);
}

static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        int mchbar_reg;
        u32 temp;
        bool enabled;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
                enabled = (temp & DEVEN_MCHBAR_EN) != 0;
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled) {
                DRM_DEBUG("mchbar already enabled\n");
                return;
        }

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                    temp | DEVEN_MCHBAR_EN, 4);
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int mchbar_reg;
        u32 temp;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            DEVEN_REG, 4);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                            temp, 4);
                } else {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            mchbar_reg, 4);
                        temp &= ~1;
                        pci_write_config(dev_priv->bridge_dev, mchbar_reg,
                            temp, 4);
                }
        }

        if (dev_priv->mch_res != NULL) {
                vga = device_get_parent(dev->dev);
                BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                dev_priv->mch_res = NULL;
        }
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long base, size;
        int mmio_bar, ret;

        ret = 0;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = kmalloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
            M_ZERO | M_WAITOK);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = i915_get_device_id(dev->pci_device);

        if (i915_get_bridge_dev(dev)) {
                drm_free(dev_priv, DRM_MEM_DRIVER);
                return (-EIO);
        }
        dev_priv->mm.gtt = intel_gtt_get();

        /* Add register map (needed for suspend/resume) */
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);

        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time.  Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_mtrrfree;
        }

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);

        intel_irq_init(dev);
        intel_gt_init(dev);

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyway to avoid
         * stuck interrupts on some machines.
         */
1452
1453         lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
1454         lockinit(&dev_priv->error_lock, "915err", 0, LK_CANRECURSE);
1455         spin_init(&dev_priv->rps.lock);
1456
1457         lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
1458
1459         /* Init HWS */
1460         if (!I915_NEED_GFX_HWS(dev)) {
1461                 ret = i915_init_phys_hws(dev);
1462                 if (ret != 0) {
1463                         drm_rmmap(dev, dev_priv->mmio_map);
1464                         drm_free(dev_priv, DRM_MEM_DRIVER);
1465                         return ret;
1466                 }
1467         }
1468
1469         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1470                 dev_priv->num_pipe = 3;
1471         else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1472                 dev_priv->num_pipe = 2;
1473         else
1474                 dev_priv->num_pipe = 1;
1475
1476         ret = drm_vblank_init(dev, dev_priv->num_pipe);
1477         if (ret)
1478                 goto out_gem_unload;
1479
1480         /* Start out suspended */
1481         dev_priv->mm.suspended = 1;
1482
1483         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1484                 ret = i915_load_modeset_init(dev);
1485                 if (ret < 0) {
1486                         DRM_ERROR("failed to init modeset\n");
1487                         goto out_gem_unload;
1488                 }
1489         }
1490
1491         /* Must be done after probing outputs */
1492         intel_opregion_init(dev);
1493
1494         setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1495                     (unsigned long) dev);
1496
1497         if (IS_GEN5(dev)) {
1498                 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
1499                 i915_mch_dev = dev_priv;
1500                 dev_priv->mchdev_lock = &mchdev_lock;
1501                 lockmgr(&mchdev_lock, LK_RELEASE);
1502         }
1503
1504         return 0;
1505
1506 out_gem_unload:
1507         intel_teardown_gmbus(dev);
1508         intel_teardown_mchbar(dev);
1509         destroy_workqueue(dev_priv->wq);
1510 out_mtrrfree:
1511         return ret;
1512 }
1513
1514 int i915_driver_unload(struct drm_device *dev)
1515 {
1516         struct drm_i915_private *dev_priv = dev->dev_private;
1517         int ret;
1518
1519         intel_gpu_ips_teardown();
1520
1521         DRM_LOCK(dev);
1522         ret = i915_gpu_idle(dev);
1523         if (ret)
1524                 DRM_ERROR("failed to idle hardware: %d\n", ret);
1525         i915_gem_retire_requests(dev);
1526         DRM_UNLOCK(dev);
1527
1528         /* Cancel the retire work handler, which should be idle now. */
1529         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1530
1531         i915_free_hws(dev);
1532
1533         intel_teardown_mchbar(dev);
1534
1535         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1536                 intel_fbdev_fini(dev);
1537                 intel_modeset_cleanup(dev);
1538         }
1539
1540         /* Free error state after interrupts are fully disabled. */
1541         del_timer_sync(&dev_priv->hangcheck_timer);
1542         cancel_work_sync(&dev_priv->error_work);
1543         i915_destroy_error_state(dev);
1544
1545         intel_opregion_fini(dev);
1546
1547         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1548                 /* Flush any outstanding unpin_work. */
1549                 flush_workqueue(dev_priv->wq);
1550
1551                 DRM_LOCK(dev);
1552                 i915_gem_free_all_phys_object(dev);
1553                 i915_gem_cleanup_ringbuffer(dev);
1554                 DRM_UNLOCK(dev);
1555                 i915_gem_cleanup_aliasing_ppgtt(dev);
1556                 drm_mm_takedown(&dev_priv->mm.stolen);
1557
1558                 intel_cleanup_overlay(dev);
1559
1560                 if (!I915_NEED_GFX_HWS(dev))
1561                         i915_free_hws(dev);
1562         }
1563
1564         i915_gem_unload(dev);
1565
1566         bus_generic_detach(dev->dev);
1567         drm_rmmap(dev, dev_priv->mmio_map);
1568         intel_teardown_gmbus(dev);
1569
1570         destroy_workqueue(dev_priv->wq);
1571
1572         drm_free(dev->dev_private, DRM_MEM_DRIVER);
1573
1574         return 0;
1575 }
1576
1577 int
1578 i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1579 {
1580         struct drm_i915_file_private *i915_file_priv;
1581
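	/*
	 * Per-open-file state: each client gets its own lock and request
	 * list so its outstanding GEM requests can be tracked and released
	 * independently on close.
	 */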
1582         i915_file_priv = kmalloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
1583             M_WAITOK | M_ZERO);
1584
1585         spin_init(&i915_file_priv->mm.lock);
1586         INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1587         file_priv->driver_priv = i915_file_priv;
1588
1589         return (0);
1590 }
1591
1592 void
1593 i915_driver_lastclose(struct drm_device * dev)
1594 {
1595         drm_i915_private_t *dev_priv = dev->dev_private;
1596
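	/*
	 * Under KMS the fbdev console restore / vga_switcheroo handoff is
	 * not yet wired up on DragonFly (see KIB_NOTYET below); only the
	 * legacy UMS path tears down GEM and DMA state here.
	 */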
1597         if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1598 #if 1
1599                 KIB_NOTYET();
1600 #else
1601                 drm_fb_helper_restore();
1602                 vga_switcheroo_process_delayed_switch();
1603 #endif
1604                 return;
1605         }
1606         i915_gem_lastclose(dev);
1607         i915_dma_cleanup(dev);
1608 }
1609
1610 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1611 {
1612
1613         i915_gem_release(dev, file_priv);
1614 }
1615
1616 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1617 {
1618         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1619
1620         spin_uninit(&i915_file_priv->mm.lock);
1621         drm_free(i915_file_priv, DRM_MEM_FILES);
1622 }
1623
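/*
 * The ioctl table maps each DRM_I915_* request to its handler together
 * with access-control flags: DRM_AUTH requires an authenticated client,
 * DRM_MASTER/DRM_ROOT_ONLY restrict callers further, and DRM_UNLOCKED
 * handlers run without the global DRM lock.
 */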
1624 struct drm_ioctl_desc i915_ioctls[] = {
1625         DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1626         DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1627         DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
1628         DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1629         DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1630         DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1631         DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
1632         DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1633         DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
1634         DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
1635         DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1636         DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1637         DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1638         DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1639         DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1640         DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1641         DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1642         DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1643         DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1644         DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
1645         DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1646         DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1647         DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1648         DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
1649         DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1650         DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1651         DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
1652         DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1653         DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1654         DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
1655         DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1656         DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1657         DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1658         DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
1659         DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
1660         DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1661         DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1662         DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1663         DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1664         DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1665         DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1666         DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1667 };
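/*
 * A rough sketch of how userspace reaches one of the entries above (for
 * illustration only; assumes libdrm's drmIoctl() helper). GETPARAM is
 * DRM_AUTH, so the fd must already be authenticated with the master:
 *
 *	drm_i915_getparam_t gp;
 *	int chipset_id;
 *
 *	gp.param = I915_PARAM_CHIPSET_ID;
 *	gp.value = &chipset_id;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("chipset id 0x%x\n", chipset_id);
 */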
1668
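/*
 * Driver descriptor handed to the DRM core: feature flags, the
 * load/unload and per-file open/close entry points above, the GEM
 * object hooks, and the ioctl table.
 */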
1669 struct drm_driver i915_driver_info = {
1670         .driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1671             DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
1672             DRIVER_GEM /*| DRIVER_MODESET*/,
1673
1674         .buf_priv_size  = sizeof(drm_i915_private_t),
1675         .load           = i915_driver_load,
1676         .open           = i915_driver_open,
1677         .unload         = i915_driver_unload,
1678         .preclose       = i915_driver_preclose,
1679         .lastclose      = i915_driver_lastclose,
1680         .postclose      = i915_driver_postclose,
1681         .device_is_agp  = i915_driver_device_is_agp,
1682         .gem_init_object = i915_gem_init_object,
1683         .gem_free_object = i915_gem_free_object,
1684         .gem_pager_ops  = &i915_gem_pager_ops,
1685         .dumb_create    = i915_gem_dumb_create,
1686         .dumb_map_offset = i915_gem_mmap_gtt,
1687         .dumb_destroy   = i915_gem_dumb_destroy,
1688         .sysctl_init    = i915_sysctl_init,
1689         .sysctl_cleanup = i915_sysctl_cleanup,
1690
1691         .ioctls         = i915_ioctls,
1692         .max_ioctl      = DRM_ARRAY_SIZE(i915_ioctls),
1693
1694         .name           = DRIVER_NAME,
1695         .desc           = DRIVER_DESC,
1696         .date           = DRIVER_DATE,
1697         .major          = DRIVER_MAJOR,
1698         .minor          = DRIVER_MINOR,
1699         .patchlevel     = DRIVER_PATCHLEVEL,
1700 };
1701
1702 /**
1703  * Determine if the device really is AGP or not.
1704  *
1705  * All Intel graphics chipsets are treated as AGP, even if they are really
1706  * built-in.
1707  *
1708  * \param dev   The device to be tested.
1709  *
1710  * \returns
1711  * A value of 1 is always returned to indicate every i9x5 is AGP.
1712  */
1713 int i915_driver_device_is_agp(struct drm_device * dev)
1714 {
1715         return 1;
1716 }