drm: Store PCI device information in a struct pci_dev
[dragonfly.git] / sys / dev / drm / i915 / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include <linux/async.h>
30 #include <drm/drmP.h>
31 #include <drm/i915_drm.h>
32 #include <drm/drm_legacy.h>
33 #include "i915_drv.h"
34 #include "intel_drv.h"
35 #include "intel_ringbuffer.h"
36 #include <linux/workqueue.h>
37
38
/*
 * i915_getparam - GETPARAM ioctl: report a single driver/hardware capability.
 * @dev: DRM device being queried
 * @data: drm_i915_getparam_t from userspace (parameter id + user pointer)
 * @file_priv: per-open DRM file (unused here)
 *
 * Looks up the requested parameter and copies one int back through the
 * user-supplied pointer.  Returns 0 on success, -ENODEV for retired
 * UMS/DRI1 parameters, -EINVAL for unknown parameters, and -EFAULT if
 * the copy-out to userspace fails.
 */
static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                /* PCI device id of the GPU itself. */
                value = dev->pdev->device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                /* Fences below fence_reg_start are reserved for the kernel. */
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        /* Presence of the optional engines is probed per-ring. */
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
                value = intel_ring_initialized(&dev_priv->ring[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                /* INSTPM-relative constants only exist on gen4+. */
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = USES_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        /* param->value is a user pointer; write the result back out. */
        if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }

        return 0;
}
140
141 static int i915_setparam(struct drm_device *dev, void *data,
142                          struct drm_file *file_priv)
143 {
144         struct drm_i915_private *dev_priv = dev->dev_private;
145         drm_i915_setparam_t *param = data;
146
147         switch (param->param) {
148         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
149         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
150         case I915_SETPARAM_ALLOW_BATCHBUFFER:
151                 /* Reject all old ums/dri params. */
152                 return -ENODEV;
153
154         case I915_SETPARAM_NUM_USED_FENCES:
155                 if (param->value > dev_priv->num_fence_regs ||
156                     param->value < 0)
157                         return -EINVAL;
158                 /* Userspace can use first N regs */
159                 dev_priv->fence_reg_start = param->value;
160                 break;
161         default:
162                 DRM_DEBUG_DRIVER("unknown parameter %d\n",
163                                         param->param);
164                 return -EINVAL;
165         }
166
167         return 0;
168 }
169
170 static int i915_get_bridge_dev(struct drm_device *dev)
171 {
172         struct drm_i915_private *dev_priv = dev->dev_private;
173         static struct pci_dev i915_bridge_dev;
174
175         i915_bridge_dev.dev = pci_find_dbsf(0, 0, 0, 0);
176         if (!i915_bridge_dev.dev) {
177                 DRM_ERROR("bridge device not found\n");
178                 return -1;
179         }
180
181         dev_priv->bridge_dev = &i915_bridge_dev;
182         return 0;
183 }
184
/* Host-bridge config-space offsets of the MCHBAR base-address register:
 * 0x44 on pre-gen4 parts, 0x48 (64-bit) from gen4 on.  MCHBAR decodes a
 * 16KB window of MCH registers.  DEVEN_REG holds the device-enable bits,
 * including the MCHBAR enable used on i915G/GM.
 */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)
191
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        /* Gen4+ uses the 64-bit MCHBAR register at 0x48, older parts 0x44. */
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        device_t vga;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;

        /* Read the current MCHBAR base from bridge config space (the high
         * dword only exists on gen4+). */
        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        /* Ask the grandparent bus (above the VGA device) for MCHBAR_SIZE of
         * memory space anywhere in the address map. */
        vga = device_get_parent(dev->dev);
        dev_priv->mch_res_rid = 0x100;
        dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
            dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
            MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
        if (dev_priv->mch_res == NULL) {
                DRM_ERROR("failed mchbar resource alloc\n");
                return (-ENOMEM);
        }

        /* Program the freshly allocated base back into the bridge; high
         * dword first on gen4+, then the low dword (which holds the enable
         * bit untouched here — enabling is done by the caller). */
        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(rman_get_start(dev_priv->mch_res)));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(rman_get_start(dev_priv->mch_res)));
        return 0;
}
233
/* Setup MCHBAR if possible; sets dev_priv->mchbar_need_disable when we
 * enabled it ourselves and must therefore disable it again at teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        /* VLV has no MCHBAR in the host bridge; nothing to do. */
        if (IS_VALLEYVIEW(dev))
                return;

        dev_priv->mchbar_need_disable = false;

        /* The enable bit lives in DEVEN on i915G/GM, otherwise it is bit 0
         * of the MCHBAR register itself. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        /* Firmware left it disabled: allocate address space for it. */
        if (intel_alloc_mchbar_resource(dev))
                return;

        /* We enabled it, so remember to undo that on teardown. */
        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}
274
/*
 * intel_teardown_mchbar - undo intel_setup_mchbar().
 *
 * Clears the MCHBAR enable bit iff we set it ourselves (tracked via
 * dev_priv->mchbar_need_disable) and releases any address-space resource
 * allocated by intel_alloc_mchbar_resource().
 */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        device_t vga;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                /* Mirror the enable-bit location used in setup: DEVEN on
                 * i915G/GM, bit 0 of the MCHBAR register elsewhere. */
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        /* Release the bus resource we allocated (deactivate, then free). */
        if (dev_priv->mch_res != NULL) {
                vga = device_get_parent(dev->dev);
                BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                dev_priv->mch_res = NULL;
        }
}
304
#if 0
/* vga_switcheroo / VGA-arbitration support carried over from Linux but
 * compiled out on this platform (no vga_switcheroo infrastructure here).
 * Kept for reference so future ports can re-enable it. */
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* Power the GPU on/off when the mux hands the display to/from us. */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume_legacy(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_legacy(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};
#endif
357
/*
 * i915_load_modeset_init - KMS half of driver load.
 *
 * Brings up everything modesetting needs, in dependency order: VBIOS
 * parsing, power domains, interrupts, output/mode setup, GEM, and finally
 * fbdev + hotplug.  On failure, unwinds in reverse via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* A missing VBIOS is survivable; just note it and carry on. */
        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

#if 0
        /* If we have > 1 VGA cards, then we need to arbitrate access
         * to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;
#endif

        intel_power_domains_init_hw(dev_priv);

#ifdef __DragonFly__
        /* Mirror the platform IRQ into the Linux-style pdev shim. */
        dev_priv->dev->pdev->irq = dev->irq;
#endif
        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_gem_stolen;

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
        /* No display pipes (e.g. fused-off display): we are done. */
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        async_schedule(intel_fbdev_initial_config, dev_priv);

        drm_kms_helper_poll_init(dev);

        return 0;

/* Error unwinding: each label undoes everything initialized after it. */
cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
cleanup_irq:
        drm_irq_uninstall(dev);
cleanup_gem_stolen:
#if 0
        i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
#endif
        return ret;
}
459
#if IS_ENABLED(CONFIG_FB)
/*
 * i915_kick_out_firmware_fb - evict firmware framebuffer drivers from our
 * GTT aperture so their scanout cannot conflict with ours.
 *
 * Registers the mappable GTT range as an aperture and asks the fb core to
 * remove any driver claiming it; "primary" is set when the device carries
 * the shadowed VGA ROM (i.e. it is the boot display).  Returns 0 on
 * success or a negative errno.
 */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        /* The conflict range is the whole mappable GTT aperture. */
        ap->ranges[0].base = dev_priv->gtt.mappable_base;
        ap->ranges[0].size = dev_priv->gtt.mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}
#else
/* Without CONFIG_FB there is no firmware fb driver to kick out. */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        return 0;
}
#endif
490
/*
 * i915_kick_out_vgacon - detach the VGA text console before KMS takes over.
 *
 * Three build-dependent variants: nothing to do without CONFIG_VGA_CONSOLE;
 * impossible (-ENODEV) when vgacon exists but there is no dummy console to
 * hand off to; otherwise swap consoles to dummy_con and unregister vgacon.
 */
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        DRM_INFO("Replacing VGA console driver\n");

        console_lock();
        /* Move every console to the dummy driver first, then drop vgacon. */
        if (con_is_bound(&vga_con))
                ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
        if (ret == 0) {
                ret = do_unregister_con_driver(&vga_con);

                /* Ignore "already unregistered". */
                if (ret == -ENODEV)
                        ret = 0;
        }
        console_unlock();

        return ret;
}
#endif
523
/*
 * i915_dump_device_info - debug dump of gen, PCI id/revision and feature
 * flags.  The body is currently compiled out (#if 0) on this platform, so
 * the function is a no-op; kept so call sites need no #ifdefs.
 */
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
#if 0
        const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         dev_priv->dev->pdev->revision,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
#endif
}
545
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        enum i915_pipe pipe;

        /* dev_priv->info is nominally write-once; cast away const to fill
         * in the runtime-determined fields. */
        info = (struct intel_device_info *)&dev_priv->info;

        /* VLV and gen9 have two sprite planes per pipe, everything else one. */
        if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 1;

        if (i915.disable_display) {
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (info->num_pipes > 0 &&
                   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
                   !IS_VALLEYVIEW(dev)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);

                /*
                 * SFUSE_STRAP is supposed to have a bit signalling the display
                 * is fused off. Unfortunately it seems that, at least in
                 * certain cases, fused off display means that PCH display
                 * reads don't land anywhere. In that case, we read 0s.
                 *
                 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
                 * should be set when taking over after the firmware.
                 */
                if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
                    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
                    (dev_priv->pch_type == PCH_CPT &&
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
                }
        }

        if (IS_CHERRYVIEW(dev)) {
                u32 fuse, mask_eu;

                /* CHV fuses off EUs in pairs per subslice row; count the
                 * disable bits and subtract from the full complement of 16. */
                fuse = I915_READ(CHV_FUSE_GT);
                mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                  CHV_FGT_EU_DIS_SS0_R1_MASK |
                                  CHV_FGT_EU_DIS_SS1_R0_MASK |
                                  CHV_FGT_EU_DIS_SS1_R1_MASK);
                info->eu_total = 16 - hweight32(mask_eu);
        }
}
612
613 /**
614  * i915_driver_load - setup chip and create an initial config
615  * @dev: DRM device
616  * @flags: startup flags
617  *
618  * The driver load routine has to do several things:
619  *   - drive output discovery via intel_modeset_init()
620  *   - initialize the memory manager
621  *   - allocate initial config memory
622  *   - setup the DRM framebuffer with the allocated memory
623  */
624 int i915_driver_load(struct drm_device *dev, unsigned long flags)
625 {
626         struct drm_i915_private *dev_priv = dev->dev_private;
627         struct intel_device_info *info, *device_info;
628         unsigned long base, size;
629         int ret = 0, mmio_bar, mmio_size;
630         uint32_t aperture_size;
631
632         /* XXX: struct pci_dev */
633         info = i915_get_device_id(dev->pdev->device);
634
635         /* Refuse to load on gen6+ without kms enabled. */
636         if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
637                 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
638                 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
639                 return -ENODEV;
640         }
641
642         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
643         if (dev_priv == NULL)
644                 return -ENOMEM;
645
646         dev->dev_private = dev_priv;
647         dev_priv->dev = dev;
648
649         /* Setup the write-once "constant" device info */
650         device_info = (struct intel_device_info *)&dev_priv->info;
651         memcpy(device_info, info, sizeof(dev_priv->info));
652         device_info->device_id = dev->pdev->device;
653
654         lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
655         lockinit(&dev_priv->gpu_error.lock, "915err", 0, LK_CANRECURSE);
656         lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE);
657         lockinit(&dev_priv->uncore.lock, "915gt", 0, LK_CANRECURSE);
658         spin_init(&dev_priv->mm.object_stat_lock, "i915osl");
659         spin_init(&dev_priv->mmio_flip_lock, "i915mfl");
660         lockinit(&dev_priv->dpio_lock, "i915dpio", 0, LK_CANRECURSE);
661         lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE);
662
663         intel_pm_setup(dev);
664
665         intel_display_crc_init(dev);
666
667         i915_dump_device_info(dev_priv);
668
669         /* Not all pre-production machines fall into this category, only the
670          * very first ones. Almost everything should work, except for maybe
671          * suspend/resume. And we don't implement workarounds that affect only
672          * pre-production machines. */
673         if (IS_HSW_EARLY_SDV(dev))
674                 DRM_INFO("This is an early pre-production Haswell machine. "
675                          "It may not be fully functional.\n");
676
677         if (i915_get_bridge_dev(dev)) {
678                 ret = -EIO;
679                 goto free_priv;
680         }
681
682         mmio_bar = IS_GEN2(dev) ? 1 : 0;
683         /* Before gen4, the registers and the GTT are behind different BARs.
684          * However, from gen4 onwards, the registers and the GTT are shared
685          * in the same BAR, so we want to restrict this ioremap from
686          * clobbering the GTT which we want ioremap_wc instead. Fortunately,
687          * the register BAR remains the same size for all the earlier
688          * generations up to Ironlake.
689          */
690         if (info->gen < 5)
691                 mmio_size = 512*1024;
692         else
693                 mmio_size = 2*1024*1024;
694
695 #if 0
696         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
697         if (!dev_priv->regs) {
698                 DRM_ERROR("failed to map registers\n");
699                 ret = -EIO;
700                 goto put_bridge;
701         }
702 #else
703         base = drm_get_resource_start(dev, mmio_bar);
704         size = drm_get_resource_len(dev, mmio_bar);
705
706         ret = drm_legacy_addmap(dev, base, size, _DRM_REGISTERS,
707             _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
708 #endif
709
710         /* This must be called before any calls to HAS_PCH_* */
711         intel_detect_pch(dev);
712
713         intel_uncore_init(dev);
714
715         ret = i915_gem_gtt_init(dev);
716         if (ret)
717                 goto out_regs;
718
719         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
720                 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
721                  * otherwise the vga fbdev driver falls over. */
722                 ret = i915_kick_out_firmware_fb(dev_priv);
723                 if (ret) {
724                         DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
725                         goto out_gtt;
726                 }
727
728                 ret = i915_kick_out_vgacon(dev_priv);
729                 if (ret) {
730                         DRM_ERROR("failed to remove conflicting VGA console\n");
731                         goto out_gtt;
732                 }
733         }
734
735 #if 0
736         pci_set_master(dev->pdev);
737
738         /* overlay on gen2 is broken and can't address above 1G */
739         if (IS_GEN2(dev))
740                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
741
742         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
743          * using 32bit addressing, overwriting memory if HWS is located
744          * above 4GB.
745          *
746          * The documentation also mentions an issue with undefined
747          * behaviour if any general state is accessed within a page above 4GB,
748          * which also needs to be handled carefully.
749          */
750         if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
751                 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
752 #endif
753
754         aperture_size = dev_priv->gtt.mappable_end;
755
756         dev_priv->gtt.mappable =
757                 io_mapping_create_wc(dev_priv->gtt.mappable_base,
758                                      aperture_size);
759         if (dev_priv->gtt.mappable == NULL) {
760                 ret = -EIO;
761                 goto out_gtt;
762         }
763
764         dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
765                                               aperture_size);
766
767         /* The i915 workqueue is primarily used for batched retirement of
768          * requests (and thus managing bo) once the task has been completed
769          * by the GPU. i915_gem_retire_requests() is called directly when we
770          * need high-priority retirement, such as waiting for an explicit
771          * bo.
772          *
773          * It is also used for periodic low-priority events, such as
774          * idle-timers and recording error state.
775          *
776          * All tasks on the workqueue are expected to acquire the dev mutex
777          * so there is no point in running more than one instance of the
778          * workqueue at any time.  Use an ordered one.
779          */
780         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
781         if (dev_priv->wq == NULL) {
782                 DRM_ERROR("Failed to create our workqueue.\n");
783                 ret = -ENOMEM;
784                 goto out_mtrrfree;
785         }
786
787         dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
788         if (dev_priv->dp_wq == NULL) {
789                 DRM_ERROR("Failed to create our dp workqueue.\n");
790                 ret = -ENOMEM;
791                 goto out_freewq;
792         }
793
794         dev_priv->gpu_error.hangcheck_wq =
795                 alloc_ordered_workqueue("i915-hangcheck", 0);
796         if (dev_priv->gpu_error.hangcheck_wq == NULL) {
797                 DRM_ERROR("Failed to create our hangcheck workqueue.\n");
798                 ret = -ENOMEM;
799                 goto out_freedpwq;
800         }
801
802         intel_irq_init(dev_priv);
803         intel_uncore_sanitize(dev);
804
805         /* Try to make sure MCHBAR is enabled before poking at it */
806         intel_setup_mchbar(dev);
807         intel_setup_gmbus(dev);
808         intel_opregion_setup(dev);
809
810         intel_setup_bios(dev);
811
812         i915_gem_load(dev);
813
814         /* On the 945G/GM, the chipset reports the MSI capability on the
815          * integrated graphics even though the support isn't actually there
816          * according to the published specs.  It doesn't appear to function
817          * correctly in testing on 945G.
818          * This may be a side effect of MSI having been made available for PEG
819          * and the registers being closely associated.
820          *
821          * According to chipset errata, on the 965GM, MSI interrupts may
822          * be lost or delayed, but we use them anyways to avoid
823          * stuck interrupts on some machines.
824          */
825 #if 0
826         if (!IS_I945G(dev) && !IS_I945GM(dev))
827                 pci_enable_msi(dev->pdev);
828 #endif
829
830         intel_device_info_runtime_init(dev);
831
832         if (INTEL_INFO(dev)->num_pipes) {
833                 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
834                 if (ret)
835                         goto out_gem_unload;
836         }
837
838         intel_power_domains_init(dev_priv);
839
840         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
841                 ret = i915_load_modeset_init(dev);
842                 if (ret < 0) {
843                         DRM_ERROR("failed to init modeset\n");
844                         goto out_power_well;
845                 }
846         }
847
848 #if 0
849         i915_setup_sysfs(dev);
850 #endif
851
852         if (INTEL_INFO(dev)->num_pipes) {
853                 /* Must be done after probing outputs */
854                 intel_opregion_init(dev);
855 #if 0
856                 acpi_video_register();
857 #endif
858         }
859
860         if (IS_GEN5(dev))
861                 intel_gpu_ips_init(dev_priv);
862
863         intel_runtime_pm_enable(dev_priv);
864
865         i915_audio_component_init(dev_priv);
866
867         return 0;
868
869 out_power_well:
870         intel_power_domains_fini(dev_priv);
871         drm_vblank_cleanup(dev);
872 out_gem_unload:
873
874         intel_teardown_gmbus(dev);
875         intel_teardown_mchbar(dev);
876         pm_qos_remove_request(&dev_priv->pm_qos);
877         destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
878 out_freedpwq:
879         destroy_workqueue(dev_priv->dp_wq);
880 out_freewq:
881         destroy_workqueue(dev_priv->wq);
882 out_mtrrfree:
883         arch_phys_wc_del(dev_priv->gtt.mtrr);
884 #if 0
885         io_mapping_free(dev_priv->gtt.mappable);
886 #endif
887 out_gtt:
888         i915_global_gtt_cleanup(dev);
889 out_regs:
890         intel_uncore_fini(dev);
891 free_priv:
892         kfree(dev_priv);
893         return ret;
894 }
895
/**
 * i915_driver_unload - tear down the driver, roughly reversing load order
 * @dev: DRM device
 *
 * Idles the GPU first (aborting the unload on failure), then unwinds
 * power domains, modeset state, workqueues, the GTT and MMIO mappings,
 * and finally frees the device-private structure.  The statement order
 * below is significant: later teardown steps assume the earlier ones
 * (e.g. workqueues are flushed/destroyed only after the work sources
 * such as hangcheck and modeset are gone).
 *
 * Returns 0 on success, or the negative error from i915_gem_suspend()
 * if the hardware could not be idled (unload is aborted in that case).
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	/* Idle the GPU before dismantling anything it may still touch. */
	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

#if 0
	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
#endif
	/* Release the write-combining handle set up for the GTT aperture. */
	arch_phys_wc_del(dev_priv->gtt.mtrr);

#if 0
	acpi_video_unregister();
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

	}

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
#if 0
	i915_destroy_error_state(dev);
#endif

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		/* Ring/context teardown requires the GEM mutex. */
		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
#if 0
		i915_gem_cleanup_stolen(dev);
#endif
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	bus_generic_detach(dev->dev);
	drm_legacy_rmmap(dev, dev_priv->mmio_map);

	/*
	 * Destroy the workqueues only after all users above (hangcheck,
	 * modeset, GEM retirement) have been shut down.
	 */
	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
#if 0
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);
#endif

	/* Drop the bridge device reference taken during load. */
	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
993
/**
 * i915_driver_open - per-client open hook
 * @dev: DRM device being opened
 * @file: DRM file private for the new client
 *
 * Sets up the per-file GEM state; returns 0 on success or the negative
 * error code propagated from i915_gem_open().
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}
1004
1005 /**
1006  * i915_driver_lastclose - clean up after all DRM clients have exited
1007  * @dev: DRM device
1008  *
1009  * Take care of cleaning up after all DRM clients have exited.  In the
1010  * mode setting case, we want to restore the kernel's initial mode (just
1011  * in case the last client left us in a bad state).
1012  *
1013  * Additionally, in the non-mode setting case, we'll tear down the GTT
1014  * and DMA structures, since the kernel won't be using them, and clea
1015  * up any GEM state.
1016  */
1017 void i915_driver_lastclose(struct drm_device *dev)
1018 {
1019         intel_fbdev_restore_mode(dev);
1020 #if 0
1021         vga_switcheroo_process_delayed_switch();
1022 #endif
1023 }
1024
/**
 * i915_driver_preclose - called as a DRM file is being closed
 * @dev: DRM device
 * @file: DRM file being closed
 *
 * Releases this client's GEM contexts and request state under
 * struct_mutex, then lets the modeset code clean up any state still
 * associated with the client.
 */
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_modeset_preclose(dev, file);
}
1035
1036 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1037 {
1038         struct drm_i915_file_private *file_priv = file->driver_priv;
1039
1040         if (file_priv && file_priv->bsd_ring)
1041                 file_priv->bsd_ring = NULL;
1042         kfree(file_priv);
1043 }
1044
/*
 * Stub handler for the obsolete GEM pin/unpin ioctls (see the
 * I915_GEM_PIN/I915_GEM_UNPIN table entries below): always fails
 * with -ENODEV so userspace learns the operation is unsupported.
 */
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
1051
/*
 * The i915 ioctl dispatch table, one entry per driver ioctl number.
 *
 * Legacy pre-GEM ioctls (DMA, heap, vblank-swap, ...) are wired to
 * drm_noop; the obsolete pin/unpin ioctls go to the -ENODEV stub
 * above.  Each entry also carries the access flags (DRM_AUTH,
 * DRM_MASTER, DRM_ROOT_ONLY, DRM_RENDER_ALLOW, ...) declared for
 * that ioctl.
 */
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
#if 0
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
#endif
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

/* Number of entries in the ioctl table above. */
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
1110
1111 /*
1112  * This is really ugly: Because old userspace abused the linux agp interface to
1113  * manage the gtt, we need to claim that all intel devices are agp.  For
1114  * otherwise the drm core refuses to initialize the agp support code.
1115  */
1116 int i915_driver_device_is_agp(struct drm_device *dev)
1117 {
1118         return 1;
1119 }