/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}

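/*
 * Userspace usage sketch (not part of this driver; assumes libdrm's
 * drmIoctl() wrapper): querying one of the parameters handled above.
 *
 *	int id = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &id,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", id);
 */
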
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	static struct pci_dev i915_bridge_dev;

	i915_bridge_dev.dev.bsddev = pci_find_dbsf(0, 0, 0, 0);
	if (!i915_bridge_dev.dev.bsddev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}

	dev_priv->bridge_dev = &i915_bridge_dev;
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

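/*
 * MCHBAR is the memory controller hub's register window. MCHBAR_I915/I965
 * are the bridge config-space offsets of its base address (64-bit from gen4
 * onwards, hence the reg + 4 upper-dword accesses below), and on 915G/GM
 * the window is enabled via bit 28 of the DEVEN register rather than bit 0
 * of the BAR itself.
 */
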
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	device_t vga;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->dev->bsddev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev->bsddev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return -ENOMEM;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(rman_get_start(dev_priv->mch_res)));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(rman_get_start(dev_priv->mch_res)));
	return 0;
}

/* Setup MCHBAR if possible; note in mchbar_need_disable whether we should
 * disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	device_t vga;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev->bsddev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

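/*
 * With decode enabled we claim the legacy VGA I/O and memory ranges as well
 * as the normal (relocatable) ones, so the VGA arbiter routes legacy cycles
 * to this device; with decode disabled only the normal resources remain ours.
 */
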
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

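/*
 * vga_switcheroo invokes set_gpu_state to power this GPU on or off when a
 * hybrid-graphics mux switches away from it, and polls can_switch to defer
 * the switch while userspace still has the device open.
 */
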
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_ucode_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config_async(dev);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	intel_guc_ucode_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

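/*
 * PRINT_S/PRINT_FLAG above are an X-macro pair: DEV_INFO_FOR_EACH_FLAG is
 * expanded twice, first to one "%s" per device flag to build the format
 * string, then again to the matching value arguments. Abridged sketch of
 * the expansion (flag names are examples from intel_device_info):
 *
 *	DRM_DEBUG_DRIVER("... flags=" "%s" "%s" ...,
 *			 info->gen, device, revision,
 *			 info->is_mobile ? "is_mobile," : "",
 *			 info->has_llc ? "has_llc," : "", ...);
 */
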
static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

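/*
 * Worked example for the fuse math above: with, say, three EU-disable bits
 * set for subslice 0, hweight32(eu_dis) == 3, so that subslice contributes
 * 8 - 3 = 5 EUs to eu_total; a fully fused-off subslice contributes nothing
 * because its CHV_FGT_DISABLE_SSx bit skips the block entirely.
 */
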
static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			      (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

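/*
 * In gen9_sseu_info_init() above, each slice's GEN9_EU_DISABLE register
 * packs one disable bit per EU, eight per subslice, so subslice ss's byte
 * is (eu_disable >> (ss * 8)) & 0xff. For example, a byte of 0x03 means
 * two EUs are fused off and eu_per_ss = 8 - 2 = 6; any 7-EU subslice is
 * noted in subslice_7eu[] so work distribution can account for the
 * imbalance.
 */
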
static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

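/*
 * Reading note for broadwell_sseu_info_init() above: BDW spreads the
 * per-slice EU disable bits across three fuse registers, and the fields for
 * slices 1 and 2 straddle register boundaries, hence the shift/OR stitching
 * when assembling eu_disable[] from GEN8_EU_DISABLE0..2.
 */
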
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   HAS_PCH_SPLIT(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	return 0;

out_free_dp_wq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

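/*
 * Note: alloc_ordered_workqueue() returns a workqueue that executes at most
 * one work item at a time, in queueing order, which is exactly the
 * single-instance behaviour the comment in i915_workqueues_init() relies on.
 */
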
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

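/*
 * The mapping established here backs the I915_READ()/I915_WRITE() register
 * accessors (via dev_priv->regs), which is why i915_mmio_setup() must run
 * before intel_uncore_init() and any MCHBAR poking in the load path below.
 */
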
static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_teardown_mchbar(dev);

	pci_iounmap(dev->pdev, dev_priv->regs);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0;
	uint32_t aperture_size;

	/* XXX: struct pci_dev */
	info = i915_get_device_id(dev->pdev->device);

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
	lockinit(&dev_priv->gpu_error.lock, "915err", 0, LK_CANRECURSE);
	lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->uncore.lock, "915gt", 0, LK_CANRECURSE);
	spin_init(&dev_priv->mm.object_stat_lock, "i915osl");
	spin_init(&dev_priv->mmio_flip_lock, "i915mfl");
	lockinit(&dev_priv->sb_lock, "i915sbl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto out_free_priv;

	intel_pm_setup(dev);

	intel_runtime_pm_get(dev_priv);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto out_runtime_pm_put;
	}

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_uncore_fini;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	intel_opregion_setup(dev);

	i915_gem_load_init(dev);
	i915_gem_shrinker_init(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	i915_gem_shrinker_cleanup(dev_priv);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_uncore_fini:
	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
	i915_gem_load_cleanup(dev);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_workqueues_cleanup(dev_priv);
out_free_priv:
	kfree(dev_priv);

	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_fbdev_fini(dev);

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	i915_gem_shrinker_cleanup(dev_priv);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);

	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);

	i915_gem_load_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
	i915_workqueues_cleanup(dev_priv);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any AGP state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);

	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

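/*
 * The legacy I915_GEM_PIN/UNPIN ioctls in the table below are wired to the
 * stub above: modern GEM manages pinning internally, so userspace pin
 * requests are simply rejected with -ENODEV.
 */
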
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
#if 0
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
#endif
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);