1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 #include <drm/i915_drm.h>
33 #include <drm/drm_pciids.h>
34 #include "intel_drv.h"
36 #include <linux/module.h>
37 #include <drm/drm_crtc_helper.h>
39 static struct drm_driver driver;
41 #define GEN_DEFAULT_PIPEOFFSETS \
42 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
43 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
44 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
45 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
46 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
48 #define GEN_CHV_PIPEOFFSETS \
49 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
50 CHV_PIPE_C_OFFSET }, \
51 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
52 CHV_TRANSCODER_C_OFFSET, }, \
53 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
54 CHV_PALETTE_C_OFFSET }
56 #define CURSOR_OFFSETS \
57 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
59 #define IVB_CURSOR_OFFSETS \
60 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
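/*
 * Illustrative sketch, not driver code: the offset macros above rely on
 * C99 designated initializers, so each intel_device_info below can
 * splice them in next to its own fields, e.g.:
 *
 *	static const struct intel_device_info example_info = {
 *		.gen = 7, .num_pipes = 3,
 *		IVB_CURSOR_OFFSETS,
 *		GEN_DEFAULT_PIPEOFFSETS,
 *	};
 *
 * When a member is initialized more than once the last initializer
 * wins, which is what the "legal, last one wins" comments further down
 * rely on to override fields pulled in through a shared macro.
 */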
62 static const struct intel_device_info intel_i830_info = {
63 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
64 .has_overlay = 1, .overlay_needs_physical = 1,
65 .ring_mask = RENDER_RING,
66 GEN_DEFAULT_PIPEOFFSETS,
70 static const struct intel_device_info intel_845g_info = {
71 .gen = 2, .num_pipes = 1,
72 .has_overlay = 1, .overlay_needs_physical = 1,
73 .ring_mask = RENDER_RING,
74 GEN_DEFAULT_PIPEOFFSETS,
78 static const struct intel_device_info intel_i85x_info = {
79 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
80 .cursor_needs_physical = 1,
81 .has_overlay = 1, .overlay_needs_physical = 1,
83 .ring_mask = RENDER_RING,
84 GEN_DEFAULT_PIPEOFFSETS,
88 static const struct intel_device_info intel_i865g_info = {
89 .gen = 2, .num_pipes = 1,
90 .has_overlay = 1, .overlay_needs_physical = 1,
91 .ring_mask = RENDER_RING,
92 GEN_DEFAULT_PIPEOFFSETS,
96 static const struct intel_device_info intel_i915g_info = {
97 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
98 .has_overlay = 1, .overlay_needs_physical = 1,
99 .ring_mask = RENDER_RING,
100 GEN_DEFAULT_PIPEOFFSETS,
103 static const struct intel_device_info intel_i915gm_info = {
104 .gen = 3, .is_mobile = 1, .num_pipes = 2,
105 .cursor_needs_physical = 1,
106 .has_overlay = 1, .overlay_needs_physical = 1,
109 .ring_mask = RENDER_RING,
110 GEN_DEFAULT_PIPEOFFSETS,
113 static const struct intel_device_info intel_i945g_info = {
114 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
115 .has_overlay = 1, .overlay_needs_physical = 1,
116 .ring_mask = RENDER_RING,
117 GEN_DEFAULT_PIPEOFFSETS,
120 static const struct intel_device_info intel_i945gm_info = {
121 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
122 .has_hotplug = 1, .cursor_needs_physical = 1,
123 .has_overlay = 1, .overlay_needs_physical = 1,
126 .ring_mask = RENDER_RING,
127 GEN_DEFAULT_PIPEOFFSETS,
131 static const struct intel_device_info intel_i965g_info = {
132 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
135 .ring_mask = RENDER_RING,
136 GEN_DEFAULT_PIPEOFFSETS,
140 static const struct intel_device_info intel_i965gm_info = {
141 .gen = 4, .is_crestline = 1, .num_pipes = 2,
142 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
145 .ring_mask = RENDER_RING,
146 GEN_DEFAULT_PIPEOFFSETS,
150 static const struct intel_device_info intel_g33_info = {
151 .gen = 3, .is_g33 = 1, .num_pipes = 2,
152 .need_gfx_hws = 1, .has_hotplug = 1,
154 .ring_mask = RENDER_RING,
155 GEN_DEFAULT_PIPEOFFSETS,
159 static const struct intel_device_info intel_g45_info = {
160 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
161 .has_pipe_cxsr = 1, .has_hotplug = 1,
162 .ring_mask = RENDER_RING | BSD_RING,
163 GEN_DEFAULT_PIPEOFFSETS,
167 static const struct intel_device_info intel_gm45_info = {
168 .gen = 4, .is_g4x = 1, .num_pipes = 2,
169 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
170 .has_pipe_cxsr = 1, .has_hotplug = 1,
172 .ring_mask = RENDER_RING | BSD_RING,
173 GEN_DEFAULT_PIPEOFFSETS,
177 static const struct intel_device_info intel_pineview_info = {
178 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
179 .need_gfx_hws = 1, .has_hotplug = 1,
181 GEN_DEFAULT_PIPEOFFSETS,
185 static const struct intel_device_info intel_ironlake_d_info = {
186 .gen = 5, .num_pipes = 2,
187 .need_gfx_hws = 1, .has_hotplug = 1,
188 .ring_mask = RENDER_RING | BSD_RING,
189 GEN_DEFAULT_PIPEOFFSETS,
193 static const struct intel_device_info intel_ironlake_m_info = {
194 .gen = 5, .is_mobile = 1, .num_pipes = 2,
195 .need_gfx_hws = 1, .has_hotplug = 1,
197 .ring_mask = RENDER_RING | BSD_RING,
198 GEN_DEFAULT_PIPEOFFSETS,
202 static const struct intel_device_info intel_sandybridge_d_info = {
203 .gen = 6, .num_pipes = 2,
204 .need_gfx_hws = 1, .has_hotplug = 1,
206 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
208 GEN_DEFAULT_PIPEOFFSETS,
212 static const struct intel_device_info intel_sandybridge_m_info = {
213 .gen = 6, .is_mobile = 1, .num_pipes = 2,
214 .need_gfx_hws = 1, .has_hotplug = 1,
216 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
218 GEN_DEFAULT_PIPEOFFSETS,
222 #define GEN7_FEATURES \
223 .gen = 7, .num_pipes = 3, \
224 .need_gfx_hws = 1, .has_hotplug = 1, \
226 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
229 static const struct intel_device_info intel_ivybridge_d_info = {
232 GEN_DEFAULT_PIPEOFFSETS,
236 static const struct intel_device_info intel_ivybridge_m_info = {
240 GEN_DEFAULT_PIPEOFFSETS,
244 static const struct intel_device_info intel_ivybridge_q_info = {
247 .num_pipes = 0, /* legal, last one wins */
248 GEN_DEFAULT_PIPEOFFSETS,
252 static const struct intel_device_info intel_valleyview_m_info = {
257 .display_mmio_offset = VLV_DISPLAY_BASE,
258 .has_fbc = 0, /* legal, last one wins */
259 .has_llc = 0, /* legal, last one wins */
260 GEN_DEFAULT_PIPEOFFSETS,
264 static const struct intel_device_info intel_valleyview_d_info = {
268 .display_mmio_offset = VLV_DISPLAY_BASE,
269 .has_fbc = 0, /* legal, last one wins */
270 .has_llc = 0, /* legal, last one wins */
271 GEN_DEFAULT_PIPEOFFSETS,
275 static const struct intel_device_info intel_haswell_d_info = {
280 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
281 GEN_DEFAULT_PIPEOFFSETS,
285 static const struct intel_device_info intel_haswell_m_info = {
291 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
292 GEN_DEFAULT_PIPEOFFSETS,
296 static const struct intel_device_info intel_broadwell_d_info = {
297 .gen = 8, .num_pipes = 3,
298 .need_gfx_hws = 1, .has_hotplug = 1,
299 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
304 GEN_DEFAULT_PIPEOFFSETS,
308 static const struct intel_device_info intel_broadwell_m_info = {
309 .gen = 8, .is_mobile = 1, .num_pipes = 3,
310 .need_gfx_hws = 1, .has_hotplug = 1,
311 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
316 GEN_DEFAULT_PIPEOFFSETS,
320 static const struct intel_device_info intel_broadwell_gt3d_info = {
321 .gen = 8, .num_pipes = 3,
322 .need_gfx_hws = 1, .has_hotplug = 1,
323 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
328 GEN_DEFAULT_PIPEOFFSETS,
332 static const struct intel_device_info intel_broadwell_gt3m_info = {
333 .gen = 8, .is_mobile = 1, .num_pipes = 3,
334 .need_gfx_hws = 1, .has_hotplug = 1,
335 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
340 GEN_DEFAULT_PIPEOFFSETS,
344 static const struct intel_device_info intel_cherryview_info = {
346 .gen = 8, .num_pipes = 3,
347 .need_gfx_hws = 1, .has_hotplug = 1,
348 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
350 .display_mmio_offset = VLV_DISPLAY_BASE,
356 * Make sure the device matches here are ordered from most specific to
357 * most general. For example, since the Quanta match is based on the subsystem
358 * and subvendor IDs, we need it to come before the more general IVB
359 * PCI ID matches, otherwise we'll use the wrong info struct above.
361 #define INTEL_PCI_IDS \
362 INTEL_I830_IDS(&intel_i830_info), \
363 INTEL_I845G_IDS(&intel_845g_info), \
364 INTEL_I85X_IDS(&intel_i85x_info), \
365 INTEL_I865G_IDS(&intel_i865g_info), \
366 INTEL_I915G_IDS(&intel_i915g_info), \
367 INTEL_I915GM_IDS(&intel_i915gm_info), \
368 INTEL_I945G_IDS(&intel_i945g_info), \
369 INTEL_I945GM_IDS(&intel_i945gm_info), \
370 INTEL_I965G_IDS(&intel_i965g_info), \
371 INTEL_G33_IDS(&intel_g33_info), \
372 INTEL_I965GM_IDS(&intel_i965gm_info), \
373 INTEL_GM45_IDS(&intel_gm45_info), \
374 INTEL_G45_IDS(&intel_g45_info), \
375 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
376 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
377 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
378 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
379 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
380 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
381 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
382 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
383 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
384 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
385 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
386 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
387 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
388 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
389 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
390 INTEL_CHV_IDS(&intel_cherryview_info)
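/*
 * Sketch of what the table below expands to, assuming the usual
 * INTEL_VGA_DEVICE() helper behind the *_IDS() macros (each entry
 * carries a pointer to the matching info struct in driver_data):
 *
 *	static const struct pci_device_id pciidlist[] = {
 *		{ 0x8086, 0x3577, ..., (unsigned long)&intel_i830_info },
 *		...
 *		{0, 0, 0}	terminator, which the did->device != 0
 *				loops further down depend on
 *	};
 */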
392 static const struct pci_device_id pciidlist[] = { /* aka */
397 #define PCI_VENDOR_INTEL 0x8086
399 void intel_detect_pch(struct drm_device *dev)
401 struct drm_i915_private *dev_priv = dev->dev_private;
402 struct device *pch = NULL;
403 struct pci_devinfo *di;
405 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
406 * (which really amounts to a PCH but no South Display).
408 if (INTEL_INFO(dev)->num_pipes == 0) {
409 dev_priv->pch_type = PCH_NOP;
413 /* XXX The ISA bridge probe causes some old Core2 machines to hang */
414 if (INTEL_INFO(dev)->gen < 5)
418 * We probe the ISA bridge instead of Dev31:Fun0 to make graphics
419 * device passthrough easy for the VMM, which then only needs to
420 * expose the ISA bridge so the driver can identify the real hardware
421 * underneath. This is a requirement from the virtualization team.
423 * In some virtualized environments (e.g. XEN), there may be an
424 * irrelevant ISA bridge in the system. To work reliably, we should
425 * scan through all the ISA bridge devices and check for the first
426 * match, instead of only checking the first one.
430 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) {
431 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
432 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
433 dev_priv->pch_id = id;
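/*
 * INTEL_PCH_DEVICE_ID_MASK keeps only the high byte of the PCI
 * device id (0xff00 at the time of writing), so every SKU of a PCH
 * family maps to a single *_DEVICE_ID_TYPE value. The WARN_ONs
 * below then sanity-check the expected CPU/PCH pairings: Ibex Peak
 * with gen5, CougarPoint and PantherPoint with SNB/IVB, LynxPoint
 * with Haswell.
 */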
435 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
436 dev_priv->pch_type = PCH_IBX;
437 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
438 WARN_ON(!IS_GEN5(dev));
439 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
440 dev_priv->pch_type = PCH_CPT;
441 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
442 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
443 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
444 /* PantherPoint is CPT compatible */
445 dev_priv->pch_type = PCH_CPT;
446 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
447 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
448 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
449 dev_priv->pch_type = PCH_LPT;
450 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
451 WARN_ON(!IS_HASWELL(dev));
452 WARN_ON(IS_ULT(dev));
453 } else if (IS_BROADWELL(dev)) {
454 dev_priv->pch_type = PCH_LPT;
456 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
457 DRM_DEBUG_KMS("This is Broadwell, assuming "
458 "LynxPoint LP PCH\n");
459 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
460 dev_priv->pch_type = PCH_LPT;
461 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
462 WARN_ON(!IS_HASWELL(dev));
463 WARN_ON(!IS_ULT(dev));
471 DRM_DEBUG_KMS("No PCH found.\n");
478 bool i915_semaphore_is_enabled(struct drm_device *dev)
480 if (INTEL_INFO(dev)->gen < 6)
483 if (i915.semaphores >= 0)
484 return i915.semaphores;
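/*
 * i915.semaphores is a tri-state module parameter: 0 and 1 force
 * semaphores off or on and are returned directly above, while -1
 * (auto) falls through to the platform heuristics below.
 */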
486 /* TODO: make semaphores and Execlists play nicely together */
487 if (i915.enable_execlists)
490 /* Until we get further testing... */
494 #ifdef CONFIG_INTEL_IOMMU
495 /* Enable semaphores on SNB when IO remapping is off */
496 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
503 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
505 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
507 dev_priv->long_hpd_port_mask = 0;
508 dev_priv->short_hpd_port_mask = 0;
509 dev_priv->hpd_event_bits = 0;
511 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
513 cancel_work_sync(&dev_priv->dig_port_work);
514 cancel_work_sync(&dev_priv->hotplug_work);
515 cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
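/*
 * Ordering note: the pending HPD state is cleared under irq_lock first,
 * and the work items are only flushed after the lock is dropped; the
 * hotplug handlers take irq_lock themselves, so cancelling them
 * synchronously with the lock held could deadlock.
 */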
518 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
520 struct drm_device *dev = dev_priv->dev;
521 struct drm_encoder *encoder;
523 drm_modeset_lock_all(dev);
524 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
525 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
527 if (intel_encoder->suspend)
528 intel_encoder->suspend(intel_encoder);
530 drm_modeset_unlock_all(dev);
535 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
536 static int intel_resume_prepare(struct drm_i915_private *dev_priv,
540 static int i915_drm_freeze(struct drm_device *dev)
542 struct drm_i915_private *dev_priv = dev->dev_private;
543 struct drm_crtc *crtc;
544 pci_power_t opregion_target_state;
546 /* ignore lid events during suspend */
547 mutex_lock(&dev_priv->modeset_restore_lock);
548 dev_priv->modeset_restore = MODESET_SUSPENDED;
549 mutex_unlock(&dev_priv->modeset_restore_lock);
551 /* We do a lot of poking in a lot of registers; make sure they work properly. */
553 intel_display_set_init_power(dev_priv, true);
555 drm_kms_helper_poll_disable(dev);
558 pci_save_state(dev->pdev);
561 /* If KMS is active, we do the leavevt stuff here */
562 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
565 error = i915_gem_suspend(dev);
567 dev_err(dev->pdev->dev,
568 "GEM idle failed, resume might fail\n");
573 * Disable CRTCs directly since we want to preserve sw state
574 * for _thaw. Also, power gate the CRTC power wells.
576 drm_modeset_lock_all(dev);
577 for_each_crtc(dev, crtc)
578 intel_crtc_control(crtc, false);
579 drm_modeset_unlock_all(dev);
582 intel_dp_mst_suspend(dev);
584 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
587 intel_runtime_pm_disable_interrupts(dev);
588 intel_hpd_cancel_work(dev_priv);
590 intel_suspend_encoders(dev_priv);
592 intel_suspend_gt_powersave(dev);
594 intel_modeset_suspend_hw(dev);
597 i915_gem_suspend_gtt_mappings(dev);
599 i915_save_state(dev);
601 opregion_target_state = PCI_D3cold;
602 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
603 if (acpi_target_system_state() < ACPI_STATE_S3)
604 opregion_target_state = PCI_D1;
606 intel_opregion_notify_adapter(dev, opregion_target_state);
609 intel_uncore_forcewake_reset(dev, false);
611 intel_opregion_fini(dev);
615 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
619 dev_priv->suspend_count++;
621 intel_display_set_init_power(dev_priv, false);
626 int i915_suspend(device_t kdev)
628 struct drm_device *dev = device_get_softc(kdev);
631 if (!dev || !dev->dev_private) {
632 DRM_ERROR("dev: %p\n", dev);
633 DRM_ERROR("DRM not initialized, aborting suspend.\n");
637 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
640 error = i915_drm_freeze(dev);
645 if (state.event == PM_EVENT_SUSPEND) {
646 /* Shut down the device */
647 pci_disable_device(dev->pdev);
648 pci_set_power_state(dev->pdev, PCI_D3hot);
652 error = bus_generic_suspend(kdev);
657 static int i915_drm_thaw_early(struct drm_device *dev)
659 struct drm_i915_private *dev_priv = dev->dev_private;
662 ret = intel_resume_prepare(dev_priv, false);
664 DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
666 intel_uncore_early_sanitize(dev, true);
667 intel_uncore_sanitize(dev);
668 intel_power_domains_init_hw(dev_priv);
674 static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
676 struct drm_i915_private *dev_priv = dev->dev_private;
678 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
679 restore_gtt_mappings) {
680 mutex_lock(&dev->struct_mutex);
681 i915_gem_restore_gtt_mappings(dev);
682 mutex_unlock(&dev->struct_mutex);
685 intel_power_domains_init_hw(dev_priv);
687 i915_restore_state(dev);
688 intel_opregion_setup(dev);
690 /* KMS EnterVT equivalent */
691 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
692 intel_init_pch_refclk(dev);
693 drm_mode_config_reset(dev);
695 mutex_lock(&dev->struct_mutex);
696 if (i915_gem_init_hw(dev)) {
697 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
698 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
700 mutex_unlock(&dev->struct_mutex);
702 intel_runtime_pm_restore_interrupts(dev);
704 intel_modeset_init_hw(dev);
706 drm_modeset_lock_all(dev);
707 intel_modeset_setup_hw_state(dev, true);
708 drm_modeset_unlock_all(dev);
711 * ... but also need to make sure that hotplug processing
712 * doesn't cause havoc. Like in the driver load code we don't
713 * bother with the tiny race here where we might lose hotplug notifications.
717 /* Config may have changed between suspend and resume */
718 drm_helper_hpd_irq_event(dev);
721 intel_opregion_init(dev);
723 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
725 mutex_lock(&dev_priv->modeset_restore_lock);
726 dev_priv->modeset_restore = MODESET_DONE;
727 mutex_unlock(&dev_priv->modeset_restore_lock);
730 intel_opregion_notify_adapter(dev, PCI_D0);
737 static int i915_drm_thaw(struct drm_device *dev)
739 if (drm_core_check_feature(dev, DRIVER_MODESET))
740 i915_check_and_clear_faults(dev);
742 return __i915_drm_thaw(dev, true);
746 int i915_resume(struct drm_device *dev)
748 struct drm_i915_private *dev_priv = dev->dev_private;
752 * Platforms with opregion should have sane BIOS, older ones (gen3 and
753 * earlier) need to restore the GTT mappings since the BIOS might clear
754 * all our scratch PTEs.
756 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
760 drm_kms_helper_poll_enable(dev);
764 /* XXX Hack for the old *BSD drm code base
765 * The device id field is set at probe time */
766 static drm_pci_id_list_t i915_attach_list[] = {
767 {0x8086, 0, 0, "Intel i915 GPU"},
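/*
 * Rough flow of the *BSD attach path: i915_pci_probe() below checks the
 * PCI class/vendor, looks the device id up in pciidlist[], and on a
 * match patches i915_attach_list[0].device so that drm_attach() in
 * i915_attach() accepts exactly this device.
 */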
771 struct intel_device_info *
772 i915_get_device_id(int device)
774 const struct pci_device_id *did;
776 for (did = &pciidlist[0]; did->device != 0; did++) {
777 if (did->device != device)
779 return (struct intel_device_info *)did->driver_data;
784 extern devclass_t drm_devclass;
787 * i915_reset - reset chip after a hang
788 * @dev: drm device to reset
790 * Reset the chip. Useful if a hang is detected. Returns zero on successful
791 * reset or otherwise an error code.
793 * Procedure is fairly simple:
794 * - reset the chip using the reset reg
795 * - re-init context state
796 * - re-init hardware status page
797 * - re-init ring buffer
798 * - re-init interrupt state
801 int i915_reset(struct drm_device *dev)
803 struct drm_i915_private *dev_priv = dev->dev_private;
810 mutex_lock(&dev->struct_mutex);
814 simulated = dev_priv->gpu_error.stop_rings != 0;
816 ret = intel_gpu_reset(dev);
818 /* Also reset the gpu hangman. */
820 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
821 dev_priv->gpu_error.stop_rings = 0;
822 if (ret == -ENODEV) {
823 DRM_INFO("Reset not implemented, but ignoring "
824 "error for simulated gpu hangs\n");
830 DRM_ERROR("Failed to reset chip: %i\n", ret);
831 mutex_unlock(&dev->struct_mutex);
835 /* Ok, now get things going again... */
838 * Everything depends on having the GTT running, so we need to start
839 * there. Fortunately we don't need to do this unless we reset the
840 * chip at a PCI level.
842 * Next we need to restore the context, but we don't use those
845 * Ring buffer needs to be re-initialized in the KMS case, or if X
846 * was running at the time of the reset (i.e. we weren't VT
849 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
850 !dev_priv->ums.mm_suspended) {
851 dev_priv->ums.mm_suspended = 0;
853 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
854 dev_priv->gpu_error.reload_in_reset = true;
856 ret = i915_gem_init_hw(dev);
858 dev_priv->gpu_error.reload_in_reset = false;
860 mutex_unlock(&dev->struct_mutex);
862 DRM_ERROR("Failed hw init on reset %d\n", ret);
867 * FIXME: This races pretty badly against concurrent holders of
868 * ring interrupts. This is possible since we've started to drop
869 * dev->struct_mutex in select places when waiting for the gpu.
873 * rps/rc6 re-init is necessary to restore state lost after the
874 * reset and the re-install of gt irqs. Skip for ironlake per
875 * previous concerns that it doesn't respond well to some forms
876 * of re-init after reset.
878 if (INTEL_INFO(dev)->gen > 5)
879 intel_reset_gt_powersave(dev);
881 mutex_unlock(&dev->struct_mutex);
887 static int i915_pci_probe(device_t kdev)
891 if (pci_get_class(kdev) != PCIC_DISPLAY)
894 if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
897 device = pci_get_device(kdev);
899 for (i = 0; pciidlist[i].device != 0; i++) {
900 if (pciidlist[i].device == device) {
901 i915_attach_list[0].device = device;
911 i915_pci_remove(struct pci_dev *pdev)
913 struct drm_device *dev = pci_get_drvdata(pdev);
918 static int i915_pm_suspend(struct device *dev)
920 struct pci_dev *pdev = to_pci_dev(dev);
921 struct drm_device *drm_dev = pci_get_drvdata(pdev);
923 if (!drm_dev || !drm_dev->dev_private) {
924 dev_err(dev, "DRM not initialized, aborting suspend.\n");
928 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
931 return i915_drm_freeze(drm_dev);
934 static int i915_pm_suspend_late(struct device *dev)
936 struct pci_dev *pdev = to_pci_dev(dev);
937 struct drm_device *drm_dev = pci_get_drvdata(pdev);
938 struct drm_i915_private *dev_priv = drm_dev->dev_private;
942 * We have a suspend ordering issue with the snd-hda driver also
943 * requiring our device to be powered up. Due to the lack of a
944 * parent/child relationship we currently solve this with a late
947 * FIXME: This should be solved with a special hdmi sink device or
948 * similar so that power domains can be employed.
950 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
953 ret = intel_suspend_complete(dev_priv);
956 DRM_ERROR("Suspend complete failed: %d\n", ret);
958 pci_disable_device(pdev);
959 pci_set_power_state(pdev, PCI_D3hot);
965 static int i915_pm_resume(struct device *dev)
967 struct pci_dev *pdev = to_pci_dev(dev);
968 struct drm_device *drm_dev = pci_get_drvdata(pdev);
970 return i915_resume(drm_dev);
973 static int i915_pm_freeze(struct device *dev)
975 struct pci_dev *pdev = to_pci_dev(dev);
976 struct drm_device *drm_dev = pci_get_drvdata(pdev);
979 if (!drm_dev || !drm_dev->dev_private) {
980 dev_err(dev, "DRM not initialized, aborting suspend.\n");
984 return i915_drm_freeze(drm_dev);
987 static int i915_pm_freeze_late(struct device *dev)
989 struct pci_dev *pdev = to_pci_dev(dev);
990 struct drm_device *drm_dev = pci_get_drvdata(pdev);
991 struct drm_i915_private *dev_priv = drm_dev->dev_private;
993 return intel_suspend_complete(dev_priv);
996 static int i915_pm_thaw(struct device *dev)
998 struct pci_dev *pdev = to_pci_dev(dev);
999 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1001 return i915_drm_thaw(drm_dev);
1004 static int i915_pm_poweroff(struct device *dev)
1006 struct pci_dev *pdev = to_pci_dev(dev);
1007 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1009 return i915_drm_freeze(drm_dev);
1012 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1014 hsw_enable_pc8(dev_priv);
1019 static int snb_resume_prepare(struct drm_i915_private *dev_priv,
1022 struct drm_device *dev = dev_priv->dev;
1025 intel_init_pch_refclk(dev);
1030 static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
1033 hsw_disable_pc8(dev_priv);
1039 * Save all Gunit registers that may be lost after a D3 and a subsequent
1040 * S0i[R123] transition. The list of registers needing a save/restore is
1041 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1042 * registers in the following way:
1043 * - Driver: saved/restored by the driver
1044 * - Punit : saved/restored by the Punit firmware
1045 * - No, w/o marking: no need to save/restore, since the register is R/O or
1046 * used internally by the HW in a way that doesn't depend on
1047 * keeping the content across a suspend/resume.
1048 * - Debug : used for debugging
1050 * We save/restore all registers marked with 'Driver', with the following exceptions:
1052 * - Registers out of use, including also registers marked with 'Debug'.
1053 * These have no effect on the driver's operation, so we don't save/restore
1054 * them to reduce the overhead.
1055 * - Registers that are fully setup by an initialization function called from
1056 * the resume path. For example many clock gating and RPS/RC6 registers.
1057 * - Registers that provide the right functionality with their reset defaults.
1059 * TODO: Except for registers that, based on the above 3 criteria, can be safely
1060 * ignored, we save/restore all others, practically treating the HW context as
1061 * a black-box for the driver. Further investigation is needed to reduce the
1062 * saved/restored registers even further, by following the same 3 criteria.
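 *
 * The register list below is saved by vlv_suspend_complete() with the
 * GFX clock forced on, and restored symmetrically by
 * vlv_resume_prepare(); the save and restore sides must therefore stay
 * in sync field for field.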
1064 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1066 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1069 /* GAM 0x4000-0x4770 */
1070 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1071 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1072 s->arb_mode = I915_READ(ARB_MODE);
1073 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1074 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1076 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1077 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1079 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1080 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1082 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1083 s->ecochk = I915_READ(GAM_ECOCHK);
1084 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1085 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1087 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1089 /* MBC 0x9024-0x91D0, 0x8500 */
1090 s->g3dctl = I915_READ(VLV_G3DCTL);
1091 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1092 s->mbctl = I915_READ(GEN6_MBCTL);
1094 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1095 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1096 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1097 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1098 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1099 s->rstctl = I915_READ(GEN6_RSTCTL);
1100 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1102 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1103 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1104 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1105 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1106 s->ecobus = I915_READ(ECOBUS);
1107 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1108 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1109 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1110 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1111 s->rcedata = I915_READ(VLV_RCEDATA);
1112 s->spare2gh = I915_READ(VLV_SPAREG2H);
1114 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1115 s->gt_imr = I915_READ(GTIMR);
1116 s->gt_ier = I915_READ(GTIER);
1117 s->pm_imr = I915_READ(GEN6_PMIMR);
1118 s->pm_ier = I915_READ(GEN6_PMIER);
1120 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1121 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1123 /* GT SA CZ domain, 0x100000-0x138124 */
1124 s->tilectl = I915_READ(TILECTL);
1125 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1126 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1127 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1128 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1130 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1131 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1132 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1133 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1136 * Not saving any of:
1137 * DFT, 0x9800-0x9EC0
1138 * SARB, 0xB000-0xB1FC
1139 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1144 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1146 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1150 /* GAM 0x4000-0x4770 */
1151 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1152 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1153 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
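/*
 * ARB_MODE is a masked register: the high 16 bits select which of the
 * low 16 bits a write actually updates, hence the 0xffff << 16 so the
 * whole saved value takes effect (the same convention as the
 * _MASKED_BIT_ENABLE() helpers used elsewhere in the driver).
 */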
1154 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1155 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1157 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1158 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1160 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1161 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1163 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1164 I915_WRITE(GAM_ECOCHK, s->ecochk);
1165 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1166 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1168 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1170 /* MBC 0x9024-0x91D0, 0x8500 */
1171 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1172 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1173 I915_WRITE(GEN6_MBCTL, s->mbctl);
1175 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1176 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1177 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1178 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1179 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1180 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1181 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1183 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1184 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1185 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1186 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1187 I915_WRITE(ECOBUS, s->ecobus);
1188 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1189 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1190 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1191 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1192 I915_WRITE(VLV_RCEDATA, s->rcedata);
1193 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1195 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1196 I915_WRITE(GTIMR, s->gt_imr);
1197 I915_WRITE(GTIER, s->gt_ier);
1198 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1199 I915_WRITE(GEN6_PMIER, s->pm_ier);
1201 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1202 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1204 /* GT SA CZ domain, 0x100000-0x138124 */
1205 I915_WRITE(TILECTL, s->tilectl);
1206 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1208 * Preserve the GT allow wake and GFX force clock bits; they are not
1209 * restored here, as they are used to control the s0ix suspend/resume
1210 * sequence by the caller.
1212 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1213 val &= VLV_GTLC_ALLOWWAKEREQ;
1214 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1215 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1217 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1218 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1219 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1220 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1222 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1224 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1225 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1226 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1227 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1231 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1236 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1237 WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1239 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1240 /* Wait for a previous force-off to settle */
1242 err = wait_for(!COND, 20);
1244 DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1245 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1250 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1251 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1253 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1254 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1259 err = wait_for(COND, 20);
1261 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1262 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
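/*
 * The local COND/wait_for() pattern used here and below is a polling
 * idiom: wait_for() re-evaluates the condition until it holds or the
 * timeout (in msec) expires, returning -ETIMEDOUT on failure. COND is
 * defined and #undef'd around each use so the same name can describe a
 * different condition in each helper.
 */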
1269 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1274 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1275 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1277 val |= VLV_GTLC_ALLOWWAKEREQ;
1278 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1279 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1281 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1283 err = wait_for(COND, 1);
1285 DRM_ERROR("timeout disabling GT waking\n");
1290 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1297 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1298 val = wait_for_on ? mask : 0;
1299 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1303 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1304 wait_for_on ? "on" : "off",
1305 I915_READ(VLV_GTLC_PW_STATUS));
1308 * RC6 transitioning can be delayed up to 2 msec (see
1309 * valleyview_enable_rps), use 3 msec for safety.
1311 err = wait_for(COND, 3);
1313 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1314 wait_for_on ? "on" : "off");
1320 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1322 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1325 DRM_ERROR("GT register access while GT waking disabled\n");
1326 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1329 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1335 * Bspec defines the following GT well on flags as debug only, so
1336 * don't treat them as hard failures.
1338 (void)vlv_wait_for_gt_wells(dev_priv, false);
1340 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1341 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1343 vlv_check_no_gt_access(dev_priv);
1345 err = vlv_force_gfx_clock(dev_priv, true);
1349 err = vlv_allow_gt_wake(dev_priv, false);
1352 vlv_save_gunit_s0ix_state(dev_priv);
1354 err = vlv_force_gfx_clock(dev_priv, false);
1361 /* For safety always re-enable waking and disable gfx clock forcing */
1362 vlv_allow_gt_wake(dev_priv, true);
1364 vlv_force_gfx_clock(dev_priv, false);
1369 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1372 struct drm_device *dev = dev_priv->dev;
1377 * If any of the steps fail just try to continue, that's the best we
1378 * can do at this point. Return the first error code (which will also
1379 * leave RPM permanently disabled).
1381 ret = vlv_force_gfx_clock(dev_priv, true);
1383 vlv_restore_gunit_s0ix_state(dev_priv);
1385 err = vlv_allow_gt_wake(dev_priv, true);
1389 err = vlv_force_gfx_clock(dev_priv, false);
1393 vlv_check_no_gt_access(dev_priv);
1396 intel_init_clock_gating(dev);
1397 i915_gem_restore_fences(dev);
1403 static int intel_runtime_suspend(struct device *device)
1405 struct pci_dev *pdev = to_pci_dev(device);
1406 struct drm_device *dev = pci_get_drvdata(pdev);
1407 struct drm_i915_private *dev_priv = dev->dev_private;
1410 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1413 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1416 assert_force_wake_inactive(dev_priv);
1418 DRM_DEBUG_KMS("Suspending device\n");
1421 * We could deadlock here in case another thread holding struct_mutex
1422 * calls RPM suspend concurrently, since the RPM suspend will wait
1423 * first for this RPM suspend to finish. In this case the concurrent
1424 * RPM resume will be followed by its RPM suspend counterpart. Still
1425 * for consistency return -EAGAIN, which will reschedule this suspend.
1427 if (!mutex_trylock(&dev->struct_mutex)) {
1428 DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
1430 * Bump the expiration timestamp, otherwise the suspend won't be rescheduled.
1433 pm_runtime_mark_last_busy(device);
1438 * We are safe here against re-faults, since the fault handler takes an RPM reference.
1441 i915_gem_release_all_mmaps(dev_priv);
1442 mutex_unlock(&dev->struct_mutex);
1445 * rps.work can't be rearmed here, since we get here only after making
1446 * sure the GPU is idle and the RPS freq is set to the minimum. See
1447 * intel_mark_idle().
1449 cancel_work_sync(&dev_priv->rps.work);
1450 intel_runtime_pm_disable_interrupts(dev);
1452 ret = intel_suspend_complete(dev_priv);
1454 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1455 intel_runtime_pm_restore_interrupts(dev);
1460 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1461 dev_priv->pm.suspended = true;
1464 * FIXME: We really should find a document that references the arguments used below!
1467 if (IS_HASWELL(dev)) {
1469 * current versions of firmware which depend on this opregion
1470 * notification have repurposed the D1 definition to mean
1471 * "runtime suspended" vs. what you would normally expect (D3)
1472 * to distinguish it from notifications that might be sent via the suspend path.
1475 intel_opregion_notify_adapter(dev, PCI_D1);
1478 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1479 * being detected, and the call we do at intel_runtime_resume()
1480 * won't be able to restore them. Since PCI_D3hot matches the
1481 * actual specification and appears to be working, use it. Let's
1482 * assume the other non-Haswell platforms will stay the same as
1485 intel_opregion_notify_adapter(dev, PCI_D3hot);
1488 DRM_DEBUG_KMS("Device suspended\n");
1492 static int intel_runtime_resume(struct device *device)
1494 struct pci_dev *pdev = to_pci_dev(device);
1495 struct drm_device *dev = pci_get_drvdata(pdev);
1496 struct drm_i915_private *dev_priv = dev->dev_private;
1499 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1502 DRM_DEBUG_KMS("Resuming device\n");
1504 intel_opregion_notify_adapter(dev, PCI_D0);
1505 dev_priv->pm.suspended = false;
1507 ret = intel_resume_prepare(dev_priv, true);
1509 * No point in rolling back things in case of an error, as the best
1510 * we can do is to hope that things will still work (and disable RPM).
1512 i915_gem_init_swizzling(dev);
1513 gen6_update_ring_freq(dev);
1515 intel_runtime_pm_restore_interrupts(dev);
1516 intel_reset_gt_powersave(dev);
1519 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1521 DRM_DEBUG_KMS("Device resumed\n");
1527 * This function implements common functionality of the runtime and system suspend sequences.
1530 static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1532 struct drm_device *dev = dev_priv->dev;
1535 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1536 ret = hsw_suspend_complete(dev_priv);
1537 else if (IS_VALLEYVIEW(dev))
1538 ret = vlv_suspend_complete(dev_priv);
1546 * This function implements common functionality of the runtime and system
1547 * resume sequences; the rpm_resume argument selects between the two code paths.
1550 static int intel_resume_prepare(struct drm_i915_private *dev_priv,
1553 struct drm_device *dev = dev_priv->dev;
1557 ret = snb_resume_prepare(dev_priv, rpm_resume);
1558 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1559 ret = hsw_resume_prepare(dev_priv, rpm_resume);
1560 else if (IS_VALLEYVIEW(dev))
1561 ret = vlv_resume_prepare(dev_priv, rpm_resume);
1568 static const struct dev_pm_ops i915_pm_ops = {
1569 .suspend = i915_pm_suspend,
1570 .resume = i915_pm_resume,
1571 .freeze = i915_pm_freeze,
1572 .thaw = i915_pm_thaw,
1573 .poweroff = i915_pm_poweroff,
1574 .restore = i915_pm_resume,
1575 .runtime_suspend = intel_runtime_suspend,
1576 .runtime_resume = intel_runtime_resume,
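/*
 * Rough mapping of the hooks above, following the standard dev_pm_ops
 * semantics: suspend/resume run for suspend-to-RAM, freeze/thaw around
 * creating the hibernation image, poweroff/restore when entering and
 * leaving hibernation proper, and the runtime_* pair for runtime PM
 * while the system stays up.
 */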
1579 static const struct vm_operations_struct i915_gem_vm_ops = {
1580 .fault = i915_gem_fault,
1581 .open = drm_gem_vm_open,
1582 .close = drm_gem_vm_close,
1585 static const struct file_operations i915_driver_fops = {
1586 .owner = THIS_MODULE,
1588 .release = drm_release,
1589 .unlocked_ioctl = drm_ioctl,
1590 .mmap = drm_gem_mmap,
1593 #ifdef CONFIG_COMPAT
1594 .compat_ioctl = i915_compat_ioctl,
1596 .llseek = noop_llseek,
1600 static struct cdev_pager_ops i915_gem_vm_ops = {
1601 .cdev_pg_fault = i915_gem_fault,
1602 .cdev_pg_ctor = i915_gem_pager_ctor,
1603 .cdev_pg_dtor = i915_gem_pager_dtor
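/*
 * Platform split: the Linux-style file_operations/vm_operations_struct
 * tables above and this DragonFly cdev_pager_ops table provide the same
 * GEM mmap-fault plumbing for their respective kernels; only one set is
 * compiled into a given build.
 */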
1606 static struct drm_driver driver = {
1607 /* Don't use MTRRs here; the Xserver or userspace app should
1608 * deal with them for Intel hardware.
1611 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1612 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
1614 .load = i915_driver_load,
1615 .unload = i915_driver_unload,
1616 .open = i915_driver_open,
1617 .lastclose = i915_driver_lastclose,
1618 .preclose = i915_driver_preclose,
1619 .postclose = i915_driver_postclose,
1621 .device_is_agp = i915_driver_device_is_agp,
1622 .gem_free_object = i915_gem_free_object,
1623 .gem_pager_ops = &i915_gem_vm_ops,
1624 .dumb_create = i915_gem_dumb_create,
1625 .dumb_map_offset = i915_gem_mmap_gtt,
1626 .dumb_destroy = drm_gem_dumb_destroy,
1627 .ioctls = i915_ioctls,
1629 .name = DRIVER_NAME,
1630 .desc = DRIVER_DESC,
1631 .date = DRIVER_DATE,
1632 .major = DRIVER_MAJOR,
1633 .minor = DRIVER_MINOR,
1634 .patchlevel = DRIVER_PATCHLEVEL,
1637 static int __init i915_init(void);
1640 i915_attach(device_t kdev)
1642 struct drm_device *dev = device_get_softc(kdev);
1646 dev->driver = &driver;
1647 return (drm_attach(kdev, i915_attach_list));
1650 static device_method_t i915_methods[] = {
1651 /* Device interface */
1652 DEVMETHOD(device_probe, i915_pci_probe),
1653 DEVMETHOD(device_attach, i915_attach),
1654 DEVMETHOD(device_suspend, i915_suspend),
1655 DEVMETHOD(device_resume, i915_resume),
1656 DEVMETHOD(device_detach, drm_release),
1660 static driver_t i915_driver = {
1663 sizeof(struct drm_device)
1666 static int __init i915_init(void)
1668 driver.num_ioctls = i915_max_ioctl;
1671 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
1672 * explicitly disabled with the module parameter.
1674 * Otherwise, just follow the parameter (defaulting to off).
1676 * Allow optional vga_text_mode_force boot option to override
1677 * the default behavior.
1679 #if defined(CONFIG_DRM_I915_KMS)
1680 if (i915.modeset != 0)
1681 driver.driver_features |= DRIVER_MODESET;
1683 if (i915.modeset == 1)
1684 driver.driver_features |= DRIVER_MODESET;
1686 #ifdef CONFIG_VGA_CONSOLE
1687 if (vgacon_text_force() && i915.modeset == -1)
1688 driver.driver_features &= ~DRIVER_MODESET;
1691 if (!(driver.driver_features & DRIVER_MODESET)) {
1692 driver.get_vblank_timestamp = NULL;
1693 #ifndef CONFIG_DRM_I915_UMS
1694 /* Silently fail loading to not upset userspace. */
1695 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1701 return drm_pci_init(&driver, &i915_pci_driver);
1707 DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, NULL, NULL,
1709 MODULE_DEPEND(i915kms, drm, 1, 1, 1);
1710 MODULE_DEPEND(i915kms, agp, 1, 1, 1);
1711 MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
1712 MODULE_DEPEND(i915kms, iic, 1, 1, 1);
1713 MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);