/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_pciids.h>
#include "intel_drv.h"

#include <linux/module.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
        .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET, }, \
        .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
                          CHV_DPLL_C_OFFSET }, \
        .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
                             CHV_DPLL_C_MD_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

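/*
 * Per-platform feature tables.  Each intel_device_info below is referenced
 * by the PCI ID tables further down and describes the capabilities of one
 * chipset family.
 */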
static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
        .is_preliminary = 1,
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

/*
 * Make sure the device matches here go from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, it needs to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
        INTEL_I830_IDS(&intel_i830_info),       \
        INTEL_I845G_IDS(&intel_845g_info),      \
        INTEL_I85X_IDS(&intel_i85x_info),       \
        INTEL_I865G_IDS(&intel_i865g_info),     \
        INTEL_I915G_IDS(&intel_i915g_info),     \
        INTEL_I915GM_IDS(&intel_i915gm_info),   \
        INTEL_I945G_IDS(&intel_i945g_info),     \
        INTEL_I945GM_IDS(&intel_i945gm_info),   \
        INTEL_I965G_IDS(&intel_i965g_info),     \
        INTEL_G33_IDS(&intel_g33_info),         \
        INTEL_I965GM_IDS(&intel_i965gm_info),   \
        INTEL_GM45_IDS(&intel_gm45_info),       \
        INTEL_G45_IDS(&intel_g45_info),         \
        INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
        INTEL_HSW_D_IDS(&intel_haswell_d_info), \
        INTEL_HSW_M_IDS(&intel_haswell_m_info), \
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
        INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),   \
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
        INTEL_CHV_IDS(&intel_cherryview_info)

static const struct pci_device_id pciidlist[] = {               /* aka */
        INTEL_PCI_IDS,
        {0, 0}
};

#define PCI_VENDOR_INTEL        0x8086

void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct device *pch = NULL;
        struct pci_devinfo *di;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /* XXX The ISA bridge probe causes some old Core2 machines to hang */
        if (INTEL_INFO(dev)->gen < 5)
                return;

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough easy for the VMM, which then
         * only needs to expose the ISA bridge to let the driver know the
         * real hardware underneath. This is a requirement from the
         * virtualization team.
         *
         * In some virtualized environments (e.g. XEN) there is an irrelevant
         * ISA bridge in the system. To work reliably, we should scan through
         * all the ISA bridge devices and check for the first match, instead
         * of only checking the first one.
         */
        di = NULL;

        while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) {
                if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
                        unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(IS_ULT(dev));
                        } else if (IS_BROADWELL(dev)) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->pch_id =
                                        INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
                                DRM_DEBUG_KMS("This is Broadwell, assuming "
                                              "LynxPoint LP PCH\n");
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

#if 0
        pci_dev_put(pch);
#endif
}

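/*
 * Decide whether inter-ring semaphores may be used, honoring the
 * i915.semaphores module parameter and per-generation restrictions.
 */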
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

        if (i915.semaphores >= 0)
                return i915.semaphores;

        /* Until we get further testing... */
        if (IS_GEN8(dev))
                return false;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

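/*
 * Common suspend-side worker: idles the GPU, disables the CRTCs while
 * preserving their software state for _thaw, and saves register state.
 */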
static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        intel_runtime_pm_get(dev_priv);

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

#if 0
        pci_save_state(dev->pdev);
#endif

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error;

                error = i915_gem_suspend(dev);
                if (error) {
                        dev_err(dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;

                intel_disable_gt_powersave(dev);

                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw.
                 */
                drm_modeset_lock_all(dev);
                for_each_crtc(dev, crtc) {
                        dev_priv->display.crtc_disable(crtc);
                }
                drm_modeset_unlock_all(dev);

                intel_modeset_suspend_hw(dev);
        }

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        intel_opregion_fini(dev);
        intel_uncore_fini(dev);

#if 0
        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();
#endif

        dev_priv->suspend_count++;

        return 0;
}

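/*
 * device_suspend() entry point: freeze the device, then hand off to the
 * generic bus suspend code.
 */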
int i915_suspend(device_t kdev)
{
        struct drm_device *dev = device_get_softc(kdev);
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_freeze(dev);
        if (error)
                return error;

#if 0
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
#endif

        error = bus_generic_suspend(kdev);
        return (error);
}

#if 0
void intel_console_resume(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             console_resume_work);
        struct drm_device *dev = dev_priv->dev;

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
        console_unlock();
}
#endif

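/*
 * Resume-side worker: optionally restores the GTT mappings, then brings
 * the hardware and modeset state back up (the KMS EnterVT equivalent).
 */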
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET) &&
            restore_gtt_mappings) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        intel_power_domains_init_hw(dev_priv);

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
                drm_mode_config_reset(dev);

                mutex_lock(&dev->struct_mutex);
                if (i915_gem_init_hw(dev)) {
                        DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
                        atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                }
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev, dev->irq);

                intel_modeset_init_hw(dev);

                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
                /* Config may have changed between suspend and resume */
                drm_helper_hpd_irq_event(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity.  Try to keep it out of the hot
         * path of resume if possible.
         */
#if 0
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }
#endif

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_runtime_pm_put(dev_priv);
        return 0;
}

#if 0
static int i915_drm_thaw(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_check_and_clear_faults(dev);

        return __i915_drm_thaw(dev, true);
}
#endif

int i915_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
         * earlier) need to restore the GTT mappings since the BIOS might clear
         * all our scratch PTEs.
         */
        ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
        if (ret)
                return ret;

        drm_kms_helper_poll_enable(dev);
        return 0;
}

/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
        {0x8086, 0, 0, "Intel i915 GPU"},
        {0, 0, 0, NULL}
};

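/*
 * Look up the intel_device_info for a PCI device ID; returns NULL if the
 * device is not listed in pciidlist.
 */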
struct intel_device_info *
i915_get_device_id(int device)
{
        const struct pci_device_id *did;

        for (did = &pciidlist[0]; did->device != 0; did++) {
                if (did->device != device)
                        continue;
                return (struct intel_device_info *)did->driver_data;
        }
        return (NULL);
}

extern devclass_t drm_devclass;

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool simulated;
        int ret;

        if (!i915.reset)
                return 0;

        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);

        simulated = dev_priv->gpu_error.stop_rings != 0;

        ret = intel_gpu_reset(dev);

        /* Also reset the gpu hangman. */
        if (simulated) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
                dev_priv->ums.mm_suspended = 0;

                ret = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
                        return ret;
                }

                /*
                 * FIXME: This races pretty badly against concurrent holders of
                 * ring interrupts. This is possible since we've started to drop
                 * dev->struct_mutex in select places when waiting for the gpu.
                 */

                /*
                 * rps/rc6 re-init is necessary to restore state lost after the
                 * reset and the re-install of gt irqs. Skip for ironlake per
                 * previous concerns that it doesn't respond well to some forms
                 * of re-init after reset.
                 */
                if (INTEL_INFO(dev)->gen > 5)
                        intel_reset_gt_powersave(dev);

                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }

        return 0;
}

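/*
 * device_probe() entry point: match Intel display-class devices against
 * pciidlist and record the device ID in i915_attach_list for attach time.
 */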
static int i915_pci_probe(device_t kdev)
{
        int device, i = 0;

        if (pci_get_class(kdev) != PCIC_DISPLAY)
                return ENXIO;

        if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
                return ENXIO;

        device = pci_get_device(kdev);

        for (i = 0; pciidlist[i].device != 0; i++) {
                if (pciidlist[i].device == device) {
                        i915_attach_list[0].device = device;
                        return 0;
                }
        }

        return ENXIO;
}

#if 0
static void
i915_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_freeze(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_freeze(drm_dev);
}

static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
        hsw_enable_pc8(dev_priv);

        return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        intel_init_pch_refclk(dev);

        return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
        hsw_disable_pc8(dev_priv);

        return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully set up by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same
 * 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        int i;

        /* GAM 0x4000-0x4770 */
        s->wr_watermark         = I915_READ(GEN7_WR_WATERMARK);
        s->gfx_prio_ctrl        = I915_READ(GEN7_GFX_PRIO_CTRL);
        s->arb_mode             = I915_READ(ARB_MODE);
        s->gfx_pend_tlb0        = I915_READ(GEN7_GFX_PEND_TLB0);
        s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

        s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk               = I915_READ(GAM_ECOCHK);
        s->bsd_hwsp             = I915_READ(BSD_HWS_PGA_GEN7);
        s->blt_hwsp             = I915_READ(BLT_HWS_PGA_GEN7);

        s->tlb_rd_addr          = I915_READ(GEN7_TLB_RD_ADDR);

        /* MBC 0x9024-0x91D0, 0x8500 */
        s->g3dctl               = I915_READ(VLV_G3DCTL);
        s->gsckgctl             = I915_READ(VLV_GSCKGCTL);
        s->mbctl                = I915_READ(GEN6_MBCTL);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        s->ucgctl1              = I915_READ(GEN6_UCGCTL1);
        s->ucgctl3              = I915_READ(GEN6_UCGCTL3);
        s->rcgctl1              = I915_READ(GEN6_RCGCTL1);
        s->rcgctl2              = I915_READ(GEN6_RCGCTL2);
        s->rstctl               = I915_READ(GEN6_RSTCTL);
        s->misccpctl            = I915_READ(GEN7_MISCCPCTL);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        s->gfxpause             = I915_READ(GEN6_GFXPAUSE);
        s->rpdeuhwtc            = I915_READ(GEN6_RPDEUHWTC);
        s->rpdeuc               = I915_READ(GEN6_RPDEUC);
        s->ecobus               = I915_READ(ECOBUS);
        s->pwrdwnupctl          = I915_READ(VLV_PWRDWNUPCTL);
        s->rp_down_timeout      = I915_READ(GEN6_RP_DOWN_TIMEOUT);
        s->rp_deucsw            = I915_READ(GEN6_RPDEUCSW);
        s->rcubmabdtmr          = I915_READ(GEN6_RCUBMABDTMR);
        s->rcedata              = I915_READ(VLV_RCEDATA);
        s->spare2gh             = I915_READ(VLV_SPAREG2H);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        s->gt_imr               = I915_READ(GTIMR);
        s->gt_ier               = I915_READ(GTIER);
        s->pm_imr               = I915_READ(GEN6_PMIMR);
        s->pm_ier               = I915_READ(GEN6_PMIER);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl              = I915_READ(TILECTL);
        s->gt_fifoctl           = I915_READ(GTFIFOCTL);
        s->gtlc_wake_ctrl       = I915_READ(VLV_GTLC_WAKE_CTRL);
        s->gtlc_survive         = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        s->pmwgicz              = I915_READ(VLV_PMWGICZ);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
        s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
        s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);

        /*
         * Not saving any of:
         * DFT,         0x9800-0x9EC0
         * SARB,        0xB000-0xB1FC
         * GAC,         0x5208-0x524C, 0x14000-0x14C000
         * PCI CFG
         */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        u32 val;
        int i;

        /* GAM 0x4000-0x4770 */
        I915_WRITE(GEN7_WR_WATERMARK,   s->wr_watermark);
        I915_WRITE(GEN7_GFX_PRIO_CTRL,  s->gfx_prio_ctrl);
        I915_WRITE(ARB_MODE,            s->arb_mode | (0xffff << 16));
        I915_WRITE(GEN7_GFX_PEND_TLB0,  s->gfx_pend_tlb0);
        I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK,          s->ecochk);
        I915_WRITE(BSD_HWS_PGA_GEN7,    s->bsd_hwsp);
        I915_WRITE(BLT_HWS_PGA_GEN7,    s->blt_hwsp);

        I915_WRITE(GEN7_TLB_RD_ADDR,    s->tlb_rd_addr);

        /* MBC 0x9024-0x91D0, 0x8500 */
        I915_WRITE(VLV_G3DCTL,          s->g3dctl);
        I915_WRITE(VLV_GSCKGCTL,        s->gsckgctl);
        I915_WRITE(GEN6_MBCTL,          s->mbctl);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        I915_WRITE(GEN6_UCGCTL1,        s->ucgctl1);
        I915_WRITE(GEN6_UCGCTL3,        s->ucgctl3);
        I915_WRITE(GEN6_RCGCTL1,        s->rcgctl1);
        I915_WRITE(GEN6_RCGCTL2,        s->rcgctl2);
        I915_WRITE(GEN6_RSTCTL,         s->rstctl);
        I915_WRITE(GEN7_MISCCPCTL,      s->misccpctl);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        I915_WRITE(GEN6_GFXPAUSE,       s->gfxpause);
        I915_WRITE(GEN6_RPDEUHWTC,      s->rpdeuhwtc);
        I915_WRITE(GEN6_RPDEUC,         s->rpdeuc);
        I915_WRITE(ECOBUS,              s->ecobus);
        I915_WRITE(VLV_PWRDWNUPCTL,     s->pwrdwnupctl);
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
        I915_WRITE(GEN6_RPDEUCSW,       s->rp_deucsw);
        I915_WRITE(GEN6_RCUBMABDTMR,    s->rcubmabdtmr);
        I915_WRITE(VLV_RCEDATA,         s->rcedata);
        I915_WRITE(VLV_SPAREG2H,        s->spare2gh);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        I915_WRITE(GTIMR,               s->gt_imr);
        I915_WRITE(GTIER,               s->gt_ier);
        I915_WRITE(GEN6_PMIMR,          s->pm_imr);
        I915_WRITE(GEN6_PMIER,          s->pm_ier);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL,                     s->tilectl);
        I915_WRITE(GTFIFOCTL,                   s->gt_fifoctl);
        /*
         * Preserve the GT allow wake and GFX force clock bits; they are not
         * restored here, as they are used to control the s0ix suspend/resume
         * sequence by the caller.
         */
        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= VLV_GTLC_ALLOWWAKEREQ;
        val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= VLV_GFX_CLK_FORCE_ON_BIT;
        val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        I915_WRITE(VLV_PMWGICZ,                 s->pmwgicz);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
        I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
}
#endif

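/*
 * Force the Gunit graphics clock on or off, waiting for the clock status
 * bit to settle.  The VLV runtime suspend/resume paths use this to bracket
 * the Gunit state save/restore.
 */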
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
        u32 val;
        int err;

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
        /* Wait for a previous force-off to settle */
        if (force_on) {
                err = wait_for(!COND, 20);
                if (err) {
                        DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
                                  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
                        return err;
                }
        }

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
                val |= VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        if (!force_on)
                return 0;

        err = wait_for(COND, 20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));

        return err;
#undef COND
}

#if 0
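/*
 * Enable or disable GT wake requests and wait for the allow-wake
 * acknowledgment to match the requested state.
 */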
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
        u32 val;
        int err = 0;

        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        if (allow)
                val |= VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
              allow)
        err = wait_for(COND, 1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
        return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                                 bool wait_for_on)
{
        u32 mask;
        u32 val;
        int err;

        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
        if (COND)
                return 0;

        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
                        wait_for_on ? "on" : "off",
                        I915_READ(VLV_GTLC_PW_STATUS));

        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          wait_for_on ? "on" : "off");

        return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;

        DRM_ERROR("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
        u32 mask;
        int err;

        /*
         * Bspec defines the following GT well-on flags as debug only, so
         * don't treat them as hard failures.
         */
        (void)vlv_wait_for_gt_wells(dev_priv, false);

        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

        vlv_check_no_gt_access(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, true);
        if (err)
                goto err1;

        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;
        vlv_save_gunit_s0ix_state(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
                goto err2;

        return 0;

err2:
        /* For safety always re-enable waking and disable gfx clock forcing */
        vlv_allow_gt_wake(dev_priv, true);
err1:
        vlv_force_gfx_clock(dev_priv, false);

        return err;
}

static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int err;
        int ret;

        /*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
        ret = vlv_force_gfx_clock(dev_priv, true);

        vlv_restore_gunit_s0ix_state(dev_priv);

        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
                ret = err;

        err = vlv_force_gfx_clock(dev_priv, false);
        if (!ret)
                ret = err;

        vlv_check_no_gt_access(dev_priv);

        intel_init_clock_gating(dev);
        i915_gem_restore_fences(dev);

        return ret;
}

static int intel_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
                return -ENODEV;

        WARN_ON(!HAS_RUNTIME_PM(dev));
        assert_force_wake_inactive(dev_priv);

        DRM_DEBUG_KMS("Suspending device\n");

        /*
         * We could deadlock here in case another thread holding struct_mutex
         * calls RPM suspend concurrently, since the RPM suspend will wait
         * first for this RPM suspend to finish. In this case the concurrent
         * RPM resume will be followed by its RPM suspend counterpart. Still
         * for consistency return -EAGAIN, which will reschedule this suspend.
         */
        if (!mutex_trylock(&dev->struct_mutex)) {
                DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
                /*
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
                pm_runtime_mark_last_busy(device);

                return -EAGAIN;
        }
        /*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        /*
         * rps.work can't be rearmed here, since we get here only after making
         * sure the GPU is idle and the RPS freq is set to the minimum. See
         * intel_mark_idle().
         */
        cancel_work_sync(&dev_priv->rps.work);
        intel_runtime_pm_disable_interrupts(dev);

        if (IS_GEN6(dev)) {
                ret = 0;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = hsw_runtime_suspend(dev_priv);
        } else if (IS_VALLEYVIEW(dev)) {
                ret = vlv_runtime_suspend(dev_priv);
        } else {
                ret = -ENODEV;
                WARN_ON(1);
        }

        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_restore_interrupts(dev);

                return ret;
        }

        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        dev_priv->pm.suspended = true;

        /*
         * current versions of firmware which depend on this opregion
         * notification have repurposed the D1 definition to mean
         * "runtime suspended" vs. what you would normally expect (D3)
         * to distinguish it from notifications that might be sent
         * via the suspend path.
         */
        intel_opregion_notify_adapter(dev, PCI_D1);

        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
}

static int intel_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        WARN_ON(!HAS_RUNTIME_PM(dev));

        DRM_DEBUG_KMS("Resuming device\n");

        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;

        if (IS_GEN6(dev)) {
                ret = snb_runtime_resume(dev_priv);
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = hsw_runtime_resume(dev_priv);
        } else if (IS_VALLEYVIEW(dev)) {
                ret = vlv_runtime_resume(dev_priv);
        } else {
                WARN_ON(1);
                ret = -ENODEV;
        }

        /*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev);

        intel_runtime_pm_restore_interrupts(dev);
        intel_reset_gt_powersave(dev);

        if (ret)
                DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
        else
                DRM_DEBUG_KMS("Device resumed\n");

        return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
        .suspend = i915_pm_suspend,
        .resume = i915_pm_resume,
        .freeze = i915_pm_freeze,
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_poweroff,
        .restore = i915_pm_resume,
        .runtime_suspend = intel_runtime_suspend,
        .runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
        .fault = i915_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
#ifdef CONFIG_COMPAT
        .compat_ioctl = i915_compat_ioctl,
#endif
        .llseek = noop_llseek,
};
#endif

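/*
 * DragonFly maps GEM objects through the cdev pager; these ops take the
 * place of the Linux vm_operations_struct above.
 */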
static struct cdev_pager_ops i915_gem_vm_ops = {
        .cdev_pg_fault  = i915_gem_fault,
        .cdev_pg_ctor   = i915_gem_pager_ctor,
        .cdev_pg_dtor   = i915_gem_pager_dtor
};

static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
         */
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,

        .load = i915_driver_load,
        .unload = i915_driver_unload,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .postclose = i915_driver_postclose,

        .device_is_agp  = i915_driver_device_is_agp,
        .gem_free_object = i915_gem_free_object,
        .gem_pager_ops  = &i915_gem_vm_ops,
        .dumb_create    = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls         = i915_ioctls,

        .name           = DRIVER_NAME,
        .desc           = DRIVER_DESC,
        .date           = DRIVER_DATE,
        .major          = DRIVER_MAJOR,
        .minor          = DRIVER_MINOR,
        .patchlevel     = DRIVER_PATCHLEVEL,
};

static int __init i915_init(void);

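/*
 * device_attach() entry point: point the softc at our drm_driver and hand
 * off to the generic drm attach code.
 */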
static int
i915_attach(device_t kdev)
{
        struct drm_device *dev = device_get_softc(kdev);

        i915_init();

        dev->driver = &driver;
        return (drm_attach(kdev, i915_attach_list));
}

static device_method_t i915_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         i915_pci_probe),
        DEVMETHOD(device_attach,        i915_attach),
        DEVMETHOD(device_suspend,       i915_suspend),
        DEVMETHOD(device_resume,        i915_resume),
        DEVMETHOD(device_detach,        drm_release),
        DEVMETHOD_END
};

static driver_t i915_driver = {
        "drm",
        i915_methods,
        sizeof(struct drm_device)
};

static int __init i915_init(void)
{
        driver.num_ioctls = i915_max_ioctl;

        /*
         * If CONFIG_DRM_I915_KMS is set, default to KMS unless
         * explicitly disabled with the module parameter.
         *
         * Otherwise, just follow the parameter (defaulting to off).
         *
         * Allow the optional vga_text_mode_force boot option to override
         * the default behavior.
         */
#if defined(CONFIG_DRM_I915_KMS)
        if (i915.modeset != 0)
                driver.driver_features |= DRIVER_MODESET;
#endif
        if (i915.modeset == 1)
                driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force() && i915.modeset == -1)
                driver.driver_features &= ~DRIVER_MODESET;
#endif

        if (!(driver.driver_features & DRIVER_MODESET)) {
                driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
                /* Silently fail loading to not upset userspace. */
                DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
                return 0;
#endif
        }

#if 0
        return drm_pci_init(&driver, &i915_pci_driver);
#else
        return 1;
#endif
}

DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drm, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);