drm: Handle drm masters and minors like Linux
[dragonfly.git] / sys / dev / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include <drm/i915_drm.h>
32 #include "i915_drv.h"
33 #include <drm/drm_pciids.h>
34 #include "intel_drv.h"
35
36 /*               "Specify LVDS channel mode "
37                  "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)" */
38 int i915_lvds_channel_mode __read_mostly = 0;
39 TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
40
41 int i915_disable_power_well __read_mostly = 0;
42 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
43 MODULE_PARM_DESC(disable_power_well,
44                  "Disable the power well when possible (default: false)");
45
46 bool i915_enable_hangcheck __read_mostly = true;
47 module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
48 MODULE_PARM_DESC(enable_hangcheck,
49                 "Periodically check GPU activity for detecting hangs. "
50                 "WARNING: Disabling this can cause system wide hangs. "
51                 "(default: true)");
52
53 static struct drm_driver driver;
54
55 #define INTEL_VGA_DEVICE(id, info_) {           \
56         .device = id,                           \
57         .info = info_,                          \
58 }
59
60 static const struct intel_device_info intel_i830_info = {
61         .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
62         .has_overlay = 1, .overlay_needs_physical = 1,
63 };
64
65 static const struct intel_device_info intel_845g_info = {
66         .gen = 2,
67         .has_overlay = 1, .overlay_needs_physical = 1,
68 };
69
70 static const struct intel_device_info intel_i85x_info = {
71         .gen = 2, .is_i85x = 1, .is_mobile = 1,
72         .cursor_needs_physical = 1,
73         .has_overlay = 1, .overlay_needs_physical = 1,
74 };
75
76 static const struct intel_device_info intel_i865g_info = {
77         .gen = 2,
78         .has_overlay = 1, .overlay_needs_physical = 1,
79 };
80
81 static const struct intel_device_info intel_i915g_info = {
82         .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
83         .has_overlay = 1, .overlay_needs_physical = 1,
84 };
85 static const struct intel_device_info intel_i915gm_info = {
86         .gen = 3, .is_mobile = 1,
87         .cursor_needs_physical = 1,
88         .has_overlay = 1, .overlay_needs_physical = 1,
89         .supports_tv = 1,
90 };
91 static const struct intel_device_info intel_i945g_info = {
92         .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
93         .has_overlay = 1, .overlay_needs_physical = 1,
94 };
95 static const struct intel_device_info intel_i945gm_info = {
96         .gen = 3, .is_i945gm = 1, .is_mobile = 1,
97         .has_hotplug = 1, .cursor_needs_physical = 1,
98         .has_overlay = 1, .overlay_needs_physical = 1,
99         .supports_tv = 1,
100 };
101
102 static const struct intel_device_info intel_i965g_info = {
103         .gen = 4, .is_broadwater = 1,
104         .has_hotplug = 1,
105         .has_overlay = 1,
106 };
107
108 static const struct intel_device_info intel_i965gm_info = {
109         .gen = 4, .is_crestline = 1,
110         .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
111         .has_overlay = 1,
112         .supports_tv = 1,
113 };
114
115 static const struct intel_device_info intel_g33_info = {
116         .gen = 3, .is_g33 = 1,
117         .need_gfx_hws = 1, .has_hotplug = 1,
118         .has_overlay = 1,
119 };
120
121 static const struct intel_device_info intel_g45_info = {
122         .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
123         .has_pipe_cxsr = 1, .has_hotplug = 1,
124         .has_bsd_ring = 1,
125 };
126
127 static const struct intel_device_info intel_gm45_info = {
128         .gen = 4, .is_g4x = 1,
129         .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
130         .has_pipe_cxsr = 1, .has_hotplug = 1,
131         .supports_tv = 1,
132         .has_bsd_ring = 1,
133 };
134
135 static const struct intel_device_info intel_pineview_info = {
136         .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
137         .need_gfx_hws = 1, .has_hotplug = 1,
138         .has_overlay = 1,
139 };
140
141 static const struct intel_device_info intel_ironlake_d_info = {
142         .gen = 5,
143         .need_gfx_hws = 1, .has_hotplug = 1,
144         .has_bsd_ring = 1,
145 };
146
147 static const struct intel_device_info intel_ironlake_m_info = {
148         .gen = 5, .is_mobile = 1,
149         .need_gfx_hws = 1, .has_hotplug = 1,
150         .has_fbc = 1,
151         .has_bsd_ring = 1,
152 };
153
154 static const struct intel_device_info intel_sandybridge_d_info = {
155         .gen = 6,
156         .need_gfx_hws = 1, .has_hotplug = 1,
157         .has_bsd_ring = 1,
158         .has_blt_ring = 1,
159         .has_llc = 1,
160         .has_force_wake = 1,
161 };
162
163 static const struct intel_device_info intel_sandybridge_m_info = {
164         .gen = 6, .is_mobile = 1,
165         .need_gfx_hws = 1, .has_hotplug = 1,
166         .has_fbc = 1,
167         .has_bsd_ring = 1,
168         .has_blt_ring = 1,
169         .has_llc = 1,
170         .has_force_wake = 1,
171 };
172
173 static const struct intel_device_info intel_ivybridge_d_info = {
174         .is_ivybridge = 1, .gen = 7,
175         .need_gfx_hws = 1, .has_hotplug = 1,
176         .has_bsd_ring = 1,
177         .has_blt_ring = 1,
178         .has_llc = 1,
179         .has_force_wake = 1,
180 };
181
182 static const struct intel_device_info intel_ivybridge_m_info = {
183         .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
184         .need_gfx_hws = 1, .has_hotplug = 1,
185         .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
186         .has_bsd_ring = 1,
187         .has_blt_ring = 1,
188         .has_llc = 1,
189         .has_force_wake = 1,
190 };
191
192 static const struct intel_device_info intel_valleyview_m_info = {
193         .gen = 7, .is_mobile = 1,
194         .need_gfx_hws = 1, .has_hotplug = 1,
195         .has_fbc = 0,
196         .has_bsd_ring = 1,
197         .has_blt_ring = 1,
198         .is_valleyview = 1,
199         .display_mmio_offset = VLV_DISPLAY_BASE,
200 };
201
202 static const struct intel_device_info intel_valleyview_d_info = {
203         .gen = 7,
204         .need_gfx_hws = 1, .has_hotplug = 1,
205         .has_fbc = 0,
206         .has_bsd_ring = 1,
207         .has_blt_ring = 1,
208         .is_valleyview = 1,
209         .display_mmio_offset = VLV_DISPLAY_BASE,
210 };
211
212 static const struct intel_device_info intel_haswell_d_info = {
213         .is_haswell = 1, .gen = 7,
214         .need_gfx_hws = 1, .has_hotplug = 1,
215         .has_bsd_ring = 1,
216         .has_blt_ring = 1,
217         .has_llc = 1,
218         .has_force_wake = 1,
219 };
220
221 static const struct intel_device_info intel_haswell_m_info = {
222         .is_haswell = 1, .gen = 7, .is_mobile = 1,
223         .need_gfx_hws = 1, .has_hotplug = 1,
224         .has_bsd_ring = 1,
225         .has_blt_ring = 1,
226         .has_llc = 1,
227         .has_force_wake = 1,
228 };
229
230 static const struct intel_gfx_device_id {
231         int device;
232         const struct intel_device_info *info;
233 } pciidlist[] = {               /* aka */
234         INTEL_VGA_DEVICE(0x3577, &intel_i830_info),             /* I830_M */
235         INTEL_VGA_DEVICE(0x2562, &intel_845g_info),             /* 845_G */
236         INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),             /* I855_GM */
237         INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
238         INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),            /* I865_G */
239         INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
240         INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
241         INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
242         INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
243         INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
244         INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
245         INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
246         INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
247         INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
248         INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
249         INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
250         INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
251         INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
252         INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
253         INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
254         INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
255         INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
256         INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
257         INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
258         INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
259         INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
260         INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
261         INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
262         INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
263         INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
264         INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
265         INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
266         INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
267         INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
268         INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
269         INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
270         INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
271         INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
272         INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
273         INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
274         INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
275         INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
276         INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
277         INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
278         INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
279         INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
280         INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
281         INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
282         INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
283         INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
284         INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
285         INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
286         INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
287         INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
288         INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
289         INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
290         INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
291         INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
292         INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
293         INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
294         INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
295         INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
296         INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
297         INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
298         INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
299         INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
300         INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
301         INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
302         INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
303         INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
304         INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
305         INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
306         INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
307         INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
308         INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
309         INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
310         INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
311         INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
312         INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
313         INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
314         INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
315         INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
316         INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
317         INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
318         INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
319         INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
320         INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
321         INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
322         INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
323         INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
324         INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
325         INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
326         INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
327         INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
328         INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
329         INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
330         INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
331         INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
332         INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
333         INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
334         INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
335         INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
336         INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
337         INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
338         INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
339         INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
340         INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
341         {0, 0}
342 };
343
344 #define PCI_VENDOR_INTEL        0x8086
345
346 void intel_detect_pch(struct drm_device *dev)
347 {
348         struct drm_i915_private *dev_priv = dev->dev_private;
349         device_t pch;
350
351         /*
352          * The reason to probe ISA bridge instead of Dev31:Fun0 is to
353          * make graphics device passthrough work easy for VMM, that only
354          * need to expose ISA bridge to let driver know the real hardware
355          * underneath. This is a requirement from virtualization team.
356          */
357         pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
358         if (pch) {
359                 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
360                         unsigned short id;
361                         id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
362                         dev_priv->pch_id = id;
363
364                         if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
365                                 dev_priv->pch_type = PCH_IBX;
366                                 dev_priv->num_pch_pll = 2;
367                                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
368                                 WARN_ON(!IS_GEN5(dev));
369                         } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
370                                 dev_priv->pch_type = PCH_CPT;
371                                 dev_priv->num_pch_pll = 2;
372                                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
373                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
374                         } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
375                                 /* PantherPoint is CPT compatible */
376                                 dev_priv->pch_type = PCH_CPT;
377                                 dev_priv->num_pch_pll = 2;
378                                 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
379                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
380                         } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
381                                 dev_priv->pch_type = PCH_LPT;
382                                 dev_priv->num_pch_pll = 0;
383                                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
384                                 WARN_ON(!IS_HASWELL(dev));
385                         } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
386                                 dev_priv->pch_type = PCH_LPT;
387                                 dev_priv->num_pch_pll = 0;
388                                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
389                                 WARN_ON(!IS_HASWELL(dev));
390                         }
391                         BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
392                 }
393 #if 0
394                 pci_dev_put(pch);
395 #endif
396         }
397 }
398
399 bool i915_semaphore_is_enabled(struct drm_device *dev)
400 {
401         if (INTEL_INFO(dev)->gen < 6)
402                 return 0;
403
404         if (i915_semaphores >= 0)
405                 return i915_semaphores;
406
407 #ifdef CONFIG_INTEL_IOMMU
408         /* Enable semaphores on SNB when IO remapping is off */
409         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
410                 return false;
411 #endif
412
413         return 1;
414 }
415
416 static int i915_drm_freeze(struct drm_device *dev)
417 {
418         struct drm_i915_private *dev_priv = dev->dev_private;
419
420         /* ignore lid events during suspend */
421         mutex_lock(&dev_priv->modeset_restore_lock);
422         dev_priv->modeset_restore = MODESET_SUSPENDED;
423         mutex_unlock(&dev_priv->modeset_restore_lock);
424
425         intel_set_power_well(dev, true);
426
427         drm_kms_helper_poll_disable(dev);
428
429 #if 0
430         pci_save_state(dev->pdev);
431 #endif
432
433         /* If KMS is active, we do the leavevt stuff here */
434         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
435                 int error = i915_gem_idle(dev);
436                 if (error) {
437                         dev_err(dev->pdev->dev,
438                                 "GEM idle failed, resume might fail\n");
439                         return error;
440                 }
441
442                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
443
444 #if 0
445                 intel_modeset_disable(dev);
446 #endif
447
448                 drm_irq_uninstall(dev);
449                 dev_priv->enable_hotplug_processing = false;
450         }
451
452         i915_save_state(dev);
453
454         intel_opregion_fini(dev);
455
456         return 0;
457 }
458
459 static int
460 i915_suspend(device_t kdev)
461 {
462         struct drm_device *dev;
463         int error;
464
465         dev = device_get_softc(kdev);
466         if (dev == NULL || dev->dev_private == NULL) {
467                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
468                 return -ENODEV;
469         }
470
471         DRM_DEBUG_KMS("starting suspend\n");
472         error = i915_drm_freeze(dev);
473         if (error)
474                 return (error);
475
476         error = bus_generic_suspend(kdev);
477         DRM_DEBUG_KMS("finished suspend %d\n", error);
478         return (error);
479 }
480
481 static int __i915_drm_thaw(struct drm_device *dev)
482 {
483         struct drm_i915_private *dev_priv = dev->dev_private;
484         int error = 0;
485
486         i915_restore_state(dev);
487         intel_opregion_setup(dev);
488
489         /* KMS EnterVT equivalent */
490         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
491                 intel_init_pch_refclk(dev);
492
493                 mutex_lock(&dev->struct_mutex);
494                 dev_priv->mm.suspended = 0;
495
496                 error = i915_gem_init_hw(dev);
497                 mutex_unlock(&dev->struct_mutex);
498
499                 /* We need working interrupts for modeset enabling ... */
500                 drm_irq_install(dev);
501
502                 intel_modeset_init_hw(dev);
503                 intel_modeset_setup_hw_state(dev, false);
504
505                 /*
506                  * ... but also need to make sure that hotplug processing
507                  * doesn't cause havoc. Like in the driver load code we don't
508                  * bother with the tiny race here where we might loose hotplug
509                  * notifications.
510                  * */
511                 intel_hpd_init(dev);
512                 dev_priv->enable_hotplug_processing = true;
513         }
514
515         intel_opregion_init(dev);
516
517         /*
518          * The console lock can be pretty contented on resume due
519          * to all the printk activity.  Try to keep it out of the hot
520          * path of resume if possible.
521          */
522 #if 0
523         if (console_trylock()) {
524                 intel_fbdev_set_suspend(dev, 0);
525                 console_unlock();
526         } else {
527                 schedule_work(&dev_priv->console_resume_work);
528         }
529 #endif
530
531         mutex_lock(&dev_priv->modeset_restore_lock);
532         dev_priv->modeset_restore = MODESET_DONE;
533         mutex_unlock(&dev_priv->modeset_restore_lock);
534         return error;
535 }
536
537 static int i915_drm_thaw(struct drm_device *dev)
538 {
539         int error = 0;
540
541         intel_gt_reset(dev);
542
543         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
544                 mutex_lock(&dev->struct_mutex);
545                 i915_gem_restore_gtt_mappings(dev);
546                 mutex_unlock(&dev->struct_mutex);
547         }
548
549         __i915_drm_thaw(dev);
550
551         return error;
552 }
553
554 static int
555 i915_resume(device_t kdev)
556 {
557         struct drm_device *dev;
558         int ret;
559
560         dev = device_get_softc(kdev);
561         DRM_DEBUG_KMS("starting resume\n");
562 #if 0
563         if (pci_enable_device(dev->pdev))
564                 return -EIO;
565
566         pci_set_master(dev->pdev);
567 #endif
568
569         ret = -i915_drm_thaw(dev);
570         if (ret != 0)
571                 return (ret);
572
573         drm_kms_helper_poll_enable(dev);
574         ret = bus_generic_resume(kdev);
575         DRM_DEBUG_KMS("finished resume %d\n", ret);
576         return (ret);
577 }
578
579 /* XXX Hack for the old *BSD drm code base
580  * The device id field is set at probe time */
581 static drm_pci_id_list_t i915_attach_list[] = {
582         {0x8086, 0, 0, "Intel i915 GPU"},
583         {0, 0, 0, NULL}
584 };
585
586 int i915_modeset;
587
588 /* static int __init i915_init(void) */
589 static int
590 i915_attach(device_t kdev)
591 {
592         struct drm_device *dev;
593         int ret;
594
595         dev = device_get_softc(kdev);
596
597         driver.num_ioctls = i915_max_ioctl;
598
599         if (i915_modeset == 1)
600                 driver.driver_features |= DRIVER_MODESET;
601
602         dev->driver = &driver;
603
604         /* XXX shoud be in drm_get_pci_dev() */
605         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
606                 ret = drm_get_minor(kdev, dev, &dev->control, DRM_MINOR_CONTROL);
607                 if (ret)
608                         return -ENODEV;
609         }
610
611         if ((ret = drm_get_minor(kdev, dev, &dev->primary, DRM_MINOR_LEGACY)))
612                 return -ENODEV;
613
614         return (drm_attach(kdev, i915_attach_list));
615 }
616
617 const struct intel_device_info *
618 i915_get_device_id(int device)
619 {
620         const struct intel_gfx_device_id *did;
621
622         for (did = &pciidlist[0]; did->device != 0; did++) {
623                 if (did->device != device)
624                         continue;
625                 return (did->info);
626         }
627         return (NULL);
628 }
629
630 extern devclass_t drm_devclass;
631
632 int intel_iommu_enabled = 0;
633 TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
634
635 int i915_semaphores = -1;
636 TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
637 static int i915_try_reset = 1;
638 TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
639 unsigned int i915_lvds_downclock = 0;
640 TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
641 int i915_vbt_sdvo_panel_type = -1;
642 TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
643 unsigned int i915_powersave = 1;
644 TUNABLE_INT("drm.i915.powersave", &i915_powersave);
645 int i915_enable_fbc = 0;
646 TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
647 int i915_enable_rc6 = 0;
648 TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
649 int i915_panel_use_ssc = -1;
650 TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
651 int i915_panel_ignore_lid = 0;
652 TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
653 int i915_modeset = 1;
654 TUNABLE_INT("drm.i915.modeset", &i915_modeset);
655 int i915_enable_ppgtt = -1;
656 TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
657
658 static int i8xx_do_reset(struct drm_device *dev)
659 {
660         struct drm_i915_private *dev_priv = dev->dev_private;
661
662         if (IS_I85X(dev))
663                 return -ENODEV;
664
665         I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
666         POSTING_READ(D_STATE);
667
668         if (IS_I830(dev) || IS_845G(dev)) {
669                 I915_WRITE(DEBUG_RESET_I830,
670                            DEBUG_RESET_DISPLAY |
671                            DEBUG_RESET_RENDER |
672                            DEBUG_RESET_FULL);
673                 POSTING_READ(DEBUG_RESET_I830);
674                 msleep(1);
675
676                 I915_WRITE(DEBUG_RESET_I830, 0);
677                 POSTING_READ(DEBUG_RESET_I830);
678         }
679
680         msleep(1);
681
682         I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
683         POSTING_READ(D_STATE);
684
685         return 0;
686 }
687
688 static int i965_reset_complete(struct drm_device *dev)
689 {
690         u8 gdrst;
691         pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
692         return (gdrst & GRDOM_RESET_ENABLE) == 0;
693 }
694
695 static int i965_do_reset(struct drm_device *dev)
696 {
697         int ret;
698         u8 gdrst;
699
700         /*
701          * Set the domains we want to reset (GRDOM/bits 2 and 3) as
702          * well as the reset bit (GR/bit 0).  Setting the GR bit
703          * triggers the reset; when done, the hardware will clear it.
704          */
705         pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
706         pci_write_config_byte(dev->pdev, I965_GDRST,
707                               gdrst | GRDOM_RENDER |
708                               GRDOM_RESET_ENABLE);
709         ret =  wait_for(i965_reset_complete(dev), 500);
710         if (ret)
711                 return ret;
712
713         /* We can't reset render&media without also resetting display ... */
714         pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
715         pci_write_config_byte(dev->pdev, I965_GDRST,
716                               gdrst | GRDOM_MEDIA |
717                               GRDOM_RESET_ENABLE);
718
719         return wait_for(i965_reset_complete(dev), 500);
720 }
721
722 static int ironlake_do_reset(struct drm_device *dev)
723 {
724         struct drm_i915_private *dev_priv = dev->dev_private;
725         u32 gdrst;
726         int ret;
727
728         gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
729         I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
730                    gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
731         ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
732         if (ret)
733                 return ret;
734
735         /* We can't reset render&media without also resetting display ... */
736         gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
737         I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
738                    gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
739         return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
740 }
741
742 static int gen6_do_reset(struct drm_device *dev)
743 {
744         struct drm_i915_private *dev_priv = dev->dev_private;
745         int ret;
746
747         dev_priv = dev->dev_private;
748
749         /* Hold gt_lock across reset to prevent any register access
750          * with forcewake not set correctly
751          */
752         lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
753
754         /* Reset the chip */
755
756         /* GEN6_GDRST is not in the gt power well, no need to check
757          * for fifo space for the write or forcewake the chip for
758          * the read
759          */
760         I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
761
762         /* Spin waiting for the device to ack the reset request */
763         ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
764
765         /* If reset with a user forcewake, try to restore, otherwise turn it off */
766         if (dev_priv->forcewake_count)
767                 dev_priv->gt.force_wake_get(dev_priv);
768         else
769                 dev_priv->gt.force_wake_put(dev_priv);
770
771         /* Restore fifo count */
772         dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
773
774         lockmgr(&dev_priv->gt_lock, LK_RELEASE);
775         return ret;
776 }
777
778 int intel_gpu_reset(struct drm_device *dev)
779 {
780         struct drm_i915_private *dev_priv = dev->dev_private;
781         int ret = -ENODEV;
782
783         switch (INTEL_INFO(dev)->gen) {
784         case 7:
785         case 6:
786                 ret = gen6_do_reset(dev);
787                 break;
788         case 5:
789                 ret = ironlake_do_reset(dev);
790                 break;
791         case 4:
792                 ret = i965_do_reset(dev);
793                 break;
794         case 2:
795                 ret = i8xx_do_reset(dev);
796                 break;
797         }
798
799         /* Also reset the gpu hangman. */
800         if (dev_priv->gpu_error.stop_rings) {
801                 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
802                 dev_priv->gpu_error.stop_rings = 0;
803                 if (ret == -ENODEV) {
804                         DRM_ERROR("Reset not implemented, but ignoring "
805                                   "error for simulated gpu hangs\n");
806                         ret = 0;
807                 }
808         }
809
810         return ret;
811 }
812
813 /**
814  * i915_reset - reset chip after a hang
815  * @dev: drm device to reset
816  *
817  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
818  * reset or otherwise an error code.
819  *
820  * Procedure is fairly simple:
821  *   - reset the chip using the reset reg
822  *   - re-init context state
823  *   - re-init hardware status page
824  *   - re-init ring buffer
825  *   - re-init interrupt state
826  *   - re-init display
827  */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	/* Resets can be disabled entirely via the i915_try_reset tunable. */
	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	/* Tear down GEM state that the hang invalidated before touching HW. */
	i915_gem_reset(dev);

	ret = -ENODEV;
	/* If the GPU hung again within 5 seconds of the previous reset,
	 * resetting clearly isn't helping -- declare it wedged instead of
	 * attempting another hardware reset. */
	if (time_uptime - dev_priv->gpu_error.last_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->gpu_error.last_reset = time_uptime;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		/* Re-run each ring's init hook to rebuild ring state. */
		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		/* struct_mutex must be dropped before reinstalling the IRQ
		 * handler and re-initializing hotplug detection. */
		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
899
900 static int
901 i915_pci_probe(device_t kdev)
902 {
903         int device, i = 0;
904
905         if (pci_get_class(kdev) != PCIC_DISPLAY)
906                 return ENXIO;
907
908         if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
909                 return ENXIO;
910
911         device = pci_get_device(kdev);
912
913         for (i = 0; pciidlist[i].device != 0; i++) {
914                 if (pciidlist[i].device == device) {
915                         i915_attach_list[0].device = device;
916                         return 0;
917                 }
918         }
919
920         return ENXIO;
921 }
922
/*
 * DRM driver descriptor for i915.  Feature flags advertise AGP use and
 * requirement, IRQ support on a shared line, and GEM; the function table
 * wires the i915-specific entry points into the DRM core.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load		= i915_driver_load,
	.unload		= i915_driver_unload,
	.open		= i915_driver_open,
	.lastclose	= i915_driver_lastclose,
	.preclose	= i915_driver_preclose,
	.postclose	= i915_driver_postclose,

	.device_is_agp	= i915_driver_device_is_agp,
	/* Per-master (per-X-server) private state hooks. */
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,

	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops	= &i915_gem_pager_ops,

	/* Dumb-buffer hooks for unaccelerated KMS clients. */
	.dumb_create	= i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy	= i915_gem_dumb_destroy,
	.ioctls		= i915_ioctls,

	.name		= DRIVER_NAME,
	.desc		= DRIVER_DESC,
	.date		= DRIVER_DATE,
	.major		= DRIVER_MAJOR,
	.minor		= DRIVER_MINOR,
	.patchlevel	= DRIVER_PATCHLEVEL,
};
957
/* Newbus device-interface method table; detach goes straight to the
 * generic drm_release(). */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_pci_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_release),
	DEVMETHOD_END
};
967
/* Newbus driver glue: registered under the shared "drm" name; the softc
 * is the full drm_device structure. */
static driver_t i915_driver = {
	"drm",
	i915_methods,
	sizeof(struct drm_device)
};
973
/* Attach the i915kms module to vgapci devices.  It depends on the drm
 * core, agp, and the iic bus stack (presumably for DDC/GMBUS i2c --
 * confirm against intel_i2c usage). */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
MODULE_DEPEND(i915kms, drm, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
981
/* We give fast paths for the really cool registers */
/* True when @reg sits behind the GT power well on parts with forcewake:
 * below 0x40000 and not the FORCEWAKE register itself. */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
	 * harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}
995
/*
 * Generate i915_read{8,16,32,64}().  Gen5 parts get a dummy MI_MODE write
 * first to wake the chip from rc6.  Registers behind the GT power well are
 * read under gt_lock, taking a temporary forcewake reference when no user
 * already holds one (forcewake_count == 0).  Every read is traced.
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
1021
/*
 * Generate i915_write{8,16,32,64}().  Writes to forcewake-protected
 * registers first wait for GT FIFO space (checked again afterwards via
 * gen6_gt_check_fifodbg on a non-zero wait result).  Gen5 gets the rc6
 * wakeup dummy write; Haswell checks GEN7_ERR_INT before and after the
 * write to detect and report unclaimed-register accesses.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unclaimed write to %x\n", reg); \
		DRM_WRITE32(dev_priv->mmio_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED);	\
	} \
}

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
1050
/* Registers userspace may read through i915_reg_read_ioctl(), with their
 * access size in bytes and a bitmask of supported generations. */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
1058
1059 int i915_reg_read_ioctl(struct drm_device *dev,
1060                         void *data, struct drm_file *file)
1061 {
1062         struct drm_i915_private *dev_priv = dev->dev_private;
1063         struct drm_i915_reg_read *reg = data;
1064         struct register_whitelist const *entry = whitelist;
1065         int i;
1066
1067         for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1068                 if (entry->offset == reg->offset &&
1069                     (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1070                         break;
1071         }
1072
1073         if (i == ARRAY_SIZE(whitelist))
1074                 return -EINVAL;
1075
1076         switch (entry->size) {
1077         case 8:
1078                 reg->val = I915_READ64(reg->offset);
1079                 break;
1080         case 4:
1081                 reg->val = I915_READ(reg->offset);
1082                 break;
1083         case 2:
1084                 reg->val = I915_READ16(reg->offset);
1085                 break;
1086         case 1:
1087                 reg->val = I915_READ8(reg->offset);
1088                 break;
1089         default:
1090                 WARN_ON(1);
1091                 return -EINVAL;
1092         }
1093
1094         return 0;
1095 }