/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_pciids.h>
#include "intel_drv.h"

/* Specify LVDS channel mode
 * (0=probe BIOS [default], 1=single-channel, 2=dual-channel) */
int i915_lvds_channel_mode __read_mostly = 0;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);

int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
        "Disable the power well when possible (default: false)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
        "Periodically check GPU activity for detecting hangs. "
        "WARNING: Disabling this can cause system wide hangs. "
        "(default: true)");

static struct drm_driver driver;

#define INTEL_VGA_DEVICE(id, info) { \
        .class = PCI_BASE_CLASS_DISPLAY << 16, \
        .class_mask = 0xff0000, \
        .vendor = 0x8086, \
        .device = id, \
        .subvendor = PCI_ANY_ID, \
        .subdevice = PCI_ANY_ID, \
        .driver_data = (unsigned long) info }

#define INTEL_QUANTA_VGA_DEVICE(info) { \
        .class = PCI_BASE_CLASS_DISPLAY << 16, \
        .class_mask = 0xff0000, \
        .vendor = 0x8086, \
        .device = 0x16a, \
        .subvendor = 0x152d, \
        .subdevice = 0x8990, \
        .driver_data = (unsigned long) info }

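/*
 * Note: each INTEL_VGA_DEVICE(id, info) entry in the table further below
 * expands to a pci_device_id matching vendor 0x8086 and the given device id,
 * with any subvendor/subdevice, and stashes the intel_device_info pointer in
 * driver_data (returned later by i915_get_device_id()).
 */
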
static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

#define GEN7_FEATURES \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_bsd_ring = 1, \
        .has_blt_ring = 1, \
        .has_llc = 1, \
        .has_force_wake = 1

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
};

static const struct pci_device_id pciidlist[] = {              /* aka */
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),             /* I830_M */
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),             /* 845_G */
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),             /* I855_GM */
        INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),            /* I865_G */
        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
        INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
        INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
        INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
        INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
        INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
        INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
        INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
        INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
        INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
        INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
        INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
        INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
        INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
        INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
        INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
        INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
        INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
        INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
        INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
        INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
        INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
        INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
        INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
        INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
        INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT3 mobile */
        INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
        INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
        INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
        INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
        INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
        INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
        INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
        INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
        INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
        INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
        INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
        INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
        INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
        INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
        INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
        INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
        INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
        INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
        INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
        INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
        INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
        INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
        INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
        INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
        INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
        INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
        INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
        INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
        INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
        INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
        INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
        INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
        INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
        INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
        INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
        INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
        INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
        INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
        INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
        INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
        INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
        INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
        INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
        {0, 0}
};

#define PCI_VENDOR_INTEL        0x8086

void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        device_t pch;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                dev_priv->num_pch_pll = 0;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for the VMM, which only
         * needs to expose the ISA bridge to let the driver know the real
         * hardware underneath. This is a requirement from the virtualization
         * team.
         */
        pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
        if (pch) {
                if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
                        unsigned short id;
                        id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->num_pch_pll = 0;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(IS_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->num_pch_pll = 0;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
                        }
                        BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
                }
#if 0
                pci_dev_put(pch);
#endif
        }
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

        if (i915_semaphores >= 0)
                return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_set_power_well(dev, true);

        drm_kms_helper_poll_disable(dev);

#if 0
        pci_save_state(dev->pdev);
#endif

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error = i915_gem_idle(dev);
                if (error) {
                        dev_err(dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;
                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw.
                 */
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                        dev_priv->display.crtc_disable(crtc);
        }

        i915_save_state(dev);

        intel_opregion_fini(dev);

        return 0;
}

static int
i915_suspend(device_t kdev)
{
        struct drm_device *dev;
        int error;

        dev = device_get_softc(kdev);
        if (dev == NULL || dev->dev_private == NULL) {
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        DRM_DEBUG_KMS("starting suspend\n");
        error = i915_drm_freeze(dev);
        if (error)
                return (error);

        error = bus_generic_suspend(kdev);
        DRM_DEBUG_KMS("finished suspend %d\n", error);
        return (error);
}

static void intel_resume_hotplug(struct drm_device *dev)
{
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}

static int __i915_drm_thaw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);

                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;

                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev);

                intel_modeset_init_hw(dev);

                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
                /* Config may have changed between suspend and resume */
                intel_resume_hotplug(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity. Try to keep it out of the hot
         * path of resume if possible.
         */
#if 0
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, 0);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }
#endif

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);
        return error;
}

static int i915_drm_thaw(struct drm_device *dev)
{
        int error = 0;

        intel_gt_reset(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        __i915_drm_thaw(dev);

        return error;
}

static int
i915_resume(device_t kdev)
{
        struct drm_device *dev;
        int ret;

        dev = device_get_softc(kdev);
        DRM_DEBUG_KMS("starting resume\n");
#if 0
        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);
#endif

        ret = -i915_drm_thaw(dev);
        if (ret != 0)
                return (ret);

        drm_kms_helper_poll_enable(dev);
        ret = bus_generic_resume(kdev);
        DRM_DEBUG_KMS("finished resume %d\n", ret);
        return (ret);
}

/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
        {0x8086, 0, 0, "Intel i915 GPU"},
        {0, 0, 0, NULL}
};

int i915_modeset;

/* static int __init i915_init(void) */
static int
i915_attach(device_t kdev)
{
        struct drm_device *dev;

        dev = device_get_softc(kdev);

        driver.num_ioctls = i915_max_ioctl;

        if (i915_modeset == 1)
                driver.driver_features |= DRIVER_MODESET;

        dev->driver = &driver;
        return (drm_attach(kdev, i915_attach_list));
}

const struct intel_device_info *
i915_get_device_id(int device)
{
        const struct pci_device_id *did;

        for (did = &pciidlist[0]; did->device != 0; did++) {
                if (did->device != device)
                        continue;
                return (struct intel_device_info *)did->driver_data;
        }
        return (NULL);
}

extern devclass_t drm_devclass;

int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);

static int i8xx_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_I85X(dev))
                return -ENODEV;

        I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        if (IS_I830(dev) || IS_845G(dev)) {
                I915_WRITE(DEBUG_RESET_I830,
                           DEBUG_RESET_DISPLAY |
                           DEBUG_RESET_RENDER |
                           DEBUG_RESET_FULL);
                POSTING_READ(DEBUG_RESET_I830);
                msleep(1);

                I915_WRITE(DEBUG_RESET_I830, 0);
                POSTING_READ(DEBUG_RESET_I830);
        }

        msleep(1);

        I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
        int ret;
        u8 gdrst;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0). Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              gdrst | GRDOM_RENDER |
                              GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              gdrst | GRDOM_MEDIA |
                              GRDOM_RESET_ENABLE);

        return wait_for(i965_reset_complete(dev), 500);
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 gdrst;
        int ret;

        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Hold gt_lock across reset to prevent any register access
         * with forcewake not set correctly
         */
        lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->forcewake_count)
                dev_priv->gt.force_wake_get(dev_priv);
        else
                dev_priv->gt.force_wake_put(dev_priv);

        /* Restore fifo count */
        dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

        lockmgr(&dev_priv->gt_lock, LK_RELEASE);
        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = -ENODEV;

        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6:
                ret = gen6_do_reset(dev);
                break;
        case 5:
                ret = ironlake_do_reset(dev);
                break;
        case 4:
                ret = i965_do_reset(dev);
                break;
        case 2:
                ret = i8xx_do_reset(dev);
                break;
        }

        /* Also reset the gpu hangman. */
        if (dev_priv->gpu_error.stop_rings) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_ERROR("Reset not implemented, but ignoring "
                                  "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        return ret;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (!i915_try_reset)
                return 0;

        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);

        ret = -ENODEV;
        if (time_uptime - dev_priv->gpu_error.last_reset < 5)
                DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
        else
                ret = intel_gpu_reset(dev);

        dev_priv->gpu_error.last_reset = time_uptime;
        if (ret) {
                DRM_ERROR("Failed to reset chip.\n");
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there. Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
            !dev_priv->mm.suspended) {
                struct intel_ring_buffer *ring;
                int i;

                dev_priv->mm.suspended = 0;

                i915_gem_init_swizzling(dev);

                for_each_ring(ring, dev_priv, i)
                        ring->init(ring);

                i915_gem_context_init(dev);
                if (dev_priv->mm.aliasing_ppgtt) {
                        ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
                        if (ret)
                                i915_gem_cleanup_aliasing_ppgtt(dev);
                }

                /*
                 * It would make sense to re-init all the other hw state, at
                 * least the rps/rc6/emon init done within modeset_init_hw. For
                 * some unknown reason, this blows up my ilk, so don't.
                 */

                mutex_unlock(&dev->struct_mutex);

                drm_irq_uninstall(dev);
                drm_irq_install(dev);
                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }

        return 0;
}

static int
i915_pci_probe(device_t kdev)
{
        int device, i = 0;

        if (pci_get_class(kdev) != PCIC_DISPLAY)
                return ENXIO;

        if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
                return ENXIO;

        device = pci_get_device(kdev);

        for (i = 0; pciidlist[i].device != 0; i++) {
                if (pciidlist[i].device == device) {
                        i915_attach_list[0].device = device;
                        return 0;
                }
        }

        return ENXIO;
}

static struct drm_driver driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
            DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
            DRIVER_GEM /*| DRIVER_MODESET*/,

        .buf_priv_size = sizeof(drm_i915_private_t),
        .load = i915_driver_load,
        .open = i915_driver_open,
        .unload = i915_driver_unload,
        .preclose = i915_driver_preclose,
        .lastclose = i915_driver_lastclose,
        .postclose = i915_driver_postclose,
        .device_is_agp = i915_driver_device_is_agp,
        .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
        .gem_pager_ops = &i915_gem_pager_ops,
        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = i915_gem_dumb_destroy,

        .ioctls = i915_ioctls,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

993
a2fdbec6
FT
994static device_method_t i915_methods[] = {
995 /* Device interface */
996 DEVMETHOD(device_probe, i915_pci_probe),
997 DEVMETHOD(device_attach, i915_attach),
998 DEVMETHOD(device_suspend, i915_suspend),
999 DEVMETHOD(device_resume, i915_resume),
d0cc45b6 1000 DEVMETHOD(device_detach, drm_release),
a2fdbec6
FT
1001 DEVMETHOD_END
1002};
1003
1004static driver_t i915_driver = {
1005 "drm",
1006 i915_methods,
1007 sizeof(struct drm_device)
1008};
1009
1010DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
1011 SI_ORDER_ANY);
1012MODULE_DEPEND(i915kms, drm, 1, 1, 1);
1013MODULE_DEPEND(i915kms, agp, 1, 1, 1);
1014MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
1015MODULE_DEPEND(i915kms, iic, 1, 1, 1);
1016MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
1017
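/*
 * Usage note (illustrative, not part of the original source): the module
 * declared above is named i915kms, so on a DragonFly system it would
 * typically be loaded with "kldload i915kms" or via an i915kms_load="YES"
 * line in /boot/loader.conf.
 */
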
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
        ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
         ((reg) < 0x40000) && \
         ((reg) != FORCEWAKE))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
         * chip from rc6 before touching it for real. MI_MODE is masked, hence
         * harmless to write 0 into. */
        I915_WRITE_NOTRACE(MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
        if (IS_HASWELL(dev_priv->dev) &&
            (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unknown unclaimed register before writing to %x\n",
                          reg);
                I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
        if (IS_HASWELL(dev_priv->dev) &&
            (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed write to %x\n", reg);
                I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
        if (IS_GEN5(dev_priv->dev)) \
                ilk_dummy_write(dev_priv); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
                if (dev_priv->forcewake_count == 0) \
                        dev_priv->gt.force_wake_get(dev_priv); \
                val = DRM_READ##y(dev_priv->mmio_map, reg); \
                if (dev_priv->forcewake_count == 0) \
                        dev_priv->gt.force_wake_put(dev_priv); \
                lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
        } else { \
                val = DRM_READ##y(dev_priv->mmio_map, reg); \
        } \
        trace_i915_reg_rw(false, reg, val, sizeof(val)); \
        return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read

#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
        u32 __fifo_ret = 0; \
        trace_i915_reg_rw(true, reg, val, sizeof(val)); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        if (IS_GEN5(dev_priv->dev)) \
                ilk_dummy_write(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
}

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
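
/*
 * Each instantiation above defines one MMIO accessor; __i915_read(32, 32),
 * for example, produces u32 i915_read32(struct drm_i915_private *dev_priv,
 * u32 reg), which grabs forcewake when NEEDS_FORCE_WAKE() says the register
 * needs it and otherwise falls through to a plain DRM_READ32().
 */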

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
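
/*
 * gen_bitmask is tested with (1 << INTEL_INFO(dev)->gen) below, so the 0xF0
 * mask on the RING_TIMESTAMP entry covers gens 4 through 7
 * (0x10 | 0x20 | 0x40 | 0x80).
 */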

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}