b8a309a94fd54aecbd5b335812ad8b6f8b08f757
[dragonfly.git] / sys / dev / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include <drm/i915_drm.h>
32 #include "i915_drv.h"
33 #include <drm/drm_pciids.h>
34 #include "intel_drv.h"
35
/* Loader tunables / module parameters controlling driver policy. */

/*               "Specify LVDS channel mode "
                 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)" */
int i915_lvds_channel_mode __read_mostly = 0;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);

/* 0 = keep the power well enabled; see MODULE_PARM_DESC below. */
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
                 "Disable the power well when possible (default: false)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
                "Periodically check GPU activity for detecting hangs. "
                "WARNING: Disabling this can cause system wide hangs. "
                "(default: true)");
52
/* Forward declaration; num_ioctls/driver_features are finished in i915_attach(). */
static struct drm_driver driver;

/* Helper for pciidlist[]: pairs a PCI device id with its capability info. */
#define INTEL_VGA_DEVICE(id, info_) {           \
        .device = id,                           \
        .info = info_,                          \
}
59
/*
 * Static per-chipset capability descriptors, referenced from pciidlist[]
 * below.  .gen is the GPU hardware generation; the remaining flags gate
 * feature paths elsewhere in the driver.
 */
static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1,
        .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_d_info = {
        .is_ivybridge = 1, .gen = 7,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_valleyview_m_info = {
        .gen = 7, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 0,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
};

static const struct intel_device_info intel_valleyview_d_info = {
        .gen = 7,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 0,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
};

static const struct intel_device_info intel_haswell_d_info = {
        .is_haswell = 1, .gen = 7,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
        .is_haswell = 1, .gen = 7, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_force_wake = 1,
};
229
/*
 * PCI device id -> capability-info table.  Scanned linearly by
 * i915_get_device_id(); terminated by the {0, 0} sentinel entry.
 */
static const struct intel_gfx_device_id {
        int device;
        const struct intel_device_info *info;
} pciidlist[] = {               /* aka */
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),             /* I830_M */
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),             /* 845_G */
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),             /* I855_GM */
        INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),            /* I865_G */
        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
        INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
        INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
        INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
        INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
        INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
        INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
        INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
        INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
        INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
        INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
        INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
        INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
        INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
        INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
        INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
        INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
        INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
        INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
        INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
        INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
        INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
        INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
        INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
        INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
        INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
        INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
        INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
        INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
        INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
        INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
        INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
        INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
        INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
        INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
        INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
        INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
        INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
        INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
        INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
        INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
        INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
        INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
        INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
        INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
        INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
        INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
        INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
        INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
        INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
        INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
        INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
        INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
        INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
        INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
        INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
        INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
        INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
        INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
        INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
        INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
        INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
        INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
        INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
        INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
        INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
        INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
        INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
        {0, 0}
};

#define PCI_VENDOR_INTEL        0x8086
345
346 void intel_detect_pch(struct drm_device *dev)
347 {
348         struct drm_i915_private *dev_priv = dev->dev_private;
349         device_t pch;
350
351         /*
352          * The reason to probe ISA bridge instead of Dev31:Fun0 is to
353          * make graphics device passthrough work easy for VMM, that only
354          * need to expose ISA bridge to let driver know the real hardware
355          * underneath. This is a requirement from virtualization team.
356          */
357         pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
358         if (pch) {
359                 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
360                         unsigned short id;
361                         id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
362                         dev_priv->pch_id = id;
363
364                         if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
365                                 dev_priv->pch_type = PCH_IBX;
366                                 dev_priv->num_pch_pll = 2;
367                                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
368                                 WARN_ON(!IS_GEN5(dev));
369                         } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
370                                 dev_priv->pch_type = PCH_CPT;
371                                 dev_priv->num_pch_pll = 2;
372                                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
373                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
374                         } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
375                                 /* PantherPoint is CPT compatible */
376                                 dev_priv->pch_type = PCH_CPT;
377                                 dev_priv->num_pch_pll = 2;
378                                 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
379                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
380                         } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
381                                 dev_priv->pch_type = PCH_LPT;
382                                 dev_priv->num_pch_pll = 0;
383                                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
384                                 WARN_ON(!IS_HASWELL(dev));
385                         } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
386                                 dev_priv->pch_type = PCH_LPT;
387                                 dev_priv->num_pch_pll = 0;
388                                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
389                                 WARN_ON(!IS_HASWELL(dev));
390                         }
391                         BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
392                 }
393 #if 0
394                 pci_dev_put(pch);
395 #endif
396         }
397 }
398
399 bool i915_semaphore_is_enabled(struct drm_device *dev)
400 {
401         if (INTEL_INFO(dev)->gen < 6)
402                 return 0;
403
404         if (i915_semaphores >= 0)
405                 return i915_semaphores;
406
407 #ifdef CONFIG_INTEL_IOMMU
408         /* Enable semaphores on SNB when IO remapping is off */
409         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
410                 return false;
411 #endif
412
413         return 1;
414 }
415
/*
 * i915_drm_freeze - quiesce the device for suspend
 *
 * Marks modeset state as suspended (so lid events are ignored), forces the
 * power well on, stops output polling, idles GEM and tears down interrupts
 * (KMS only), then saves register state and shuts down opregion.
 * Returns 0 on success or the error from i915_gem_idle().
 */
static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_set_power_well(dev, true);

        drm_kms_helper_poll_disable(dev);

#if 0
        pci_save_state(dev->pdev);
#endif

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error = i915_gem_idle(dev);
                if (error) {
                        dev_err(dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

#if 0
                intel_modeset_disable(dev);
#endif

                /* No IRQs past this point; hotplug must stay quiet too. */
                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;
        }

        i915_save_state(dev);

        intel_opregion_fini(dev);

        return 0;
}
458
459 static int
460 i915_suspend(device_t kdev)
461 {
462         struct drm_device *dev;
463         int error;
464
465         dev = device_get_softc(kdev);
466         if (dev == NULL || dev->dev_private == NULL) {
467                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
468                 return -ENODEV;
469         }
470
471         DRM_DEBUG_KMS("starting suspend\n");
472         error = i915_drm_freeze(dev);
473         if (error)
474                 return (error);
475
476         error = bus_generic_suspend(kdev);
477         DRM_DEBUG_KMS("finished suspend %d\n", error);
478         return (error);
479 }
480
/*
 * __i915_drm_thaw - restore device state after resume (GTT already restored)
 *
 * Restores saved registers and opregion, then (KMS only) re-initializes GEM
 * hardware, interrupts, modeset state and hotplug handling.
 *
 * NOTE(review): an i915_gem_init_hw() failure does not abort the rest of
 * the bring-up here; the error is only propagated to the caller at the end.
 */
static int __i915_drm_thaw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);

                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;

                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev);

                intel_modeset_init_hw(dev);
                intel_modeset_setup_hw_state(dev, false);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might loose hotplug
                 * notifications.
                 * */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contented on resume due
         * to all the printk activity.  Try to keep it out of the hot
         * path of resume if possible.
         */
#if 0
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, 0);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }
#endif

        /* Re-enable lid event handling now that restore is complete. */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);
        return error;
}
536
537 static int i915_drm_thaw(struct drm_device *dev)
538 {
539         int error = 0;
540
541         intel_gt_reset(dev);
542
543         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
544                 mutex_lock(&dev->struct_mutex);
545                 i915_gem_restore_gtt_mappings(dev);
546                 mutex_unlock(&dev->struct_mutex);
547         }
548
549         __i915_drm_thaw(dev);
550
551         return error;
552 }
553
/*
 * Bus resume method: thaw the device, re-enable output polling, then
 * resume children.  Returns 0 on success or a positive errno-style value
 * (i915_drm_thaw()'s negative result is negated below).
 */
static int
i915_resume(device_t kdev)
{
        struct drm_device *dev;
        int ret;

        dev = device_get_softc(kdev);
        DRM_DEBUG_KMS("starting resume\n");
#if 0
        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);
#endif

        ret = -i915_drm_thaw(dev);
        if (ret != 0)
                return (ret);

        drm_kms_helper_poll_enable(dev);
        ret = bus_generic_resume(kdev);
        DRM_DEBUG_KMS("finished resume %d\n", ret);
        return (ret);
}
578
/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
        {0x8086, 0, 0, "Intel i915 GPU"},
        {0, 0, 0, NULL}
};

/* Tentative definition; the initialized definition appears with the
 * tunables further down in this file. */
int i915_modeset;
587
588 /* static int __init i915_init(void) */
589 static int
590 i915_attach(device_t kdev)
591 {
592         struct drm_device *dev;
593
594         dev = device_get_softc(kdev);
595
596         driver.num_ioctls = i915_max_ioctl;
597
598         if (i915_modeset == 1)
599                 driver.driver_features |= DRIVER_MODESET;
600
601         dev->driver = &driver;
602         return (drm_attach(kdev, i915_attach_list));
603 }
604
605 const struct intel_device_info *
606 i915_get_device_id(int device)
607 {
608         const struct intel_gfx_device_id *did;
609
610         for (did = &pciidlist[0]; did->device != 0; did++) {
611                 if (did->device != device)
612                         continue;
613                 return (did->info);
614         }
615         return (NULL);
616 }
617
extern devclass_t drm_devclass;

/* Loader tunables (drm.i915.*) mirroring the Linux module parameters.
 * -1 generally means "use the per-generation default". */
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);

/* -1 = auto; consulted by i915_semaphore_is_enabled() above. */
int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
unsigned int i915_lvds_downclock = 0;
TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
int i915_vbt_sdvo_panel_type = -1;
TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
unsigned int i915_powersave = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
/* 1 = enable KMS; checked in i915_attach(). */
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
645
/*
 * GPU reset for gen2 parts.  Not supported on i85x (-ENODEV).
 * Register write ordering matters: each write is flushed with a
 * POSTING_READ before the delays below.
 */
static int i8xx_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_I85X(dev))
                return -ENODEV;

        /* Assert the graphics reset bit. */
        I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        if (IS_I830(dev) || IS_845G(dev)) {
                /* i830/845 additionally pulse the full debug reset. */
                I915_WRITE(DEBUG_RESET_I830,
                           DEBUG_RESET_DISPLAY |
                           DEBUG_RESET_RENDER |
                           DEBUG_RESET_FULL);
                POSTING_READ(DEBUG_RESET_I830);
                msleep(1);

                I915_WRITE(DEBUG_RESET_I830, 0);
                POSTING_READ(DEBUG_RESET_I830);
        }

        msleep(1);

        /* Deassert the graphics reset bit. */
        I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
        POSTING_READ(D_STATE);

        return 0;
}
675
/* True once the hardware has cleared the GDRST reset-enable bit. */
static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
682
/*
 * GPU reset for gen4: reset the render domain, wait for completion, then
 * reset the media domain, each via the GDRST PCI config register with a
 * 500ms timeout.
 */
static int i965_do_reset(struct drm_device *dev)
{
        int ret;
        u8 gdrst;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              gdrst | GRDOM_RENDER |
                              GRDOM_RESET_ENABLE);
        ret =  wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              gdrst | GRDOM_MEDIA |
                              GRDOM_RESET_ENABLE);

        return wait_for(i965_reset_complete(dev), 500);
}
709
/*
 * GPU reset for gen5 (Ironlake): render then media domain via the MCHBAR
 * GDSR register, polling bit 0 for completion with a 500ms timeout each.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 gdrst;
        int ret;

        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
729
730 static int gen6_do_reset(struct drm_device *dev)
731 {
732         struct drm_i915_private *dev_priv = dev->dev_private;
733         int ret;
734
735         dev_priv = dev->dev_private;
736
737         /* Hold gt_lock across reset to prevent any register access
738          * with forcewake not set correctly
739          */
740         lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
741
742         /* Reset the chip */
743
744         /* GEN6_GDRST is not in the gt power well, no need to check
745          * for fifo space for the write or forcewake the chip for
746          * the read
747          */
748         I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
749
750         /* Spin waiting for the device to ack the reset request */
751         ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
752
753         /* If reset with a user forcewake, try to restore, otherwise turn it off */
754         if (dev_priv->forcewake_count)
755                 dev_priv->gt.force_wake_get(dev_priv);
756         else
757                 dev_priv->gt.force_wake_put(dev_priv);
758
759         /* Restore fifo count */
760         dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
761
762         lockmgr(&dev_priv->gt_lock, LK_RELEASE);
763         return ret;
764 }
765
766 int intel_gpu_reset(struct drm_device *dev)
767 {
768         struct drm_i915_private *dev_priv = dev->dev_private;
769         int ret = -ENODEV;
770
771         switch (INTEL_INFO(dev)->gen) {
772         case 7:
773         case 6:
774                 ret = gen6_do_reset(dev);
775                 break;
776         case 5:
777                 ret = ironlake_do_reset(dev);
778                 break;
779         case 4:
780                 ret = i965_do_reset(dev);
781                 break;
782         case 2:
783                 ret = i8xx_do_reset(dev);
784                 break;
785         }
786
787         /* Also reset the gpu hangman. */
788         if (dev_priv->gpu_error.stop_rings) {
789                 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
790                 dev_priv->gpu_error.stop_rings = 0;
791                 if (ret == -ENODEV) {
792                         DRM_ERROR("Reset not implemented, but ignoring "
793                                   "error for simulated gpu hangs\n");
794                         ret = 0;
795                 }
796         }
797
798         return ret;
799 }
800
801 /**
802  * i915_reset - reset chip after a hang
803  * @dev: drm device to reset
804  *
805  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
806  * reset or otherwise an error code.
807  *
808  * Procedure is fairly simple:
809  *   - reset the chip using the reset reg
810  *   - re-init context state
811  *   - re-init hardware status page
812  *   - re-init ring buffer
813  *   - re-init interrupt state
814  *   - re-init display
815  */
816 int i915_reset(struct drm_device *dev)
817 {
818         drm_i915_private_t *dev_priv = dev->dev_private;
819         int ret;
820
821         if (!i915_try_reset)
822                 return 0;
823
824         mutex_lock(&dev->struct_mutex);
825
826         i915_gem_reset(dev);
827
828         ret = -ENODEV;
829         if (time_uptime - dev_priv->gpu_error.last_reset < 5)
830                 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
831         else
832                 ret = intel_gpu_reset(dev);
833
834         dev_priv->gpu_error.last_reset = time_uptime;
835         if (ret) {
836                 DRM_ERROR("Failed to reset chip.\n");
837                 mutex_unlock(&dev->struct_mutex);
838                 return ret;
839         }
840
841         /* Ok, now get things going again... */
842
843         /*
844          * Everything depends on having the GTT running, so we need to start
845          * there.  Fortunately we don't need to do this unless we reset the
846          * chip at a PCI level.
847          *
848          * Next we need to restore the context, but we don't use those
849          * yet either...
850          *
851          * Ring buffer needs to be re-initialized in the KMS case, or if X
852          * was running at the time of the reset (i.e. we weren't VT
853          * switched away).
854          */
855         if (drm_core_check_feature(dev, DRIVER_MODESET) ||
856                         !dev_priv->mm.suspended) {
857                 struct intel_ring_buffer *ring;
858                 int i;
859
860                 dev_priv->mm.suspended = 0;
861
862                 i915_gem_init_swizzling(dev);
863
864                 for_each_ring(ring, dev_priv, i)
865                         ring->init(ring);
866
867                 i915_gem_context_init(dev);
868                 i915_gem_init_ppgtt(dev);
869
870                 /*
871                  * It would make sense to re-init all the other hw state, at
872                  * least the rps/rc6/emon init done within modeset_init_hw. For
873                  * some unknown reason, this blows up my ilk, so don't.
874                  */
875
876                 mutex_unlock(&dev->struct_mutex);
877
878                 drm_irq_uninstall(dev);
879                 drm_irq_install(dev);
880                 intel_hpd_init(dev);
881         } else {
882                 mutex_unlock(&dev->struct_mutex);
883         }
884
885         return 0;
886 }
887
888 static int
889 i915_pci_probe(device_t kdev)
890 {
891         int device, i = 0;
892
893         if (pci_get_class(kdev) != PCIC_DISPLAY)
894                 return ENXIO;
895
896         if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
897                 return ENXIO;
898
899         device = pci_get_device(kdev);
900
901         for (i = 0; pciidlist[i].device != 0; i++) {
902                 if (pciidlist[i].device == device) {
903                         i915_attach_list[0].device = device;
904                         return 0;
905                 }
906         }
907
908         return ENXIO;
909 }
910
/* DRM driver descriptor: feature flags, lifecycle callbacks, GEM hooks
 * and the ioctl table for the i915 device.  DRIVER_MODESET is commented
 * out, so KMS is not advertised through the feature mask here. */
static struct drm_driver driver = {
	.driver_features =   DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
	    DRIVER_GEM /*| DRIVER_MODESET*/,

	.buf_priv_size  = sizeof(drm_i915_private_t),
	/* Device lifecycle callbacks. */
	.load           = i915_driver_load,
	.open           = i915_driver_open,
	.unload         = i915_driver_unload,
	.preclose       = i915_driver_preclose,
	.lastclose      = i915_driver_lastclose,
	.postclose      = i915_driver_postclose,
	.device_is_agp  = i915_driver_device_is_agp,
	/* GEM object management and dumb-buffer support. */
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops  = &i915_gem_pager_ops,
	.dumb_create    = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy   = i915_gem_dumb_destroy,

	.ioctls         = i915_ioctls,

	/* Identification strings/versions (DRIVER_* from i915_drv.h). */
	.name           = DRIVER_NAME,
	.desc           = DRIVER_DESC,
	.date           = DRIVER_DATE,
	.major          = DRIVER_MAJOR,
	.minor          = DRIVER_MINOR,
	.patchlevel     = DRIVER_PATCHLEVEL,
};
940
/* newbus device method table: maps bus driver entry points onto the
 * i915/drm implementations.  Detach goes straight to generic drm_release. */
static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_pci_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend),
	DEVMETHOD(device_resume,	i915_resume),
	DEVMETHOD(device_detach,	drm_release),
	DEVMETHOD_END
};
950
/* newbus driver declaration; softc is the full drm_device so the bus
 * framework allocates it for us on attach. */
static driver_t i915_driver = {
	"drm",
	i915_methods,
	sizeof(struct drm_device)
};
956
/* Register the i915kms module on the vgapci bus; SI_ORDER_ANY defers
 * initialization until dependencies are in place. */
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
    SI_ORDER_ANY);
/* Hard dependencies: core DRM, AGP, and the iic stack used for DDC/EDID. */
MODULE_DEPEND(i915kms, drm, 1, 1, 1);
MODULE_DEPEND(i915kms, agp, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
MODULE_DEPEND(i915kms, iic, 1, 1, 1);
MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
964
/* We give fast paths for the really cool registers */
/* True when a register access must be bracketed by forcewake: the chip
 * has forcewake, the offset is below the 0x40000 GT range boundary, and
 * the register is not FORCEWAKE itself (which must stay accessible). */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
/* Wake an Ironlake GPU out of rc6 with a harmless dummy MMIO write
 * before real register traffic (workaround, see comment below). */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
	 * harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}
978
/* Generate the i915_read{8,16,32,64} MMIO read accessors.  Each one:
 *   - issues the gen5 rc6 dummy write first (ilk_dummy_write),
 *   - for registers needing forcewake, takes gt_lock and temporarily
 *     grabs/drops forcewake around the read when no user holds it,
 *   - otherwise reads directly,
 *   - and traces the access via trace_i915_reg_rw.
 * (Comments are kept outside the macro body: a // comment before a
 * backslash continuation would swallow the next line.) */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = 0; \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		lockmgr(&dev_priv->gt_lock, LK_RELEASE); \
	} else { \
		val = DRM_READ##y(dev_priv->mmio_map, reg);	\
	} \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read
1004
/* Generate the i915_write{8,16,32,64} MMIO write accessors.  Each one:
 *   - traces the access,
 *   - for forcewake registers, waits for GT FIFO space first
 *     (__gen6_gt_wait_for_fifo) and re-checks the FIFO afterwards,
 *   - issues the gen5 rc6 dummy write,
 *   - on Haswell, checks GEN7_ERR_INT for unclaimed-MMIO errors both
 *     before and after the write, logging and clearing the sticky bit. */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
	} \
	DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
		DRM_ERROR("Unclaimed write to %x\n", reg); \
		DRM_WRITE32(dev_priv->mmio_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED);	\
	} \
}

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write
1033
/* Registers userspace may read through i915_reg_read_ioctl().  Each
 * entry gives the register offset, access size in bytes, and a bitmask
 * of supported gens (bit N set => gen N supported). */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	/* 0xF0 = gens 4-7. */
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
1041
1042 int i915_reg_read_ioctl(struct drm_device *dev,
1043                         void *data, struct drm_file *file)
1044 {
1045         struct drm_i915_private *dev_priv = dev->dev_private;
1046         struct drm_i915_reg_read *reg = data;
1047         struct register_whitelist const *entry = whitelist;
1048         int i;
1049
1050         for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1051                 if (entry->offset == reg->offset &&
1052                     (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1053                         break;
1054         }
1055
1056         if (i == ARRAY_SIZE(whitelist))
1057                 return -EINVAL;
1058
1059         switch (entry->size) {
1060         case 8:
1061                 reg->val = I915_READ64(reg->offset);
1062                 break;
1063         case 4:
1064                 reg->val = I915_READ(reg->offset);
1065                 break;
1066         case 2:
1067                 reg->val = I915_READ16(reg->offset);
1068                 break;
1069         case 1:
1070                 reg->val = I915_READ8(reg->offset);
1071                 break;
1072         default:
1073                 WARN_ON(1);
1074                 return -EINVAL;
1075         }
1076
1077         return 0;
1078 }