sys/dev/drm/i915/intel_uncore.c (dragonfly.git)
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__)
#define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__)
#define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
#define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__)
#define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__)

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
                  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(d->reg_set == 0);
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from same cacheline, but not from the set register */
        if (d->reg_post)
                __raw_posting_read(d->i915, d->reg_post);
}

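/*
 * Wake the requested domains by writing each domain's "set" bit and
 * waiting on the hardware ack: first for any stale ack to clear, then
 * for the new ack to assert.
 */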
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
                fw_domain_wait_ack(d);
        }
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        /* No need to do this for every domain, the first one found is enough */
        for_each_fw_domain(d, dev_priv, id) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        if (dev_priv->uncore.fw_domains == 0)
                return;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW,
         * so we need to read FREE_ENTRIES every time */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count =
                        __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                                GT_FIFO_FREE_ENTRIES_MASK;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

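/*
 * Timer callback that drops a deferred forcewake reference; the domain
 * is actually released only when this was the last outstanding reference.
 */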
static void intel_uncore_fw_release_timer(unsigned long arg)
{
        struct intel_uncore_forcewake_domain *domain = (void *)arg;

        assert_device_not_suspended(domain->i915);

        lockmgr(&domain->i915->uncore.lock, LK_EXCLUSIVE);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                domain->i915->uncore.funcs.force_wake_put(domain->i915,
                                                          1 << domain->id);

        lockmgr(&domain->i915->uncore.lock, LK_RELEASE);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domain_id id;
        enum forcewake_domains fw = 0, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv, id) {
                        if (del_timer_sync(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer((unsigned long)domain);
                }

                lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

                for_each_fw_domain(domain, dev_priv, id) {
                        if (timer_pending(&domain->timer))
                                active_domains |= (1 << id);
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
#if 0
                cond_resched();
#endif
        }

        WARN_ON(active_domains);

        for_each_fw_domain(domain, dev_priv, id)
                if (domain->wake_count)
                        fw |= 1 << id;

        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev) || IS_GEN7(dev))
                        dev_priv->uncore.fifo_count =
                                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                GT_FIFO_FREE_ENTRIES_MASK;
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
                                          bool restore_forcewake)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev, restore_forcewake);
        i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
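 *
 * A minimal usage sketch (illustrative only; real callers bracket an
 * MMIO sequence that must not lose forcewake):
 *
 *      intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *      ... register accesses ...
 *      intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);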
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->pm.suspended);

        fw_domains &= dev_priv->uncore.fw_domains;

        lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count++)
                        fw_domains &= ~(1 << id);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

        lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewake references for the
 * specified domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        fw_domains &= dev_priv->uncore.fw_domains;

        lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        for_each_fw_domain(domain, dev_priv, id)
                WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

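/*
 * Per-platform tables mapping MMIO offset ranges to the forcewake
 * domain(s) that must be awake before touching them.
 */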
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5000, 0x8000) || \
         REG_RANGE((reg), 0xB000, 0x12000) || \
         REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x22000, 0x24000) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8800, 0x8900) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1C000) || \
         REG_RANGE((reg), 0x1E800, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x4000, 0x5000) || \
         REG_RANGE((reg), 0x8000, 0x8300) || \
         REG_RANGE((reg), 0x8500, 0x8600) || \
         REG_RANGE((reg), 0x9000, 0xB000) || \
         REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x2700) || \
         REG_RANGE((reg), 0x3000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8140, 0x8160) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0x8C00, 0x8D00) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE900) || \
         REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8130, 0x8140) || \
         REG_RANGE((reg), 0x8800, 0x8A00) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
        ((reg) < 0x40000 && \
         !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
                        bool before)
{
        const char *op = read ? "reading" : "writing to";
        const char *when = before ? "before" : "after";

        if (!i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
                     when, op, reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
        if (i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

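/*
 * The macros below stamp out the per-generation MMIO accessors that
 * intel_uncore_init() installs into dev_priv->uncore.funcs.
 */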
#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv); \
        lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_READ_FOOTER \
        lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

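/*
 * Take forcewake around a register access: domains that are already awake
 * are skipped, while freshly woken domains get a release timer armed so
 * the reference is dropped again shortly after the access.
 */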
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
                                    enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (WARN_ON(!fw_domains))
                return;

        /* Ideally GCC would constant-fold and eliminate this loop */
        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count) {
                        fw_domains &= ~(1 << id);
                        continue;
                }

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        val = __raw_i915_read##x(dev_priv, reg); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, \
                                 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg)     \
         ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)))   \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))       \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv); \
        lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_WRITE_FOOTER \
        lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

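/*
 * Registers whose writes are shadowed by the hardware; the gen8 and chv
 * write paths below skip forcewake for these.
 */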
static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        __raw_i915_write##x(dev_priv, reg, val); \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        bool shadowed = is_gen8_shadowed(dev_priv, reg); \
        GEN6_WRITE_HEADER; \
        if (!shadowed) { \
                if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
                else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
                else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        FORCEWAKE_BLITTER_GEN9,
        FORCEWAKE_RENDER_GEN9,
        FORCEWAKE_MEDIA_GEN9,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        /* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
                if (reg == gen9_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
            is_gen9_shadowed(dev_priv, reg)) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
        dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           u32 reg_set, u32 reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;
        else
                d->reg_post = 0;

        d->i915 = dev_priv;
        d->id = domain_id;

        setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev_priv->dev)->gen <= 5)
                return;

        if (IS_GEN9(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero, which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access is
                 * not working. At this stage we don't know which flavour this
                 * ivb is, so it is better to also reset the gen6 fw registers
                 * before the ecobus check.
                 */

                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
                __raw_posting_read(dev_priv, ECOBUS);

                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                mutex_lock(&dev->struct_mutex);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

        /* All future platforms are expected to require complex power gating */
        WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_uncore_ellc_detect(dev);
        intel_uncore_fw_domains_init(dev);
        __intel_uncore_early_sanitize(dev, false);

        switch (INTEL_INFO(dev)->gen) {
        default:
                MISSING_CASE(INTEL_INFO(dev)->gen);
                return;
        case 9:
                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
                ASSIGN_READ_MMIO_VFUNCS(gen9);
                break;
        case 8:
                if (IS_CHERRYVIEW(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(chv);
                        ASSIGN_READ_MMIO_VFUNCS(chv);

                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(hsw);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen6);
                }

                if (IS_VALLEYVIEW(dev)) {
                        ASSIGN_READ_MMIO_VFUNCS(vlv);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
        intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

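/*
 * Userspace reaches this through DRM_IOCTL_I915_REG_READ. A minimal
 * sketch of a caller (illustrative only; error handling omitted, fd is
 * an open i915 DRM file descriptor):
 *
 *      struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *      drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
 *
 * 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE), the only whitelisted
 * register above; on success rr.val holds its current value.
 */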
int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        intel_runtime_pm_get(dev_priv);

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct intel_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
        hs = &ctx->hang_stats;

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

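/*
 * GPU reset handlers: gen3/g33/g4x reset through the I915_GDRST PCI
 * config register, Ironlake through the MCHBAR-mirrored ILK_GDSR
 * register, and gen6+ through the GEN6_GDRST MMIO register.
 */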
static int i915_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
        /* assert reset for at least 20 usec */
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        udelay(20);
        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

        return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev, true);

        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 6)
                return gen6_do_reset(dev);
        else if (IS_GEN5(dev))
                return ironlake_do_reset(dev);
        else if (IS_G4X(dev))
                return g4x_do_reset(dev);
        else if (IS_G33(dev))
                return g33_do_reset(dev);
        else if (INTEL_INFO(dev)->gen >= 3)
                return i915_do_reset(dev);
        else
                return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}