/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__)
#define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__)
#define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
#define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__)
#define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__)

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
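
/*
 * A posting read flushes previously posted MMIO writes by forcing a round
 * trip to the device; the returned value is deliberately discarded. For
 * example, the Ivybridge init path below does:
 *
 *	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
 *	__raw_posting_read(dev_priv, ECOBUS);
 */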

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
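
/*
 * Callers are expected to pair this with gen6_gt_check_fifodbg() once the
 * write has landed. In sketch form, this is what __gen6_write below does:
 *
 *	if (NEEDS_FORCE_WAKE(dev_priv, reg))
 *		fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
 *	__raw_i915_write32(dev_priv, reg, val);
 *	if (unlikely(fifo_ret))
 *		gen6_gt_check_fifodbg(dev_priv);
 */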

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;

	assert_device_not_suspended(domain->i915);

	lockmgr(&domain->i915->uncore.lock, LK_EXCLUSIVE);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	lockmgr(&domain->i915->uncore.lock, LK_RELEASE);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence. The reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}
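
/*
 * Illustrative usage (a sketch, not code from this file): keep the render
 * domain awake across a burst of accesses instead of paying the handshake
 * per register; "reg" here is a hypothetical GT register offset.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	val = __raw_i915_read32(dev_priv, reg);
 *	__raw_i915_write32(dev_priv, reg, val | bit);
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */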

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}
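
/*
 * Design note: dropping the last reference does not release the hardware
 * immediately. The count is bumped back to 1 and a one-jiffy timer is
 * armed, so the actual release happens in intel_uncore_fw_release_timer().
 * A put quickly followed by another get therefore costs no extra MMIO
 * handshake.
 */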

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)
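
/*
 * Illustrative examples (offsets assumed from i915_reg.h): a GT register
 * such as GEN6_RP_CONTROL (0xA024) takes the forcewake path, while display
 * registers at 0x40000 and above are accessed directly.
 */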

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_READ_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
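
/*
 * Note the deliberate asymmetry: there is no __force_wake_put on the
 * register access fast path. Every domain grabbed here has its release
 * timer armed, so the reference is dropped automatically by
 * intel_uncore_fw_release_timer() about a jiffy later.
 */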

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_WRITE_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
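
/*
 * Shadowed registers are saved and restored by hardware across forcewake
 * transitions, so writes to them (e.g. the RING_TAIL registers above) can
 * skip the forcewake handshake entirely; see __gen8_write below.
 */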

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
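
/*
 * Expansion sketch: ASSIGN_READ_MMIO_VFUNCS(gen6) wires
 * dev_priv->uncore.funcs.mmio_readb/readw/readl/readq to
 * gen6_read8/16/32/64. intel_uncore_init() below selects the flavour per
 * platform generation.
 */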

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset the gen6 fw registers as
		 * well before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
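
/*
 * Userspace sketch (illustrative; assumes libdrm's drmIoctl wrapper and a
 * whitelisted offset - RING_TIMESTAMP(RENDER_RING_BASE) is 0x2358):
 *
 *	struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
 *	// on success, rr.val holds the render ring timestamp
 */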

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}