1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * $FreeBSD: src/sys/dev/drm2/i915/i915_irq.c,v 1.1 2012/05/22 11:07:44 kib Exp $
30 #include <sys/sfbuf.h>
33 #include <drm/i915_drm.h>
35 #include "intel_drv.h"
37 static void i915_capture_error_state(struct drm_device *dev);
38 static u32 ring_last_seqno(struct intel_ring_buffer *ring);
41 * Interrupts that are always left unmasked.
43 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
44 * we leave them always unmasked in IMR and then control enabling them through PIPESTAT alone.
47 #define I915_INTERRUPT_ENABLE_FIX \
48 (I915_ASLE_INTERRUPT | \
49 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
50 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
51 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
52 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
55 /** Interrupts that we mask and unmask at runtime. */
56 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
58 #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS)
61 #define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
62 PIPE_VBLANK_INTERRUPT_ENABLE)
64 #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
65 DRM_I915_VBLANK_PIPE_B)
67 /* For display hotplug interrupt */
69 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
71 if ((dev_priv->irq_mask & mask) != 0) {
72 dev_priv->irq_mask &= ~mask;
73 I915_WRITE(DEIMR, dev_priv->irq_mask);
79 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
81 if ((dev_priv->irq_mask & mask) != mask) {
82 dev_priv->irq_mask |= mask;
83 I915_WRITE(DEIMR, dev_priv->irq_mask);
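/*
 * i915_enable_pipestat - unmask a PIPESTAT event for the given pipe.
 *
 * The pipestat[] shadow tracks which enable bits (high half of PIPESTAT)
 * are set; writing the enable mask shifted right by 16 also clears any
 * status bit (low half) already pending for the same event.
 */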
89 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
91 if ((dev_priv->pipestat[pipe] & mask) != mask) {
92 u32 reg = PIPESTAT(pipe);
94 dev_priv->pipestat[pipe] |= mask;
95 /* Enable the interrupt, clear any pending status */
96 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
102 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
104 if ((dev_priv->pipestat[pipe] & mask) != 0) {
105 u32 reg = PIPESTAT(pipe);
107 dev_priv->pipestat[pipe] &= ~mask;
108 I915_WRITE(reg, dev_priv->pipestat[pipe]);
114 * intel_enable_asle - enable ASLE interrupt for OpRegion
116 void intel_enable_asle(struct drm_device *dev)
118 drm_i915_private_t *dev_priv = dev->dev_private;
120 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
122 if (HAS_PCH_SPLIT(dev))
123 ironlake_enable_display_irq(dev_priv, DE_GSE);
125 i915_enable_pipestat(dev_priv, 1,
126 PIPE_LEGACY_BLC_EVENT_ENABLE);
127 if (INTEL_INFO(dev)->gen >= 4)
128 i915_enable_pipestat(dev_priv, 0,
129 PIPE_LEGACY_BLC_EVENT_ENABLE);
132 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
136 * i915_pipe_enabled - check if a pipe is enabled
138 * @pipe: pipe to check
140 * Reading certain registers when the pipe is disabled can hang the chip.
141 * Use this routine to make sure the PLL is running and the pipe is active
142 * before reading such registers if unsure.
145 i915_pipe_enabled(struct drm_device *dev, int pipe)
147 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
148 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
151 /* Called from drm generic code, passed a 'crtc', which
152 * we use as a pipe index
155 i915_get_vblank_counter(struct drm_device *dev, int pipe)
157 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
158 unsigned long high_frame;
159 unsigned long low_frame;
160 u32 high1, high2, low;
162 if (!i915_pipe_enabled(dev, pipe)) {
163 DRM_DEBUG("trying to get vblank count for disabled "
164 "pipe %c\n", pipe_name(pipe));
168 high_frame = PIPEFRAME(pipe);
169 low_frame = PIPEFRAMEPIXEL(pipe);
172 * High & low register fields aren't synchronized, so make sure
173 * we get a low value that's stable across two reads of the high register.
177 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
178 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
179 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
180 } while (high1 != high2);
182 high1 >>= PIPE_FRAME_HIGH_SHIFT;
183 low >>= PIPE_FRAME_LOW_SHIFT;
184 return (high1 << 8) | low;
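/*
 * G4X and newer parts expose a dedicated hardware frame counter
 * (PIPE_FRMCOUNT_GM45), so no high/low split read is needed here.
 */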
188 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
190 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
191 int reg = PIPE_FRMCOUNT_GM45(pipe);
193 if (!i915_pipe_enabled(dev, pipe)) {
194 DRM_DEBUG("i915: trying to get vblank count for disabled "
195 "pipe %c\n", pipe_name(pipe));
199 return I915_READ(reg);
203 i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
204 int *vpos, int *hpos)
206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
207 u32 vbl = 0, position = 0;
208 int vbl_start, vbl_end, htotal, vtotal;
212 if (!i915_pipe_enabled(dev, pipe)) {
213 DRM_DEBUG("i915: trying to get scanoutpos for disabled "
214 "pipe %c\n", pipe_name(pipe));
219 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
221 if (INTEL_INFO(dev)->gen >= 4) {
222 /* No obvious pixelcount register. Only query vertical
223 * scanout position from Display scan line register.
225 position = I915_READ(PIPEDSL(pipe));
227 /* Decode into vertical scanout position. Don't have
228 * horizontal scanout position.
230 *vpos = position & 0x1fff;
233 /* Have access to pixelcount since start of frame.
234 * We can split this into vertical and horizontal scanout position.
237 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
239 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
240 *vpos = position / htotal;
241 *hpos = position - (*vpos * htotal);
244 /* Query vblank area. */
245 vbl = I915_READ(VBLANK(pipe));
247 /* Test position against vblank region. */
248 vbl_start = vbl & 0x1fff;
249 vbl_end = (vbl >> 16) & 0x1fff;
251 if ((*vpos < vbl_start) || (*vpos > vbl_end))
254 /* Inside "upper part" of vblank area? Apply corrective offset: */
255 if (in_vbl && (*vpos >= vbl_start))
256 *vpos = *vpos - vtotal;
258 /* Readouts valid? */
260 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
264 ret |= DRM_SCANOUTPOS_INVBL;
270 i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
271 struct timeval *vblank_time, unsigned flags)
273 struct drm_i915_private *dev_priv = dev->dev_private;
274 struct drm_crtc *crtc;
276 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
277 DRM_ERROR("Invalid crtc %d\n", pipe);
281 /* Get drm_crtc to timestamp: */
282 crtc = intel_get_crtc_for_pipe(dev, pipe);
284 DRM_ERROR("Invalid crtc %d\n", pipe);
288 if (!crtc->enabled) {
290 DRM_DEBUG("crtc %d is disabled\n", pipe);
295 /* Helper routine in DRM core does all the work: */
296 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
302 * Handle hotplug events outside the interrupt handler proper.
304 static void i915_hotplug_work_func(struct work_struct *work)
306 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
308 struct drm_device *dev = dev_priv->dev;
309 struct drm_mode_config *mode_config = &dev->mode_config;
310 struct intel_encoder *encoder;
312 lockmgr(&mode_config->mutex, LK_EXCLUSIVE);
313 DRM_DEBUG_KMS("running encoder hotplug functions\n");
315 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
316 if (encoder->hot_plug)
317 encoder->hot_plug(encoder);
319 lockmgr(&mode_config->mutex, LK_RELEASE);
321 /* Just fire off a uevent and let userspace tell us what to do */
323 drm_helper_hpd_irq_event(dev);
327 static void ironlake_handle_rps_change(struct drm_device *dev)
329 drm_i915_private_t *dev_priv = dev->dev_private;
330 u32 busy_up, busy_down, max_avg, min_avg;
333 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
335 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
337 new_delay = dev_priv->rps.cur_delay;
339 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
340 busy_up = I915_READ(RCPREVBSYTUPAVG);
341 busy_down = I915_READ(RCPREVBSYTDNAVG);
342 max_avg = I915_READ(RCBMAXAVG);
343 min_avg = I915_READ(RCBMINAVG);
345 /* Handle RPS change request from hw */
346 if (busy_up > max_avg) {
347 if (dev_priv->rps.cur_delay != dev_priv->rps.max_delay)
348 new_delay = dev_priv->rps.cur_delay - 1;
349 if (new_delay < dev_priv->rps.max_delay)
350 new_delay = dev_priv->rps.max_delay;
351 } else if (busy_down < min_avg) {
352 if (dev_priv->rps.cur_delay != dev_priv->rps.min_delay)
353 new_delay = dev_priv->rps.cur_delay + 1;
354 if (new_delay > dev_priv->rps.min_delay)
355 new_delay = dev_priv->rps.min_delay;
358 if (ironlake_set_drps(dev, new_delay))
359 dev_priv->rps.cur_delay = new_delay;
361 lockmgr(&mchdev_lock, LK_RELEASE);
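/*
 * notify_ring - called from the IRQ handlers when a ring reports progress:
 * latch the current seqno under the ring irq lock (waiters sleeping on the
 * ring are presumably woken here as well) and re-arm the hangcheck timer.
 */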
366 static void notify_ring(struct drm_device *dev,
367 struct intel_ring_buffer *ring)
369 struct drm_i915_private *dev_priv = dev->dev_private;
372 if (ring->obj == NULL)
375 seqno = ring->get_seqno(ring);
377 lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
378 ring->irq_seqno = seqno;
380 lockmgr(&ring->irq_lock, LK_RELEASE);
382 if (i915_enable_hangcheck) {
383 dev_priv->hangcheck_count = 0;
384 mod_timer(&dev_priv->hangcheck_timer,
385 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
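/*
 * Deferred RPS work for Gen6+: pick up the PM interrupt bits latched by the
 * IRQ handler, clear PMIMR, and step the GPU frequency one unit up or down
 * depending on which threshold interrupt fired, staying within the min/max
 * delay limits.
 */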
389 static void gen6_pm_rps_work(struct work_struct *work)
391 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
396 spin_lock(&dev_priv->rps.lock);
397 pm_iir = dev_priv->rps.pm_iir;
398 dev_priv->rps.pm_iir = 0;
399 pm_imr = I915_READ(GEN6_PMIMR);
400 I915_WRITE(GEN6_PMIMR, 0);
401 spin_unlock(&dev_priv->rps.lock);
403 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
406 lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
408 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
409 new_delay = dev_priv->rps.cur_delay + 1;
411 new_delay = dev_priv->rps.cur_delay - 1;
413 /* sysfs frequency interfaces may have snuck in while servicing the interrupt. */
416 if (!(new_delay > dev_priv->rps.max_delay ||
417 new_delay < dev_priv->rps.min_delay)) {
418 gen6_set_rps(dev_priv->dev, new_delay);
421 lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
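/*
 * snb_gt_irq_handler - dispatch Gen6+ GT interrupt sources: user interrupts
 * wake the render/BSD/blitter rings, command streamer errors are logged and
 * fed into i915_handle_error(), and (on Gen7) L3 parity errors get their own
 * handler.
 */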
424 static void snb_gt_irq_handler(struct drm_device *dev,
425 struct drm_i915_private *dev_priv,
429 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
430 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
431 notify_ring(dev, &dev_priv->ring[RCS]);
432 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
433 notify_ring(dev, &dev_priv->ring[VCS]);
434 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
435 notify_ring(dev, &dev_priv->ring[BCS]);
437 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
438 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
439 GT_RENDER_CS_ERROR_INTERRUPT)) {
440 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
441 i915_handle_error(dev, false);
445 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
446 ivybridge_handle_parity_error(dev);
450 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
455 * IIR bits should never already be set because IMR should
456 * prevent an interrupt from being shown in IIR. The warning
457 * indicates a case where we've unsafely cleared
458 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
459 * type is not a problem, it indicates a bug in the logic.
461 * The mask bit in IMR is cleared by dev_priv->rps.work.
464 spin_lock(&dev_priv->rps.lock);
465 dev_priv->rps.pm_iir |= pm_iir;
466 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
467 POSTING_READ(GEN6_PMIMR);
468 spin_unlock(&dev_priv->rps.lock);
470 queue_work(dev_priv->wq, &dev_priv->rps.work);
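/*
 * PCH (south display engine) interrupt decode for Ibex Peak; the CPT variant
 * below differs only in bit layout. Hotplug bits are handed to the hotplug
 * work queue, while the remaining events are simply logged.
 */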
473 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
475 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
478 if (pch_iir & SDE_HOTPLUG_MASK)
479 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
481 if (pch_iir & SDE_AUDIO_POWER_MASK)
482 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
483 (pch_iir & SDE_AUDIO_POWER_MASK) >>
484 SDE_AUDIO_POWER_SHIFT);
486 if (pch_iir & SDE_GMBUS)
487 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
489 if (pch_iir & SDE_AUDIO_HDCP_MASK)
490 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
492 if (pch_iir & SDE_AUDIO_TRANS_MASK)
493 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
495 if (pch_iir & SDE_POISON)
496 DRM_ERROR("PCH poison interrupt\n");
498 if (pch_iir & SDE_FDI_MASK)
500 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
502 I915_READ(FDI_RX_IIR(pipe)));
504 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
505 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
507 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
508 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
510 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
511 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
512 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
513 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
516 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
518 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
521 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
522 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
524 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
525 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
526 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
527 SDE_AUDIO_POWER_SHIFT_CPT);
529 if (pch_iir & SDE_AUX_MASK_CPT)
530 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
532 if (pch_iir & SDE_GMBUS_CPT)
533 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
535 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
536 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
538 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
539 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
541 if (pch_iir & SDE_FDI_MASK_CPT)
543 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
545 I915_READ(FDI_RX_IIR(pipe)));
549 ivybridge_irq_handler(void *arg)
551 struct drm_device *dev = (struct drm_device *) arg;
552 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
553 u32 de_iir, gt_iir, de_ier, pm_iir;
556 atomic_inc(&dev_priv->irq_received);
558 /* disable master interrupt before clearing iir */
559 de_ier = I915_READ(DEIER);
560 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
562 gt_iir = I915_READ(GTIIR);
564 snb_gt_irq_handler(dev, dev_priv, gt_iir);
565 I915_WRITE(GTIIR, gt_iir);
568 de_iir = I915_READ(DEIER);
571 if (de_iir & DE_GSE_IVB)
572 intel_opregion_gse_intr(dev);
575 for (i = 0; i < 3; i++) {
576 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
577 drm_handle_vblank(dev, i);
578 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
579 intel_prepare_page_flip(dev, i);
580 intel_finish_page_flip_plane(dev, i);
584 /* check event from PCH */
585 if (de_iir & DE_PCH_EVENT_IVB) {
586 u32 pch_iir = I915_READ(SDEIIR);
588 cpt_irq_handler(dev, pch_iir);
590 /* clear PCH hotplug event before clearing the CPU irq */
591 I915_WRITE(SDEIIR, pch_iir);
594 I915_WRITE(DEIIR, de_iir);
597 pm_iir = I915_READ(GEN6_PMIIR);
599 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
600 gen6_queue_rps_work(dev_priv, pm_iir);
601 I915_WRITE(GEN6_PMIIR, pm_iir);
604 I915_WRITE(DEIER, de_ier);
608 static void ilk_gt_irq_handler(struct drm_device *dev,
609 struct drm_i915_private *dev_priv,
612 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
613 notify_ring(dev, &dev_priv->ring[RCS]);
614 if (gt_iir & GT_BSD_USER_INTERRUPT)
615 notify_ring(dev, &dev_priv->ring[VCS]);
619 ironlake_irq_handler(void *arg)
621 struct drm_device *dev = arg;
622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
623 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
625 atomic_inc(&dev_priv->irq_received);
627 /* disable master interrupt before clearing iir */
628 de_ier = I915_READ(DEIER);
629 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
632 de_iir = I915_READ(DEIIR);
633 gt_iir = I915_READ(GTIIR);
634 pch_iir = I915_READ(SDEIIR);
635 pm_iir = I915_READ(GEN6_PMIIR);
637 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
638 (!IS_GEN6(dev) || pm_iir == 0))
642 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
644 snb_gt_irq_handler(dev, dev_priv, gt_iir);
646 if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT)
647 notify_ring(dev, &dev_priv->ring[BCS]);
649 if (de_iir & DE_GSE) {
653 intel_opregion_gse_intr(dev);
657 if (de_iir & DE_PIPEA_VBLANK)
658 drm_handle_vblank(dev, 0);
660 if (de_iir & DE_PIPEB_VBLANK)
661 drm_handle_vblank(dev, 1);
663 if (de_iir & DE_PLANEA_FLIP_DONE) {
664 intel_prepare_page_flip(dev, 0);
665 intel_finish_page_flip_plane(dev, 0);
668 if (de_iir & DE_PLANEB_FLIP_DONE) {
669 intel_prepare_page_flip(dev, 1);
670 intel_finish_page_flip_plane(dev, 1);
673 /* check event from PCH */
674 if (de_iir & DE_PCH_EVENT) {
675 if (HAS_PCH_CPT(dev))
676 cpt_irq_handler(dev, pch_iir);
678 ibx_irq_handler(dev, pch_iir);
681 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
682 ironlake_handle_rps_change(dev);
684 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
685 gen6_queue_rps_work(dev_priv, pm_iir);
687 /* should clear the PCH hotplug event before clearing the CPU irq */
688 I915_WRITE(SDEIIR, pch_iir);
689 I915_WRITE(GTIIR, gt_iir);
690 I915_WRITE(DEIIR, de_iir);
691 I915_WRITE(GEN6_PMIIR, pm_iir);
694 I915_WRITE(DEIER, de_ier);
699 * i915_error_work_func - do process context error handling work
702 * Fire an error uevent so userspace can see that a hang or error occurred.
705 static void i915_error_work_func(struct work_struct *work)
707 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
709 struct drm_device *dev = dev_priv->dev;
711 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
713 if (atomic_read(&dev_priv->mm.wedged)) {
714 DRM_DEBUG_DRIVER("resetting chip\n");
715 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
716 if (!i915_reset(dev, GRDOM_RENDER)) {
717 atomic_set(&dev_priv->mm.wedged, 0);
718 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
720 lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
721 dev_priv->error_completion++;
722 wakeup(&dev_priv->error_completion);
723 lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
727 static void i915_report_and_clear_eir(struct drm_device *dev)
729 struct drm_i915_private *dev_priv = dev->dev_private;
730 u32 eir = I915_READ(EIR);
736 kprintf("i915: render error detected, EIR: 0x%08x\n", eir);
739 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
740 u32 ipeir = I915_READ(IPEIR_I965);
742 kprintf(" IPEIR: 0x%08x\n",
743 I915_READ(IPEIR_I965));
744 kprintf(" IPEHR: 0x%08x\n",
745 I915_READ(IPEHR_I965));
746 kprintf(" INSTDONE: 0x%08x\n",
747 I915_READ(INSTDONE_I965));
748 kprintf(" INSTPS: 0x%08x\n",
750 kprintf(" INSTDONE1: 0x%08x\n",
751 I915_READ(INSTDONE1));
752 kprintf(" ACTHD: 0x%08x\n",
753 I915_READ(ACTHD_I965));
754 I915_WRITE(IPEIR_I965, ipeir);
755 POSTING_READ(IPEIR_I965);
757 if (eir & GM45_ERROR_PAGE_TABLE) {
758 u32 pgtbl_err = I915_READ(PGTBL_ER);
759 kprintf("page table error\n");
760 kprintf(" PGTBL_ER: 0x%08x\n",
762 I915_WRITE(PGTBL_ER, pgtbl_err);
763 POSTING_READ(PGTBL_ER);
768 if (eir & I915_ERROR_PAGE_TABLE) {
769 u32 pgtbl_err = I915_READ(PGTBL_ER);
770 kprintf("page table error\n");
771 kprintf(" PGTBL_ER: 0x%08x\n",
773 I915_WRITE(PGTBL_ER, pgtbl_err);
774 POSTING_READ(PGTBL_ER);
778 if (eir & I915_ERROR_MEMORY_REFRESH) {
779 kprintf("memory refresh error:\n");
781 kprintf("pipe %c stat: 0x%08x\n",
782 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
783 /* pipestat has already been acked */
785 if (eir & I915_ERROR_INSTRUCTION) {
786 kprintf("instruction error\n");
787 kprintf(" INSTPM: 0x%08x\n",
789 if (INTEL_INFO(dev)->gen < 4) {
790 u32 ipeir = I915_READ(IPEIR);
792 kprintf(" IPEIR: 0x%08x\n",
794 kprintf(" IPEHR: 0x%08x\n",
796 kprintf(" INSTDONE: 0x%08x\n",
797 I915_READ(INSTDONE));
798 kprintf(" ACTHD: 0x%08x\n",
800 I915_WRITE(IPEIR, ipeir);
803 u32 ipeir = I915_READ(IPEIR_I965);
805 kprintf(" IPEIR: 0x%08x\n",
806 I915_READ(IPEIR_I965));
807 kprintf(" IPEHR: 0x%08x\n",
808 I915_READ(IPEHR_I965));
809 kprintf(" INSTDONE: 0x%08x\n",
810 I915_READ(INSTDONE_I965));
811 kprintf(" INSTPS: 0x%08x\n",
813 kprintf(" INSTDONE1: 0x%08x\n",
814 I915_READ(INSTDONE1));
815 kprintf(" ACTHD: 0x%08x\n",
816 I915_READ(ACTHD_I965));
817 I915_WRITE(IPEIR_I965, ipeir);
818 POSTING_READ(IPEIR_I965);
822 I915_WRITE(EIR, eir);
824 eir = I915_READ(EIR);
827 * some errors might have become stuck, so mask them.
830 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
831 I915_WRITE(EMR, I915_READ(EMR) | eir);
832 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
837 * i915_handle_error - handle an error interrupt
840 * Do some basic checking of register state at error interrupt time and
841 * dump it to the syslog. Also call i915_capture_error_state() to make
842 * sure we get a record and make it available in debugfs. Fire a uevent
843 * so userspace knows something bad happened (should trigger collection
844 * of a ring dump etc.).
846 void i915_handle_error(struct drm_device *dev, bool wedged)
848 struct drm_i915_private *dev_priv = dev->dev_private;
850 i915_capture_error_state(dev);
851 i915_report_and_clear_eir(dev);
854 lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
855 dev_priv->error_completion = 0;
856 atomic_set(&dev_priv->mm.wedged, 1);
857 /* unlock acts as rel barrier for store to wedged */
858 lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
861 * Wakeup waiting processes so they don't hang
863 lockmgr(&dev_priv->ring[RCS].irq_lock, LK_EXCLUSIVE);
864 wakeup(&dev_priv->ring[RCS]);
865 lockmgr(&dev_priv->ring[RCS].irq_lock, LK_RELEASE);
867 lockmgr(&dev_priv->ring[VCS].irq_lock, LK_EXCLUSIVE);
868 wakeup(&dev_priv->ring[VCS]);
869 lockmgr(&dev_priv->ring[VCS].irq_lock, LK_RELEASE);
872 lockmgr(&dev_priv->ring[BCS].irq_lock, LK_EXCLUSIVE);
873 wakeup(&dev_priv->ring[BCS]);
874 lockmgr(&dev_priv->ring[BCS].irq_lock, LK_RELEASE);
878 queue_work(dev_priv->wq, &dev_priv->error_work);
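/*
 * If a vblank arrives while a page flip is still marked pending, compare the
 * display surface/base register against the object queued for the flip; a
 * match suggests the flip-done interrupt was missed, so the flip is completed
 * from the vblank path instead.
 */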
881 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
883 drm_i915_private_t *dev_priv = dev->dev_private;
884 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
885 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
886 struct drm_i915_gem_object *obj;
887 struct intel_unpin_work *work;
890 /* Ignore early vblank irqs */
891 if (intel_crtc == NULL)
894 lockmgr(&dev->event_lock, LK_EXCLUSIVE);
895 work = intel_crtc->unpin_work;
897 if (work == NULL || atomic_read(&work->pending) ||
898 !work->enable_stall_check) {
899 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
900 lockmgr(&dev->event_lock, LK_RELEASE);
904 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
905 obj = work->pending_flip_obj;
906 if (INTEL_INFO(dev)->gen >= 4) {
907 int dspsurf = DSPSURF(intel_crtc->plane);
908 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
910 int dspaddr = DSPADDR(intel_crtc->plane);
911 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
912 crtc->y * crtc->fb->pitches[0] +
913 crtc->x * crtc->fb->bits_per_pixel/8);
916 lockmgr(&dev->event_lock, LK_RELEASE);
918 if (stall_detected) {
919 DRM_DEBUG("Pageflip stall detected\n");
920 intel_prepare_page_flip(dev, intel_crtc->plane);
925 i915_driver_irq_handler(void *arg)
927 struct drm_device *dev = (struct drm_device *)arg;
928 drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
930 struct drm_i915_master_private *master_priv;
933 u32 pipe_stats[I915_MAX_PIPES];
938 bool blc_event = false;
940 atomic_inc(&dev_priv->irq_received);
942 iir = I915_READ(IIR);
944 if (INTEL_INFO(dev)->gen >= 4)
945 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
947 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
950 irq_received = iir != 0;
952 /* Can't rely on pipestat interrupt bit in iir as it might
953 * have been cleared after the pipestat interrupt was received.
954 * It doesn't set the bit in iir again, but it still produces
955 * interrupts (for non-MSI).
957 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
958 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
959 i915_handle_error(dev, false);
961 for_each_pipe(pipe) {
962 int reg = PIPESTAT(pipe);
963 pipe_stats[pipe] = I915_READ(reg);
966 * Clear the PIPE*STAT regs before the IIR
968 if (pipe_stats[pipe] & 0x8000ffff) {
969 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
970 DRM_DEBUG("pipe %c underrun\n",
972 I915_WRITE(reg, pipe_stats[pipe]);
976 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
981 /* Consume port. Then clear IIR or we'll miss events */
982 if ((I915_HAS_HOTPLUG(dev)) &&
983 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
984 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
986 DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
988 if (hotplug_status & dev_priv->hotplug_supported_mask)
989 queue_work(dev_priv->wq,
990 &dev_priv->hotplug_work);
992 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
993 I915_READ(PORT_HOTPLUG_STAT);
996 I915_WRITE(IIR, iir);
997 new_iir = I915_READ(IIR); /* Flush posted writes */
1000 if (dev->primary->master) {
1001 master_priv = dev->primary->master->driver_priv;
1002 if (master_priv->sarea_priv)
1003 master_priv->sarea_priv->last_dispatch =
1004 READ_BREADCRUMB(dev_priv);
1007 if (dev_priv->sarea_priv)
1008 dev_priv->sarea_priv->last_dispatch =
1009 READ_BREADCRUMB(dev_priv);
1012 if (iir & I915_USER_INTERRUPT)
1013 notify_ring(dev, &dev_priv->ring[RCS]);
1014 if (iir & I915_BSD_USER_INTERRUPT)
1015 notify_ring(dev, &dev_priv->ring[VCS]);
1017 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1018 intel_prepare_page_flip(dev, 0);
1019 if (dev_priv->flip_pending_is_done)
1020 intel_finish_page_flip_plane(dev, 0);
1023 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1024 intel_prepare_page_flip(dev, 1);
1025 if (dev_priv->flip_pending_is_done)
1026 intel_finish_page_flip_plane(dev, 1);
1029 for_each_pipe(pipe) {
1030 if (pipe_stats[pipe] & vblank_status &&
1031 drm_handle_vblank(dev, pipe)) {
1033 if (!dev_priv->flip_pending_is_done) {
1034 i915_pageflip_stall_check(dev, pipe);
1035 intel_finish_page_flip(dev, pipe);
1039 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1044 if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
1048 intel_opregion_asle_intr(dev);
1052 /* With MSI, interrupts are only generated when iir
1053 * transitions from zero to nonzero. If another bit got
1054 * set while we were handling the existing iir bits, then
1055 * we would never get another interrupt.
1057 * This is fine on non-MSI as well, as if we hit this path
1058 * we avoid exiting the interrupt handler only to generate another one.
1061 * Note that for MSI this could cause a stray interrupt report
1062 * if an interrupt landed in the time between writing IIR and
1063 * the posting read. This should be rare enough to never
1064 * trigger the 99% of 100,000 interrupts test for disabling MSI.
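/*
 * Legacy (DRI1) breadcrumb support: i915_emit_irq() writes an incrementing
 * counter into the hardware status page via MI_STORE_DWORD_INDEX and asks
 * for a user interrupt; i915_wait_irq() below sleeps until the breadcrumb
 * read back from the status page reaches the requested value.
 */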
1071 static int i915_emit_irq(struct drm_device * dev)
1073 drm_i915_private_t *dev_priv = dev->dev_private;
1075 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1078 i915_kernel_lost_context(dev);
1080 DRM_DEBUG("i915: emit_irq\n");
1082 dev_priv->counter++;
1083 if (dev_priv->counter > 0x7FFFFFFFUL)
1084 dev_priv->counter = 1;
1086 if (master_priv->sarea_priv)
1087 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1089 if (dev_priv->sarea_priv)
1090 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
1093 if (BEGIN_LP_RING(4) == 0) {
1094 OUT_RING(MI_STORE_DWORD_INDEX);
1095 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1096 OUT_RING(dev_priv->counter);
1097 OUT_RING(MI_USER_INTERRUPT);
1101 return dev_priv->counter;
1104 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1106 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1108 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1111 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1113 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
1114 READ_BREADCRUMB(dev_priv));
1117 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1118 if (master_priv->sarea_priv)
1119 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1123 if (master_priv->sarea_priv)
1124 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1126 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1127 if (dev_priv->sarea_priv) {
1128 dev_priv->sarea_priv->last_dispatch =
1129 READ_BREADCRUMB(dev_priv);
1134 if (dev_priv->sarea_priv)
1135 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1139 lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
1140 if (ring->irq_get(ring)) {
1142 while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
1143 ret = -lksleep(ring, &ring->irq_lock, PCATCH,
1146 ring->irq_put(ring);
1147 lockmgr(&ring->irq_lock, LK_RELEASE);
1150 lockmgr(&ring->irq_lock, LK_RELEASE);
1151 if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
1156 if (ret == -EBUSY) {
1157 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1158 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1164 /* Needs the lock as it touches the ring.
1166 int i915_irq_emit(struct drm_device *dev, void *data,
1167 struct drm_file *file_priv)
1169 drm_i915_private_t *dev_priv = dev->dev_private;
1170 drm_i915_irq_emit_t *emit = data;
1173 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1174 DRM_ERROR("called with no initialization\n");
1178 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1181 result = i915_emit_irq(dev);
1184 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1185 DRM_ERROR("copy_to_user\n");
1192 /* Doesn't need the hardware lock.
1194 int i915_irq_wait(struct drm_device *dev, void *data,
1195 struct drm_file *file_priv)
1197 drm_i915_private_t *dev_priv = dev->dev_private;
1198 drm_i915_irq_wait_t *irqwait = data;
1201 DRM_ERROR("called with no initialization\n");
1205 return i915_wait_irq(dev, irqwait->irq_seq);
1208 /* Called from drm generic code, passed 'crtc' which
1209 * we use as a pipe index
1212 i915_enable_vblank(struct drm_device *dev, int pipe)
1214 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1216 if (!i915_pipe_enabled(dev, pipe))
1219 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1220 if (INTEL_INFO(dev)->gen >= 4)
1221 i915_enable_pipestat(dev_priv, pipe,
1222 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1224 i915_enable_pipestat(dev_priv, pipe,
1225 PIPE_VBLANK_INTERRUPT_ENABLE);
1227 /* maintain vblank delivery even in deep C-states */
1228 if (dev_priv->info->gen == 3)
1229 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1230 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1236 ironlake_enable_vblank(struct drm_device *dev, int pipe)
1238 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1240 if (!i915_pipe_enabled(dev, pipe))
1243 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1244 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1245 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1246 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1252 ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1256 if (!i915_pipe_enabled(dev, pipe))
1259 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1260 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1261 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1262 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1268 /* Called from drm generic code, passed 'crtc' which
1269 * we use as a pipe index
1272 i915_disable_vblank(struct drm_device *dev, int pipe)
1274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1276 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1277 if (dev_priv->info->gen == 3)
1279 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1281 i915_disable_pipestat(dev_priv, pipe,
1282 PIPE_VBLANK_INTERRUPT_ENABLE |
1283 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1284 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1288 ironlake_disable_vblank(struct drm_device *dev, int pipe)
1290 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1292 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1293 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1294 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1295 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1299 ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1301 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1303 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1304 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1305 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1306 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1309 /* Set the vblank monitor pipe
1311 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1312 struct drm_file *file_priv)
1314 drm_i915_private_t *dev_priv = dev->dev_private;
1317 DRM_ERROR("called with no initialization\n");
1324 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1325 struct drm_file *file_priv)
1327 drm_i915_private_t *dev_priv = dev->dev_private;
1328 drm_i915_vblank_pipe_t *pipe = data;
1331 DRM_ERROR("called with no initialization\n");
1335 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1341 * Schedule buffer swap at given vertical blank.
1343 int i915_vblank_swap(struct drm_device *dev, void *data,
1344 struct drm_file *file_priv)
1346 /* The delayed swap mechanism was fundamentally racy, and has been
1347 * removed. The model was that the client requested a delayed flip/swap
1348 * from the kernel, then waited for vblank before continuing to perform
1349 * rendering. The problem was that the kernel might wake the client
1350 * up before it dispatched the vblank swap (since the lock has to be
1351 * held while touching the ringbuffer), in which case the client would
1352 * clear and start the next frame before the swap occurred, and
1353 * flicker would occur in addition to likely missing the vblank.
1355 * In the absence of this ioctl, userland falls back to a correct path
1356 * of waiting for a vblank, then dispatching the swap on its own.
1357 * Context switching to userland and back is plenty fast enough for
1358 * meeting the requirements of vblank swapping.
1364 ring_last_seqno(struct intel_ring_buffer *ring)
1367 if (list_empty(&ring->request_list))
1370 return (list_entry(ring->request_list.prev,
1371 struct drm_i915_gem_request, list)->seqno);
1374 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1376 if (list_empty(&ring->request_list) ||
1377 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1378 /* Issue a wake-up to catch stuck h/w. */
1379 if (ring->waiting_seqno) {
1381 "Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1383 ring->waiting_seqno,
1384 ring->get_seqno(ring));
1393 static bool kick_ring(struct intel_ring_buffer *ring)
1395 struct drm_device *dev = ring->dev;
1396 struct drm_i915_private *dev_priv = dev->dev_private;
1397 u32 tmp = I915_READ_CTL(ring);
1398 if (tmp & RING_WAIT) {
1399 DRM_ERROR("Kicking stuck wait on %s\n",
1401 I915_WRITE_CTL(ring, tmp);
1408 * This is called when the chip hasn't reported back with completed
1409 * batchbuffers in a long time. The first time this is called we simply record
1410 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1411 * again, we assume the chip is wedged and try to fix it.
1413 void i915_hangcheck_elapsed(unsigned long data)
1415 struct drm_device *dev = (struct drm_device *)data;
1416 drm_i915_private_t *dev_priv = dev->dev_private;
1417 uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
1420 if (!i915_enable_hangcheck)
1423 /* If all work is done then ACTHD clearly hasn't advanced. */
1424 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1425 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1426 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1427 dev_priv->hangcheck_count = 0;
1433 if (INTEL_INFO(dev)->gen < 4) {
1434 instdone = I915_READ(INSTDONE);
1437 instdone = I915_READ(INSTDONE_I965);
1438 instdone1 = I915_READ(INSTDONE1);
1440 acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
1441 acthd_bsd = HAS_BSD(dev) ?
1442 intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
1443 acthd_blt = HAS_BLT(dev) ?
1444 intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
1446 if (dev_priv->last_acthd == acthd &&
1447 dev_priv->last_acthd_bsd == acthd_bsd &&
1448 dev_priv->last_acthd_blt == acthd_blt &&
1449 dev_priv->last_instdone == instdone &&
1450 dev_priv->last_instdone1 == instdone1) {
1451 if (dev_priv->hangcheck_count++ > 1) {
1452 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1453 i915_handle_error(dev, true);
1455 if (!IS_GEN2(dev)) {
1456 /* Is the chip hanging on a WAIT_FOR_EVENT?
1457 * If so we can simply poke the RB_WAIT bit
1458 * and break the hang. This should work on
1459 * all but the second generation chipsets.
1461 if (kick_ring(&dev_priv->ring[RCS]))
1465 kick_ring(&dev_priv->ring[VCS]))
1469 kick_ring(&dev_priv->ring[BCS]))
1476 dev_priv->hangcheck_count = 0;
1478 dev_priv->last_acthd = acthd;
1479 dev_priv->last_acthd_bsd = acthd_bsd;
1480 dev_priv->last_acthd_blt = acthd_blt;
1481 dev_priv->last_instdone = instdone;
1482 dev_priv->last_instdone1 = instdone1;
1486 /* Reset the timer in case the chip hangs without another request being added */
1487 mod_timer(&dev_priv->hangcheck_timer,
1488 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1494 ironlake_irq_preinstall(struct drm_device *dev)
1496 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1498 atomic_set(&dev_priv->irq_received, 0);
1500 I915_WRITE(HWSTAM, 0xeffe);
1502 /* XXX hotplug from PCH */
1504 I915_WRITE(DEIMR, 0xffffffff);
1505 I915_WRITE(DEIER, 0x0);
1506 POSTING_READ(DEIER);
1509 I915_WRITE(GTIMR, 0xffffffff);
1510 I915_WRITE(GTIER, 0x0);
1511 POSTING_READ(GTIER);
1513 /* south display irq */
1514 I915_WRITE(SDEIMR, 0xffffffff);
1515 I915_WRITE(SDEIER, 0x0);
1516 POSTING_READ(SDEIER);
1520 * Enable digital hotplug on the PCH, and configure the DP short pulse
1521 * duration to 2ms (which is the minimum in the Display Port spec)
1523 * This register is the same on all known PCH chips.
1526 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1528 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1531 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1532 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1533 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1534 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1535 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1536 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1539 static int ironlake_irq_postinstall(struct drm_device *dev)
1541 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1542 /* enable the kinds of interrupts that are always enabled */
1543 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1544 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1548 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1549 dev_priv->irq_mask = ~display_mask;
1551 /* should always be able to generate an irq */
1552 I915_WRITE(DEIIR, I915_READ(DEIIR));
1553 I915_WRITE(DEIMR, dev_priv->irq_mask);
1554 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1555 POSTING_READ(DEIER);
1557 dev_priv->gt_irq_mask = ~0;
1559 I915_WRITE(GTIIR, I915_READ(GTIIR));
1560 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1565 GT_GEN6_BSD_USER_INTERRUPT |
1566 GT_GEN6_BLT_USER_INTERRUPT;
1571 GT_BSD_USER_INTERRUPT;
1572 I915_WRITE(GTIER, render_irqs);
1573 POSTING_READ(GTIER);
1575 if (HAS_PCH_CPT(dev)) {
1576 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1577 SDE_PORTB_HOTPLUG_CPT |
1578 SDE_PORTC_HOTPLUG_CPT |
1579 SDE_PORTD_HOTPLUG_CPT);
1581 hotplug_mask = (SDE_CRT_HOTPLUG |
1588 dev_priv->pch_irq_mask = ~hotplug_mask;
1590 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1591 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1592 I915_WRITE(SDEIER, hotplug_mask);
1593 POSTING_READ(SDEIER);
1595 ironlake_enable_pch_hotplug(dev);
1597 if (IS_IRONLAKE_M(dev)) {
1598 /* Clear & enable PCU event interrupts */
1599 I915_WRITE(DEIIR, DE_PCU_EVENT);
1600 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1601 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1608 ivybridge_irq_postinstall(struct drm_device *dev)
1610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1611 /* enable the kinds of interrupts that are always enabled */
1612 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1613 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
1614 DE_PLANEB_FLIP_DONE_IVB;
1618 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1619 dev_priv->irq_mask = ~display_mask;
1621 /* should always be able to generate an irq */
1622 I915_WRITE(DEIIR, I915_READ(DEIIR));
1623 I915_WRITE(DEIMR, dev_priv->irq_mask);
1624 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1625 DE_PIPEB_VBLANK_IVB);
1626 POSTING_READ(DEIER);
1628 dev_priv->gt_irq_mask = ~0;
1630 I915_WRITE(GTIIR, I915_READ(GTIIR));
1631 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1633 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1634 GT_GEN6_BLT_USER_INTERRUPT;
1635 I915_WRITE(GTIER, render_irqs);
1636 POSTING_READ(GTIER);
1638 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1639 SDE_PORTB_HOTPLUG_CPT |
1640 SDE_PORTC_HOTPLUG_CPT |
1641 SDE_PORTD_HOTPLUG_CPT);
1642 dev_priv->pch_irq_mask = ~hotplug_mask;
1644 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1645 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1646 I915_WRITE(SDEIER, hotplug_mask);
1647 POSTING_READ(SDEIER);
1649 ironlake_enable_pch_hotplug(dev);
1655 i915_driver_irq_preinstall(struct drm_device * dev)
1657 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1660 atomic_set(&dev_priv->irq_received, 0);
1662 if (I915_HAS_HOTPLUG(dev)) {
1663 I915_WRITE(PORT_HOTPLUG_EN, 0);
1664 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1667 I915_WRITE(HWSTAM, 0xeffe);
1669 I915_WRITE(PIPESTAT(pipe), 0);
1670 I915_WRITE(IMR, 0xffffffff);
1671 I915_WRITE(IER, 0x0);
1676 * Must be called after intel_modeset_init or hotplug interrupts won't be
1677 * enabled correctly.
1680 i915_driver_irq_postinstall(struct drm_device *dev)
1682 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1683 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1686 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1688 /* Unmask the interrupts that we always want on. */
1689 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1691 dev_priv->pipestat[0] = 0;
1692 dev_priv->pipestat[1] = 0;
1694 if (I915_HAS_HOTPLUG(dev)) {
1695 /* Enable in IER... */
1696 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1697 /* and unmask in IMR */
1698 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1702 * Enable some error detection, note the instruction error mask
1703 * bit is reserved, so we leave it masked.
1706 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1707 GM45_ERROR_MEM_PRIV |
1708 GM45_ERROR_CP_PRIV |
1709 I915_ERROR_MEMORY_REFRESH);
1711 error_mask = ~(I915_ERROR_PAGE_TABLE |
1712 I915_ERROR_MEMORY_REFRESH);
1714 I915_WRITE(EMR, error_mask);
1716 I915_WRITE(IMR, dev_priv->irq_mask);
1717 I915_WRITE(IER, enable_mask);
1720 if (I915_HAS_HOTPLUG(dev)) {
1721 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1723 /* Note HDMI and DP share bits */
1724 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1725 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1726 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1727 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1728 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1729 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1730 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
1731 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1732 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
1733 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1734 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1735 hotplug_en |= CRT_HOTPLUG_INT_EN;
1737 /* Programming the CRT detection parameters tends
1738 to generate a spurious hotplug event about three
1739 seconds later. So just do it once.
1742 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
1743 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1746 /* Ignore TV since it's buggy */
1748 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1754 intel_opregion_enable_asle(dev);
1761 ironlake_irq_uninstall(struct drm_device *dev)
1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1765 if (dev_priv == NULL)
1768 dev_priv->vblank_pipe = 0;
1770 I915_WRITE(HWSTAM, 0xffffffff);
1772 I915_WRITE(DEIMR, 0xffffffff);
1773 I915_WRITE(DEIER, 0x0);
1774 I915_WRITE(DEIIR, I915_READ(DEIIR));
1776 I915_WRITE(GTIMR, 0xffffffff);
1777 I915_WRITE(GTIER, 0x0);
1778 I915_WRITE(GTIIR, I915_READ(GTIIR));
1780 I915_WRITE(SDEIMR, 0xffffffff);
1781 I915_WRITE(SDEIER, 0x0);
1782 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1785 static void i915_driver_irq_uninstall(struct drm_device * dev)
1787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1793 dev_priv->vblank_pipe = 0;
1795 if (I915_HAS_HOTPLUG(dev)) {
1796 I915_WRITE(PORT_HOTPLUG_EN, 0);
1797 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1800 I915_WRITE(HWSTAM, 0xffffffff);
1802 I915_WRITE(PIPESTAT(pipe), 0);
1803 I915_WRITE(IMR, 0xffffffff);
1804 I915_WRITE(IER, 0x0);
1807 I915_WRITE(PIPESTAT(pipe),
1808 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
1809 I915_WRITE(IIR, I915_READ(IIR));
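/*
 * intel_irq_init - select the generation-specific IRQ, vblank and timestamp
 * hooks at driver load time (Ivy Bridge, Ironlake/Sandy Bridge with a PCH,
 * or the legacy i915 paths).
 */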
1813 intel_irq_init(struct drm_device *dev)
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1817 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1818 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1819 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
1821 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1822 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1823 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
1824 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1825 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1828 if (drm_core_check_feature(dev, DRIVER_MODESET))
1829 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
1831 dev->driver->get_vblank_timestamp = NULL;
1832 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
1834 if (IS_IVYBRIDGE(dev)) {
1835 /* Share pre & uninstall handlers with ILK/SNB */
1836 dev->driver->irq_handler = ivybridge_irq_handler;
1837 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1838 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
1839 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1840 dev->driver->enable_vblank = ivybridge_enable_vblank;
1841 dev->driver->disable_vblank = ivybridge_disable_vblank;
1842 } else if (HAS_PCH_SPLIT(dev)) {
1843 dev->driver->irq_handler = ironlake_irq_handler;
1844 dev->driver->irq_preinstall = ironlake_irq_preinstall;
1845 dev->driver->irq_postinstall = ironlake_irq_postinstall;
1846 dev->driver->irq_uninstall = ironlake_irq_uninstall;
1847 dev->driver->enable_vblank = ironlake_enable_vblank;
1848 dev->driver->disable_vblank = ironlake_disable_vblank;
1850 dev->driver->irq_preinstall = i915_driver_irq_preinstall;
1851 dev->driver->irq_postinstall = i915_driver_irq_postinstall;
1852 dev->driver->irq_uninstall = i915_driver_irq_uninstall;
1853 dev->driver->irq_handler = i915_driver_irq_handler;
1854 dev->driver->enable_vblank = i915_enable_vblank;
1855 dev->driver->disable_vblank = i915_disable_vblank;
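/*
 * Snapshot a GEM object's backing pages into freshly allocated kernel memory
 * for the error state. Pages that fall inside the mappable GTT aperture are
 * read back through a write-combining mapping of the aperture, so the copy
 * reflects what the GPU actually saw; other pages are copied through a
 * temporary kernel mapping after being clflushed.
 */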
1859 static struct drm_i915_error_object *
1860 i915_error_object_create(struct drm_i915_private *dev_priv,
1861 struct drm_i915_gem_object *src)
1863 struct drm_i915_error_object *dst;
1866 int page, page_count;
1869 if (src == NULL || src->pages == NULL)
1872 page_count = src->base.size / PAGE_SIZE;
1874 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
1879 reloc_offset = src->gtt_offset;
1880 for (page = 0; page < page_count; page++) {
1881 d = kmalloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
1885 if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
1886 /* Simply ignore tiling or any overlapping fence.
1887 * It's part of the error state, and this hopefully
1888 * captures what the GPU read.
1890 s = pmap_mapdev_attr(src->base.dev->agp->base +
1891 reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
1892 memcpy(d, s, PAGE_SIZE);
1893 pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
1895 drm_clflush_pages(&src->pages[page], 1);
1897 sf = sf_buf_alloc(src->pages[page]);
1899 s = (void *)(uintptr_t)sf_buf_kva(sf);
1900 memcpy(d, s, PAGE_SIZE);
1903 bzero(d, PAGE_SIZE);
1904 strcpy(d, "XXXKIB");
1907 drm_clflush_pages(&src->pages[page], 1);
1910 dst->pages[page] = d;
1912 reloc_offset += PAGE_SIZE;
1914 dst->page_count = page_count;
1915 dst->gtt_offset = src->gtt_offset;
1921 drm_free(dst->pages[page], DRM_I915_GEM);
1922 drm_free(dst, DRM_I915_GEM);
1927 i915_error_object_free(struct drm_i915_error_object *obj)
1934 for (page = 0; page < obj->page_count; page++)
1935 drm_free(obj->pages[page], DRM_I915_GEM);
1937 drm_free(obj, DRM_I915_GEM);
1941 i915_error_state_free(struct drm_device *dev,
1942 struct drm_i915_error_state *error)
1946 for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
1947 i915_error_object_free(error->ring[i].batchbuffer);
1948 i915_error_object_free(error->ring[i].ringbuffer);
1949 drm_free(error->ring[i].requests, DRM_I915_GEM);
1952 drm_free(error->active_bo, DRM_I915_GEM);
1953 drm_free(error->overlay, DRM_I915_GEM);
1954 drm_free(error, DRM_I915_GEM);
1958 capture_bo_list(struct drm_i915_error_buffer *err, int count,
1959 struct list_head *head)
1961 struct drm_i915_gem_object *obj;
1964 list_for_each_entry(obj, head, mm_list) {
1965 err->size = obj->base.size;
1966 err->name = obj->base.name;
1967 err->seqno = obj->last_rendering_seqno;
1968 err->gtt_offset = obj->gtt_offset;
1969 err->read_domains = obj->base.read_domains;
1970 err->write_domain = obj->base.write_domain;
1971 err->fence_reg = obj->fence_reg;
1973 if (obj->pin_count > 0)
1975 if (obj->user_pin_count > 0)
1977 err->tiling = obj->tiling_mode;
1978 err->dirty = obj->dirty;
1979 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1980 err->ring = obj->ring ? obj->ring->id : -1;
1981 err->cache_level = obj->cache_level;
1993 i915_gem_record_fences(struct drm_device *dev,
1994 struct drm_i915_error_state *error)
1996 struct drm_i915_private *dev_priv = dev->dev_private;
2000 switch (INTEL_INFO(dev)->gen) {
2003 for (i = 0; i < 16; i++)
2004 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
2008 for (i = 0; i < 16; i++)
2009 error->fence[i] = I915_READ64(FENCE_REG_965_0 +
2013 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2014 for (i = 0; i < 8; i++)
2015 error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
2018 for (i = 0; i < 8; i++)
2019 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
2025 static struct drm_i915_error_object *
2026 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
2027 struct intel_ring_buffer *ring)
2029 struct drm_i915_gem_object *obj;
2032 if (!ring->get_seqno)
2035 seqno = ring->get_seqno(ring);
2036 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
2037 if (obj->ring != ring)
2040 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
2043 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
2046 /* We need to copy these to an anonymous buffer as the simplest
2047 * method to avoid being overwritten by userspace.
2049 return (i915_error_object_create(dev_priv, obj));
2056 i915_record_ring_state(struct drm_device *dev,
2057 struct drm_i915_error_state *error,
2058 struct intel_ring_buffer *ring)
2060 struct drm_i915_private *dev_priv = dev->dev_private;
2062 if (INTEL_INFO(dev)->gen >= 6) {
2063 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
2064 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
2065 error->semaphore_mboxes[ring->id][0]
2066 = I915_READ(RING_SYNC_0(ring->mmio_base));
2067 error->semaphore_mboxes[ring->id][1]
2068 = I915_READ(RING_SYNC_1(ring->mmio_base));
2071 if (INTEL_INFO(dev)->gen >= 4) {
2072 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
2073 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
2074 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
2075 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
2076 if (ring->id == RCS) {
2077 error->instdone1 = I915_READ(INSTDONE1);
2078 error->bbaddr = I915_READ64(BB_ADDR);
2081 error->ipeir[ring->id] = I915_READ(IPEIR);
2082 error->ipehr[ring->id] = I915_READ(IPEHR);
2083 error->instdone[ring->id] = I915_READ(INSTDONE);
2086 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
2087 error->seqno[ring->id] = ring->get_seqno(ring);
2088 error->acthd[ring->id] = intel_ring_get_active_head(ring);
2089 error->head[ring->id] = I915_READ_HEAD(ring);
2090 error->tail[ring->id] = I915_READ_TAIL(ring);
2092 error->cpu_ring_head[ring->id] = ring->head;
2093 error->cpu_ring_tail[ring->id] = ring->tail;
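/*
 * For every initialised ring, record the MMIO register state captured above
 * together with copies of the suspected batchbuffer, the ringbuffer itself
 * and the list of outstanding requests.
 */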
2097 i915_gem_record_rings(struct drm_device *dev,
2098 struct drm_i915_error_state *error)
2100 struct drm_i915_private *dev_priv = dev->dev_private;
2101 struct drm_i915_gem_request *request;
2104 for (i = 0; i < I915_NUM_RINGS; i++) {
2105 struct intel_ring_buffer *ring = &dev_priv->ring[i];
2107 if (ring->obj == NULL)
2110 i915_record_ring_state(dev, error, ring);
2112 error->ring[i].batchbuffer =
2113 i915_error_first_batchbuffer(dev_priv, ring);
2115 error->ring[i].ringbuffer =
2116 i915_error_object_create(dev_priv, ring->obj);
2119 list_for_each_entry(request, &ring->request_list, list)
2122 error->ring[i].num_requests = count;
2123 error->ring[i].requests = kmalloc(count *
2124 sizeof(struct drm_i915_error_request), DRM_I915_GEM,
2126 if (error->ring[i].requests == NULL) {
2127 error->ring[i].num_requests = 0;
2132 list_for_each_entry(request, &ring->request_list, list) {
2133 struct drm_i915_error_request *erq;
2135 erq = &error->ring[i].requests[count++];
2136 erq->seqno = request->seqno;
2137 erq->jiffies = request->emitted_jiffies;
2138 erq->tail = request->tail;
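/*
 * i915_capture_error_state - take a one-shot snapshot of GPU and display
 * state when an error or hang is detected; only the first error since the
 * last reset is kept (dev_priv->first_error), later captures are freed.
 */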
2144 i915_capture_error_state(struct drm_device *dev)
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147 struct drm_i915_gem_object *obj;
2148 struct drm_i915_error_state *error;
2151 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
2152 error = dev_priv->first_error;
2153 lockmgr(&dev_priv->error_lock, LK_RELEASE);
2157 /* Account for pipe specific data like PIPE*STAT */
2158 error = kmalloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
2159 if (error == NULL) {
2160 DRM_DEBUG("out of memory, not capturing error state\n");
2164 DRM_INFO("capturing error event; look for more information in "
2165 "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
2167 error->eir = I915_READ(EIR);
2168 error->pgtbl_er = I915_READ(PGTBL_ER);
2170 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
2172 if (INTEL_INFO(dev)->gen >= 6) {
2173 error->error = I915_READ(ERROR_GEN6);
2174 error->done_reg = I915_READ(DONE_REG);
2177 i915_gem_record_fences(dev, error);
2178 i915_gem_record_rings(dev, error);
2180 /* Record buffers on the active and pinned lists. */
2181 error->active_bo = NULL;
2182 error->pinned_bo = NULL;
2185 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
2187 error->active_bo_count = i;
2188 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
2190 error->pinned_bo_count = i - error->active_bo_count;
2192 error->active_bo = NULL;
2193 error->pinned_bo = NULL;
2195 error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
2196 DRM_I915_GEM, M_NOWAIT);
2197 if (error->active_bo)
2198 error->pinned_bo = error->active_bo +
2199 error->active_bo_count;
2202 if (error->active_bo)
2203 error->active_bo_count = capture_bo_list(error->active_bo,
2204 error->active_bo_count, &dev_priv->mm.active_list);
2206 if (error->pinned_bo)
2207 error->pinned_bo_count = capture_bo_list(error->pinned_bo,
2208 error->pinned_bo_count, &dev_priv->mm.pinned_list);
2210 microtime(&error->time);
2212 error->overlay = intel_overlay_capture_error_state(dev);
2213 error->display = intel_display_capture_error_state(dev);
2215 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
2216 if (dev_priv->first_error == NULL) {
2217 dev_priv->first_error = error;
2220 lockmgr(&dev_priv->error_lock, LK_RELEASE);
2223 i915_error_state_free(dev, error);
2227 i915_destroy_error_state(struct drm_device *dev)
2229 struct drm_i915_private *dev_priv = dev->dev_private;
2230 struct drm_i915_error_state *error;
2232 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
2233 error = dev_priv->first_error;
2234 dev_priv->first_error = NULL;
2235 lockmgr(&dev_priv->error_lock, LK_RELEASE);
2238 i915_error_state_free(dev, error);