/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
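
/*
 * Per-platform hotplug-detect tables: each maps an HPD pin (HPD_CRT,
 * HPD_PORT_B, ...) to the trigger/status bit the hardware uses for that
 * pin.  The ibx/cpt tables index into the south-display (PCH) registers,
 * while the i915/gen4 tables index into PORT_HOTPLUG_EN/STAT.
 */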
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
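
/*
 * DEIMR is a mask register: a set bit *disables* delivery of that display
 * interrupt.  The helpers below keep the cached dev_priv->irq_mask in sync
 * with the hardware; callers are expected to hold dev_priv->irq_lock.
 */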
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum i915_pipe pipe;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return ret;
}
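
/*
 * A sketch of the intended call pattern (the real callers live in the
 * modeset code; the sequence below is illustrative, not a verbatim caller):
 *
 *	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... reconfigure the pipe; underruns are expected here ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
 *
 * Returning the previous state lets a caller restore it afterwards rather
 * than unconditionally re-enabling reporting.
 */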
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	return ret;
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion handling
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
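
/*
 * Note the contrast with i915_get_vblank_counter() above: gm45 and later
 * expose a single 32-bit hardware frame counter (PIPE_FRMCOUNT_GM45), so
 * one register read suffices and no high/low splicing loop is needed.
 */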
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);

	return;
}
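
/*
 * On Ironlake the delay scale is inverted: a *smaller* delay value means a
 * higher performance state.  That is why rising busyness above max_avg
 * decrements new_delay toward ips.max_delay in the handler above, and why
 * the clamps compare with < and > in the opposite sense from what the
 * min/max names might suggest.
 */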
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	lockmgr(&dev_priv->rps.lock, LK_RELEASE);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check.  It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[4] = NULL;

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	lockmgr(&dev_priv->rps.lock, LK_RELEASE);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
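
/*
 * HPD "storm" detection: if more than HPD_STORM_THRESHOLD hotplug
 * interrupts arrive on one pin within HPD_STORM_DETECT_PERIOD ms, the pin
 * is marked disabled and the connector falls back to polling until the
 * re-enable timer fires (see i915_hotplug_work_func() and
 * I915_REENABLE_HOTPLUG_DELAY above).
 */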
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
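
/*
 * GMBUS and DP AUX completions share a single wait queue
 * (gmbus_wait_queue): both handlers simply wake all sleepers, and each
 * waiter is expected to re-check its own completion condition after waking.
 */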
/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	lockmgr(&dev_priv->rps.lock, LK_RELEASE);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}
static irqreturn_t valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static irqreturn_t ivybridge_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
	}

	if (IS_HASWELL(dev)) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
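
/*
 * ilk_gt_irq_handler() is the gen5 counterpart of snb_gt_irq_handler();
 * ironlake_irq_handler() below picks between them based on IS_GEN5(dev).
 */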
static irqreturn_t ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");

		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#if 0 /* CONFIG_DEBUG_FS */
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void *)offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	lockmgr(&dev_priv->gpu_error.lock, LK_EXCLUSIVE);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	lockmgr(&dev_priv->gpu_error.lock, LK_RELEASE);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
2090 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2092 drm_i915_private_t *dev_priv = dev->dev_private;
2093 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2095 struct drm_i915_gem_object *obj;
2096 struct intel_unpin_work *work;
2097 bool stall_detected;
2099 /* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;
2103 lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2104 work = intel_crtc->unpin_work;
	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2108 !work->enable_stall_check) {
2109 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}
2114 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2115 obj = work->pending_flip_obj;
2116 if (INTEL_INFO(dev)->gen >= 4) {
2117 int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
2122 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
2123 crtc->y * crtc->fb->pitches[0] +
					crtc->x * crtc->fb->bits_per_pixel/8);
	}
2127 lockmgr(&dev->event_lock, LK_RELEASE);
2129 if (stall_detected) {
2130 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2131 intel_prepare_page_flip(dev, intel_crtc->plane);
2135 /* Called from drm generic code, passed 'crtc' which
2136 * we use as a pipe index
2138 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2140 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2145 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2146 if (INTEL_INFO(dev)->gen >= 4)
2147 i915_enable_pipestat(dev_priv, pipe,
2148 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2150 i915_enable_pipestat(dev_priv, pipe,
2151 PIPE_VBLANK_INTERRUPT_ENABLE);
2153 /* maintain vblank delivery even in deep C-states */
2154 if (dev_priv->info->gen == 3)
2155 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}
2161 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2163 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2168 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2169 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
2170 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}
2176 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2178 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2183 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2184 ironlake_enable_display_irq(dev_priv,
2185 DE_PIPEA_VBLANK_IVB << (5 * pipe));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}
2191 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;
2199 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2200 imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2205 I915_WRITE(VLV_IMR, imr);
2206 i915_enable_pipestat(dev_priv, pipe,
2207 PIPE_START_VBLANK_INTERRUPT_ENABLE);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}
2213 /* Called from drm generic code, passed 'crtc' which
2214 * we use as a pipe index
2216 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2218 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2220 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2221 if (dev_priv->info->gen == 3)
2222 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2224 i915_disable_pipestat(dev_priv, pipe,
2225 PIPE_VBLANK_INTERRUPT_ENABLE |
2226 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2227 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2230 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2232 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2234 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2235 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
2236 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2237 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2240 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2244 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2245 ironlake_disable_display_irq(dev_priv,
2246 DE_PIPEA_VBLANK_IVB << (pipe * 5));
2247 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2250 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2252 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 imr;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2256 i915_disable_pipestat(dev_priv, pipe,
2257 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2258 imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2263 I915_WRITE(VLV_IMR, imr);
2264 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
2270 return list_entry(ring->request_list.prev,
2271 struct drm_i915_gem_request, list)->seqno;
static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2277 return (list_empty(&ring->request_list) ||
2278 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2281 static struct intel_ring_buffer *
2282 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2284 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2285 u32 cmd, ipehr, acthd, acthd_min;
2287 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2288 if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;
2292 /* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
2295 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2296 acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);
2307 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
2311 static int semaphore_passed(struct intel_ring_buffer *ring)
2313 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2314 struct intel_ring_buffer *signaller;
2317 ring->hangcheck.deadlock = true;
2319 signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;
2323 /* cursory check for an unkickable deadlock */
2324 ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;
2328 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2331 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2333 struct intel_ring_buffer *ring;
2336 for_each_ring(ring, dev_priv, i)
2337 ring->hangcheck.deadlock = false;
2340 static enum intel_ring_hangcheck_action
2341 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2343 struct drm_device *dev = ring->dev;
2344 struct drm_i915_private *dev_priv = dev->dev_private;
2347 if (ring->hangcheck.acthd != acthd)
2353 /* Is the chip hanging on a WAIT_FOR_EVENT?
2354 * If so we can simply poke the RB_WAIT bit
2355 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
2358 tmp = I915_READ_CTL(ring);
2359 if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
2362 I915_WRITE_CTL(ring, tmp);
2366 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2367 switch (semaphore_passed(ring)) {
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
2373 I915_WRITE_CTL(ring, tmp);
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress,
 * and if there is no progress the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck; if
 * it is, we kick the ring. If we see no progress on three subsequent
 * calls we assume the chip is wedged and try to fix it by resetting it.
 */
2391 void i915_hangcheck_elapsed(unsigned long data)
2393 struct drm_device *dev = (struct drm_device *)data;
2394 drm_i915_private_t *dev_priv = dev->dev_private;
2395 struct intel_ring_buffer *ring;
2397 int busy_count = 0, rings_hung = 0;
2398 bool stuck[I915_NUM_RINGS] = { 0 };
2404 if (!i915_enable_hangcheck)
2407 for_each_ring(ring, dev_priv, i) {
2411 semaphore_clear_deadlocks(dev_priv);
2413 seqno = ring->get_seqno(ring, false);
2414 acthd = intel_ring_get_active_head(ring);
2416 if (ring->hangcheck.seqno == seqno) {
2417 if (ring_idle(ring, seqno)) {
2418 if (waitqueue_active(&ring->irq_queue)) {
2419 /* Issue a wake-up to catch stuck h/w. */
				DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
					  ring->name);
				wake_up_all(&ring->irq_queue);
2423 ring->hangcheck.score += HUNG;
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when it
				 * is in a legitimate wait for another ring.
				 * In that case the waiting ring is a victim
				 * and we want to be sure we catch the right
				 * culprit. Then every time we do kick the
				 * ring, add a small increment to the score
				 * so that we can catch a batch that is being
				 * repeatedly kicked and so responsible for
				 * stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);
2447 switch (ring->hangcheck.action) {
2462 ring->hangcheck.score += score;
2465 /* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
2468 if (ring->hangcheck.score > 0)
2469 ring->hangcheck.score--;
2472 ring->hangcheck.seqno = seqno;
2473 ring->hangcheck.acthd = acthd;
2477 for_each_ring(ring, dev_priv, i) {
2478 if (ring->hangcheck.score > FIRE) {
2479 DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);
	if (busy_count)
		/* Reset timer in case the chip hangs without another
		 * request being added */
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies +
					   DRM_I915_HANGCHECK_JIFFIES));
}
2497 static void ibx_irq_preinstall(struct drm_device *dev)
2499 struct drm_i915_private *dev_priv = dev->dev_private;
	if (HAS_PCH_NOP(dev))
		return;
2504 /* south display irq */
2505 I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
2512 I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
2518 static void ironlake_irq_preinstall(struct drm_device *dev)
2520 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2522 atomic_set(&dev_priv->irq_received, 0);
2524 I915_WRITE(HWSTAM, 0xeffe);
2526 /* XXX hotplug from PCH */
2528 I915_WRITE(DEIMR, 0xffffffff);
2529 I915_WRITE(DEIER, 0x0);
2530 POSTING_READ(DEIER);
2533 I915_WRITE(GTIMR, 0xffffffff);
2534 I915_WRITE(GTIER, 0x0);
2535 POSTING_READ(GTIER);
2537 ibx_irq_preinstall(dev);
2540 static void ivybridge_irq_preinstall(struct drm_device *dev)
2542 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2544 atomic_set(&dev_priv->irq_received, 0);
2546 I915_WRITE(HWSTAM, 0xeffe);
2548 /* XXX hotplug from PCH */
2550 I915_WRITE(DEIMR, 0xffffffff);
2551 I915_WRITE(DEIER, 0x0);
2552 POSTING_READ(DEIER);
2555 I915_WRITE(GTIMR, 0xffffffff);
2556 I915_WRITE(GTIER, 0x0);
2557 POSTING_READ(GTIER);
2559 /* Power management */
2560 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2561 I915_WRITE(GEN6_PMIER, 0x0);
2562 POSTING_READ(GEN6_PMIER);
2564 ibx_irq_preinstall(dev);
2567 static void valleyview_irq_preinstall(struct drm_device *dev)
2569 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2572 atomic_set(&dev_priv->irq_received, 0);
2575 I915_WRITE(VLV_IMR, 0);
2576 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2577 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2578 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2581 I915_WRITE(GTIIR, I915_READ(GTIIR));
2582 I915_WRITE(GTIIR, I915_READ(GTIIR));
2583 I915_WRITE(GTIMR, 0xffffffff);
2584 I915_WRITE(GTIER, 0x0);
2585 POSTING_READ(GTIER);
2587 I915_WRITE(DPINVGTT, 0xff);
2589 I915_WRITE(PORT_HOTPLUG_EN, 0);
2590 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2593 I915_WRITE(VLV_IIR, 0xffffffff);
2594 I915_WRITE(VLV_IMR, 0xffffffff);
2595 I915_WRITE(VLV_IER, 0x0);
2596 POSTING_READ(VLV_IER);
2599 static void ibx_hpd_irq_setup(struct drm_device *dev)
2601 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2602 struct drm_mode_config *mode_config = &dev->mode_config;
2603 struct intel_encoder *intel_encoder;
2604 u32 mask = ~I915_READ(SDEIMR);
2607 if (HAS_PCH_IBX(dev)) {
2608 mask &= ~SDE_HOTPLUG_MASK;
2609 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2610 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2611 mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}
2619 I915_WRITE(SDEIMR, ~mask);
	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * This register is the same on all known PCH chips.
	 */
2627 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2628 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2629 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2630 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2631 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2632 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2635 static void ibx_irq_postinstall(struct drm_device *dev)
2637 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PCH_NOP(dev))
		return;
2643 if (HAS_PCH_IBX(dev)) {
2644 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2645 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}
2652 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2653 I915_WRITE(SDEIMR, ~mask);
2656 static int ironlake_irq_postinstall(struct drm_device *dev)
2658 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the interrupts we always want on */
2660 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2661 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2662 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2663 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2666 dev_priv->irq_mask = ~display_mask;
	/* should always be able to generate irqs */
2669 I915_WRITE(DEIIR, I915_READ(DEIIR));
2670 I915_WRITE(DEIMR, dev_priv->irq_mask);
2671 I915_WRITE(DEIER, display_mask |
2672 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2673 POSTING_READ(DEIER);
2675 dev_priv->gt_irq_mask = ~0;
2677 I915_WRITE(GTIIR, I915_READ(GTIIR));
2678 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2680 gt_irqs = GT_RENDER_USER_INTERRUPT;
	if (IS_GEN6(dev))
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
2688 I915_WRITE(GTIER, gt_irqs);
2689 POSTING_READ(GTIER);
2691 ibx_irq_postinstall(dev);
2693 if (IS_IRONLAKE_M(dev)) {
2694 /* Enable PCU event interrupts
2696 * spinlocking not required here for correctness since interrupt
2697 * setup is guaranteed to run in single-threaded context. But we
2698 * need it to make the assert_spin_locked happy. */
2699 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2700 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	return 0;
}
2707 static int ivybridge_irq_postinstall(struct drm_device *dev)
2709 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the interrupts we always want on */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2713 DE_PLANEC_FLIP_DONE_IVB |
2714 DE_PLANEB_FLIP_DONE_IVB |
2715 DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
2718 u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2721 dev_priv->irq_mask = ~display_mask;
	/* should always be able to generate irqs */
2724 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2725 I915_WRITE(DEIIR, I915_READ(DEIIR));
2726 I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
2730 DE_PIPEB_VBLANK_IVB |
2731 DE_PIPEA_VBLANK_IVB);
2732 POSTING_READ(DEIER);
2734 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2736 I915_WRITE(GTIIR, I915_READ(GTIIR));
2737 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2739 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2740 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2741 I915_WRITE(GTIER, gt_irqs);
2742 POSTING_READ(GTIER);
2744 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			   PM_VEBOX_CS_ERROR_INTERRUPT;
2749 /* Our enable/disable rps functions may touch these registers so
2750 * make sure to set a known state for only the non-RPS bits.
2751 * The RMW is extra paranoia since this should be called after being set
	 * to a known state in preinstall.
	 */
2754 I915_WRITE(GEN6_PMIMR,
2755 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2756 I915_WRITE(GEN6_PMIER,
2757 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2758 POSTING_READ(GEN6_PMIER);
	ibx_irq_postinstall(dev);

	return 0;
}
2765 static int valleyview_irq_postinstall(struct drm_device *dev)
2767 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2772 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2773 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2774 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2775 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2776 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
2782 dev_priv->irq_mask = (~enable_mask) |
2783 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2784 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2786 I915_WRITE(PORT_HOTPLUG_EN, 0);
2787 POSTING_READ(PORT_HOTPLUG_EN);
2789 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2790 I915_WRITE(VLV_IER, enable_mask);
2791 I915_WRITE(VLV_IIR, 0xffffffff);
2792 I915_WRITE(PIPESTAT(0), 0xffff);
2793 I915_WRITE(PIPESTAT(1), 0xffff);
2794 POSTING_READ(VLV_IER);
2796 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2797 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2798 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2800 I915_WRITE(VLV_IIR, 0xffffffff);
2801 I915_WRITE(VLV_IIR, 0xffffffff);
2803 I915_WRITE(GTIIR, I915_READ(GTIIR));
2804 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2806 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2807 GT_BLT_USER_INTERRUPT;
2808 I915_WRITE(GTIER, gt_irqs);
2809 POSTING_READ(GTIER);
2811 /* ack & enable invalid PTE error interrupts */
2812 #if 0 /* FIXME: add support to irq handler for checking these bits */
2813 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
2822 static void valleyview_irq_uninstall(struct drm_device *dev)
2824 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2830 del_timer_sync(&dev_priv->hotplug_reenable_timer);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2835 I915_WRITE(HWSTAM, 0xffffffff);
2836 I915_WRITE(PORT_HOTPLUG_EN, 0);
2837 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2840 I915_WRITE(VLV_IIR, 0xffffffff);
2841 I915_WRITE(VLV_IMR, 0xffffffff);
2842 I915_WRITE(VLV_IER, 0x0);
2843 POSTING_READ(VLV_IER);
2846 static void ironlake_irq_uninstall(struct drm_device *dev)
2848 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2853 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2855 I915_WRITE(HWSTAM, 0xffffffff);
2857 I915_WRITE(DEIMR, 0xffffffff);
2858 I915_WRITE(DEIER, 0x0);
2859 I915_WRITE(DEIIR, I915_READ(DEIIR));
2861 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2863 I915_WRITE(GTIMR, 0xffffffff);
2864 I915_WRITE(GTIER, 0x0);
2865 I915_WRITE(GTIIR, I915_READ(GTIIR));
	if (HAS_PCH_NOP(dev))
		return;
2870 I915_WRITE(SDEIMR, 0xffffffff);
2871 I915_WRITE(SDEIER, 0x0);
2872 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2873 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2874 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2877 static void i8xx_irq_preinstall(struct drm_device * dev)
2879 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2882 atomic_set(&dev_priv->irq_received, 0);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
2886 I915_WRITE16(IMR, 0xffff);
2887 I915_WRITE16(IER, 0x0);
2888 POSTING_READ16(IER);
2891 static int i8xx_irq_postinstall(struct drm_device *dev)
2893 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2898 /* Unmask the interrupts that we always want on. */
2899 dev_priv->irq_mask =
2900 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2901 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2902 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2903 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2904 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2905 I915_WRITE16(IMR, dev_priv->irq_mask);
	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2909 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2910 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2911 I915_USER_INTERRUPT);
2912 POSTING_READ16(IER);
	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
2920 static bool i8xx_handle_vblank(struct drm_device *dev,
2923 drm_i915_private_t *dev_priv = dev->dev_private;
2924 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);
2934 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2935 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2936 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2937 * the flip is completed (no longer pending). Since this doesn't raise
2938 * an interrupt per se, we watch for the change at vblank.
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
2948 static irqreturn_t i8xx_irq_handler(void *arg)
2950 struct drm_device *dev = (struct drm_device *) arg;
2951 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2958 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2960 atomic_inc(&dev_priv->irq_received);
2962 iir = I915_READ16(IIR);
2966 while (iir & ~flip_mask) {
2967 /* Can't rely on pipestat interrupt bit in iir as it might
2968 * have been cleared after the pipestat interrupt was received.
2969 * It doesn't set the bit in iir again, but it still produces
2970 * interrupts (for non-MSI).
2972 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2973 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2974 i915_handle_error(dev, false);
2976 for_each_pipe(pipe) {
2977 int reg = PIPESTAT(pipe);
2978 pipe_stats[pipe] = I915_READ(reg);
			/* Clear the PIPE*STAT regs before the IIR */
2983 if (pipe_stats[pipe] & 0x8000ffff) {
2984 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));
2987 I915_WRITE(reg, pipe_stats[pipe]);
2991 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2993 I915_WRITE16(IIR, iir & ~flip_mask);
2994 new_iir = I915_READ16(IIR); /* Flush posted writes */
2996 i915_update_dri1_breadcrumb(dev);
2998 if (iir & I915_USER_INTERRUPT)
2999 notify_ring(dev, &dev_priv->ring[RCS]);
3001 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3002 i8xx_handle_vblank(dev, 0, iir))
3003 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3005 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
3006 i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}
3015 static void i8xx_irq_uninstall(struct drm_device * dev)
3017 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3020 for_each_pipe(pipe) {
3021 /* Clear enable bits; then clear status bits */
3022 I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
3025 I915_WRITE16(IMR, 0xffff);
3026 I915_WRITE16(IER, 0x0);
3027 I915_WRITE16(IIR, I915_READ16(IIR));
3030 static void i915_irq_preinstall(struct drm_device * dev)
3032 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3035 atomic_set(&dev_priv->irq_received, 0);
3037 if (I915_HAS_HOTPLUG(dev)) {
3038 I915_WRITE(PORT_HOTPLUG_EN, 0);
3039 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3042 I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
3045 I915_WRITE(IMR, 0xffffffff);
3046 I915_WRITE(IER, 0x0);
3050 static int i915_irq_postinstall(struct drm_device *dev)
3052 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3055 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3057 /* Unmask the interrupts that we always want on. */
3058 dev_priv->irq_mask =
3059 ~(I915_ASLE_INTERRUPT |
3060 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3061 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3062 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3063 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3064 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	enable_mask =
		I915_ASLE_INTERRUPT |
3068 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3069 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3070 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3071 I915_USER_INTERRUPT;
3073 if (I915_HAS_HOTPLUG(dev)) {
3074 I915_WRITE(PORT_HOTPLUG_EN, 0);
3075 POSTING_READ(PORT_HOTPLUG_EN);
3077 /* Enable in IER... */
3078 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3079 /* and unmask in IMR */
3080 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3083 I915_WRITE(IMR, dev_priv->irq_mask);
3084 I915_WRITE(IER, enable_mask);
3087 i915_enable_asle_pipestat(dev);
	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
3095 static bool i915_handle_vblank(struct drm_device *dev,
3096 int plane, int pipe, u32 iir)
3098 drm_i915_private_t *dev_priv = dev->dev_private;
3099 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);
3109 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3110 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3111 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3112 * the flip is completed (no longer pending). Since this doesn't raise
3113 * an interrupt per se, we watch for the change at vblank.
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
3123 static irqreturn_t i915_irq_handler(void *arg)
3125 struct drm_device *dev = (struct drm_device *) arg;
3126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3127 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3130 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3133 atomic_inc(&dev_priv->irq_received);
3135 iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
3138 bool blc_event = false;
3140 /* Can't rely on pipestat interrupt bit in iir as it might
3141 * have been cleared after the pipestat interrupt was received.
3142 * It doesn't set the bit in iir again, but it still produces
3143 * interrupts (for non-MSI).
3145 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3146 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3147 i915_handle_error(dev, false);
3149 for_each_pipe(pipe) {
3150 int reg = PIPESTAT(pipe);
3151 pipe_stats[pipe] = I915_READ(reg);
3153 /* Clear the PIPE*STAT regs before the IIR */
3154 if (pipe_stats[pipe] & 0x8000ffff) {
3155 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
3158 I915_WRITE(reg, pipe_stats[pipe]);
3159 irq_received = true;
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;
3167 /* Consume port. Then clear IIR or we'll miss events */
3168 if ((I915_HAS_HOTPLUG(dev)) &&
3169 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3170 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3171 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
3176 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3178 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3179 POSTING_READ(PORT_HOTPLUG_STAT);
3182 I915_WRITE(IIR, iir & ~flip_mask);
3183 new_iir = I915_READ(IIR); /* Flush posted writes */
3185 if (iir & I915_USER_INTERRUPT)
3186 notify_ring(dev, &dev_priv->ring[RCS]);
3188 for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3194 i915_handle_vblank(dev, plane, pipe, iir))
3195 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}
3201 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3202 intel_opregion_asle_intr(dev);
		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);
3222 i915_update_dri1_breadcrumb(dev);
3225 static void i915_irq_uninstall(struct drm_device * dev)
3227 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3230 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3232 if (I915_HAS_HOTPLUG(dev)) {
3233 I915_WRITE(PORT_HOTPLUG_EN, 0);
3234 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3237 I915_WRITE16(HWSTAM, 0xffff);
3238 for_each_pipe(pipe) {
3239 /* Clear enable bits; then clear status bits */
3240 I915_WRITE(PIPESTAT(pipe), 0);
3241 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3243 I915_WRITE(IMR, 0xffffffff);
3244 I915_WRITE(IER, 0x0);
3246 I915_WRITE(IIR, I915_READ(IIR));
3249 static void i965_irq_preinstall(struct drm_device * dev)
3251 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3254 atomic_set(&dev_priv->irq_received, 0);
3256 I915_WRITE(PORT_HOTPLUG_EN, 0);
3257 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3259 I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
3262 I915_WRITE(IMR, 0xffffffff);
3263 I915_WRITE(IER, 0x0);
3267 static int i965_irq_postinstall(struct drm_device *dev)
3269 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3273 /* Unmask the interrupts that we always want on. */
3274 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3275 I915_DISPLAY_PORT_INTERRUPT |
3276 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3277 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3278 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3279 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3280 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3282 enable_mask = ~dev_priv->irq_mask;
3283 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3284 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3285 enable_mask |= I915_USER_INTERRUPT;
	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;
3290 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3293 * Enable some error detection, note the instruction error mask
3294 * bit is reserved, so we leave it masked.
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3298 GM45_ERROR_MEM_PRIV |
3299 GM45_ERROR_CP_PRIV |
3300 I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
3305 I915_WRITE(EMR, error_mask);
3307 I915_WRITE(IMR, dev_priv->irq_mask);
3308 I915_WRITE(IER, enable_mask);
3311 I915_WRITE(PORT_HOTPLUG_EN, 0);
3312 POSTING_READ(PORT_HOTPLUG_EN);
	i915_enable_asle_pipestat(dev);

	return 0;
}
3319 static void i915_hpd_irq_setup(struct drm_device *dev)
3321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3322 struct drm_mode_config *mode_config = &dev->mode_config;
3323 struct intel_encoder *intel_encoder;
3326 if (I915_HAS_HOTPLUG(dev)) {
3327 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3328 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3329 /* Note HDMI and DP share hotplug bits */
3330 /* enable bits are the same for all generations */
3331 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3332 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3333 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3334 /* Programming the CRT detection parameters tends
3335 to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3340 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3341 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3343 /* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
3348 static irqreturn_t i965_irq_handler(void *arg)
3350 struct drm_device *dev = (struct drm_device *) arg;
3351 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3353 u32 pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3358 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3360 atomic_inc(&dev_priv->irq_received);
3362 iir = I915_READ(IIR);
	for (;;) {
		bool blc_event = false;
3367 irq_received = (iir & ~flip_mask) != 0;
3369 /* Can't rely on pipestat interrupt bit in iir as it might
3370 * have been cleared after the pipestat interrupt was received.
3371 * It doesn't set the bit in iir again, but it still produces
3372 * interrupts (for non-MSI).
3374 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3375 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3376 i915_handle_error(dev, false);
3378 for_each_pipe(pipe) {
3379 int reg = PIPESTAT(pipe);
3380 pipe_stats[pipe] = I915_READ(reg);
			/* Clear the PIPE*STAT regs before the IIR */
3385 if (pipe_stats[pipe] & 0x8000ffff) {
3386 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));
3389 I915_WRITE(reg, pipe_stats[pipe]);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;
3398 /* Consume port. Then clear IIR or we'll miss events */
3399 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3400 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3401 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3402 HOTPLUG_INT_STATUS_G4X :
3403 HOTPLUG_INT_STATUS_I915);
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
3408 intel_hpd_irq_handler(dev, hotplug_trigger,
3409 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3411 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3412 I915_READ(PORT_HOTPLUG_STAT);
3415 I915_WRITE(IIR, iir & ~flip_mask);
3416 new_iir = I915_READ(IIR); /* Flush posted writes */
3418 if (iir & I915_USER_INTERRUPT)
3419 notify_ring(dev, &dev_priv->ring[RCS]);
3420 if (iir & I915_BSD_USER_INTERRUPT)
3421 notify_ring(dev, &dev_priv->ring[VCS]);
3423 for_each_pipe(pipe) {
3424 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3425 i915_handle_vblank(dev, pipe, pipe, iir))
3426 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}
3432 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3433 intel_opregion_asle_intr(dev);
3435 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3436 gmbus_irq_handler(dev);
		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}
3456 i915_update_dri1_breadcrumb(dev);
3459 static void i965_irq_uninstall(struct drm_device * dev)
3461 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3467 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3469 I915_WRITE(PORT_HOTPLUG_EN, 0);
3470 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3472 I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
3475 I915_WRITE(IMR, 0xffffffff);
3476 I915_WRITE(IER, 0x0);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3481 I915_WRITE(IIR, I915_READ(IIR));
3484 static void i915_reenable_hotplug_timer_func(unsigned long data)
3486 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3487 struct drm_device *dev = dev_priv->dev;
3488 struct drm_mode_config *mode_config = &dev->mode_config;
3491 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3492 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3493 struct drm_connector *connector;
		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;
3498 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3500 list_for_each_entry(connector, &mode_config->connector_list, head) {
3501 struct intel_connector *intel_connector = to_intel_connector(connector);
3503 if (intel_connector->encoder->hpd_pin == i) {
3504 if (connector->polled != intel_connector->polled)
3505 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3506 drm_get_connector_name(connector));
3507 connector->polled = intel_connector->polled;
3508 if (!connector->polled)
3509 connector->polled = DRM_CONNECTOR_POLL_HPD;
3513 if (dev_priv->display.hpd_irq_setup)
3514 dev_priv->display.hpd_irq_setup(dev);
3515 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3518 void intel_irq_init(struct drm_device *dev)
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3522 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3523 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3524 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3525 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3527 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3528 i915_hangcheck_elapsed,
3529 (unsigned long) dev);
3530 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3531 (unsigned long) dev_priv);
3533 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3535 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3536 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3537 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3538 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3539 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3542 if (drm_core_check_feature(dev, DRIVER_MODESET))
3543 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
3546 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3548 if (IS_VALLEYVIEW(dev)) {
3549 dev->driver->irq_handler = valleyview_irq_handler;
3550 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3551 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3552 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3553 dev->driver->enable_vblank = valleyview_enable_vblank;
3554 dev->driver->disable_vblank = valleyview_disable_vblank;
3555 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3556 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3557 /* Share uninstall handlers with ILK/SNB */
3558 dev->driver->irq_handler = ivybridge_irq_handler;
3559 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3560 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3561 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3562 dev->driver->enable_vblank = ivybridge_enable_vblank;
3563 dev->driver->disable_vblank = ivybridge_disable_vblank;
3564 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3565 } else if (HAS_PCH_SPLIT(dev)) {
3566 dev->driver->irq_handler = ironlake_irq_handler;
3567 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3568 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3569 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3570 dev->driver->enable_vblank = ironlake_enable_vblank;
3571 dev->driver->disable_vblank = ironlake_disable_vblank;
3572 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
3575 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3576 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3577 dev->driver->irq_handler = i8xx_irq_handler;
3578 dev->driver->irq_uninstall = i8xx_irq_uninstall;
3579 } else if (INTEL_INFO(dev)->gen == 3) {
3580 dev->driver->irq_preinstall = i915_irq_preinstall;
3581 dev->driver->irq_postinstall = i915_irq_postinstall;
3582 dev->driver->irq_uninstall = i915_irq_uninstall;
3583 dev->driver->irq_handler = i915_irq_handler;
3584 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
3587 dev->driver->irq_postinstall = i965_irq_postinstall;
3588 dev->driver->irq_uninstall = i965_irq_uninstall;
3589 dev->driver->irq_handler = i965_irq_handler;
3590 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}

		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
3597 void intel_hpd_init(struct drm_device *dev)
3599 struct drm_i915_private *dev_priv = dev->dev_private;
3600 struct drm_mode_config *mode_config = &dev->mode_config;
3601 struct drm_connector *connector;
3604 for (i = 1; i < HPD_NUM_PINS; i++) {
3605 dev_priv->hpd_stats[i].hpd_cnt = 0;
3606 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3608 list_for_each_entry(connector, &mode_config->connector_list, head) {
3609 struct intel_connector *intel_connector = to_intel_connector(connector);
3610 connector->polled = intel_connector->polled;
3611 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3612 connector->polled = DRM_CONNECTOR_POLL_HPD;
3615 /* Interrupt setup is already guaranteed to be single-threaded, this is
3616 * just to make the assert_spin_locked checks happy. */
3617 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3618 if (dev_priv->display.hpd_irq_setup)
3619 dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}