1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34
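/*
 * Per-HPD-pin lookup tables for the hotplug interrupt bits: the SDE_* values
 * are the south display engine (PCH) bits used on IBX and CPT, while the
 * *_HOTPLUG_INT_EN/_STATUS values are the GMCH-era equivalents used on
 * i915/g4x/vlv class hardware.
 */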
35 static const u32 hpd_ibx[] = {
36         [HPD_CRT] = SDE_CRT_HOTPLUG,
37         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
38         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
39         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
40         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
41 };
42
43 static const u32 hpd_cpt[] = {
44         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
45         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
46         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
47         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
48         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
49 };
50
51 static const u32 hpd_mask_i915[] = {
52         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
53         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
54         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
55         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
56         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
57         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
58 };
59
60 static const u32 hpd_status_g4x[] = {
61         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
62         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
63         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
64         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
65         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
66         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
67 };
68
69 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
70         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
71         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
72         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
73         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
74         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
75         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
76 };
77
78 /* IIR can theoretically queue up two events. Be paranoid. */
79 #define GEN8_IRQ_RESET_NDX(type, which) do { \
80         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
81         POSTING_READ(GEN8_##type##_IMR(which)); \
82         I915_WRITE(GEN8_##type##_IER(which), 0); \
83         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
84         POSTING_READ(GEN8_##type##_IIR(which)); \
85         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
86         POSTING_READ(GEN8_##type##_IIR(which)); \
87 } while (0)
88
89 #define GEN5_IRQ_RESET(type) do { \
90         I915_WRITE(type##IMR, 0xffffffff); \
91         POSTING_READ(type##IMR); \
92         I915_WRITE(type##IER, 0); \
93         I915_WRITE(type##IIR, 0xffffffff); \
94         POSTING_READ(type##IIR); \
95         I915_WRITE(type##IIR, 0xffffffff); \
96         POSTING_READ(type##IIR); \
97 } while (0)
98
99 /*
100  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
101  */
102 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
103         u32 val = I915_READ(reg); \
104         if (val) { \
105                 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
106                      (reg), val); \
107                 I915_WRITE((reg), 0xffffffff); \
108                 POSTING_READ(reg); \
109                 I915_WRITE((reg), 0xffffffff); \
110                 POSTING_READ(reg); \
111         } \
112 } while (0)
113
114 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
115         GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
116         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
117         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
118         POSTING_READ(GEN8_##type##_IER(which)); \
119 } while (0)
120
121 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
122         GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
123         I915_WRITE(type##IMR, (imr_val)); \
124         I915_WRITE(type##IER, (ier_val)); \
125         POSTING_READ(type##IER); \
126 } while (0)
127
128 /* For display hotplug interrupt */
129 static void
130 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
131 {
132         assert_spin_locked(&dev_priv->irq_lock);
133
134         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
135                 return;
136
137         if ((dev_priv->irq_mask & mask) != 0) {
138                 dev_priv->irq_mask &= ~mask;
139                 I915_WRITE(DEIMR, dev_priv->irq_mask);
140                 POSTING_READ(DEIMR);
141         }
142 }
143
144 static void
145 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
146 {
147         assert_spin_locked(&dev_priv->irq_lock);
148
149         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
150                 return;
151
152         if ((dev_priv->irq_mask & mask) != mask) {
153                 dev_priv->irq_mask |= mask;
154                 I915_WRITE(DEIMR, dev_priv->irq_mask);
155                 POSTING_READ(DEIMR);
156         }
157 }
158
159 /**
160  * ilk_update_gt_irq - update GTIMR
161  * @dev_priv: driver private
162  * @interrupt_mask: mask of interrupt bits to update
163  * @enabled_irq_mask: mask of interrupt bits to enable
164  */
165 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
166                               uint32_t interrupt_mask,
167                               uint32_t enabled_irq_mask)
168 {
169         assert_spin_locked(&dev_priv->irq_lock);
170
171         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
172                 return;
173
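        /*
         * A bit set in GTIMR masks (disables) that interrupt, so clear the
         * bits being enabled and set the remaining bits selected for update.
         */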
174         dev_priv->gt_irq_mask &= ~interrupt_mask;
175         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
176         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
177         POSTING_READ(GTIMR);
178 }
179
180 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
181 {
182         ilk_update_gt_irq(dev_priv, mask, mask);
183 }
184
185 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
186 {
187         ilk_update_gt_irq(dev_priv, mask, 0);
188 }
189
190 /**
191  * snb_update_pm_irq - update GEN6_PMIMR
192  * @dev_priv: driver private
193  * @interrupt_mask: mask of interrupt bits to update
194  * @enabled_irq_mask: mask of interrupt bits to enable
195  */
196 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
197                               uint32_t interrupt_mask,
198                               uint32_t enabled_irq_mask)
199 {
200         uint32_t new_val;
201
202         assert_spin_locked(&dev_priv->irq_lock);
203
204         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
205                 return;
206
207         new_val = dev_priv->pm_irq_mask;
208         new_val &= ~interrupt_mask;
209         new_val |= (~enabled_irq_mask & interrupt_mask);
210
211         if (new_val != dev_priv->pm_irq_mask) {
212                 dev_priv->pm_irq_mask = new_val;
213                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
214                 POSTING_READ(GEN6_PMIMR);
215         }
216 }
217
218 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
219 {
220         snb_update_pm_irq(dev_priv, mask, mask);
221 }
222
223 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224 {
225         snb_update_pm_irq(dev_priv, mask, 0);
226 }
227
228 static bool ivb_can_enable_err_int(struct drm_device *dev)
229 {
230         struct drm_i915_private *dev_priv = dev->dev_private;
231         struct intel_crtc *crtc;
232         enum i915_pipe pipe;
233
234         assert_spin_locked(&dev_priv->irq_lock);
235
236         for_each_pipe(dev_priv, pipe) {
237                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
238
239                 if (crtc->cpu_fifo_underrun_disabled)
240                         return false;
241         }
242
243         return true;
244 }
245
246 /**
247  * bdw_update_pm_irq - update GT interrupt 2
248  * @dev_priv: driver private
249  * @interrupt_mask: mask of interrupt bits to update
250  * @enabled_irq_mask: mask of interrupt bits to enable
251  *
252  * Copied from the snb function, updated with relevant register offsets
253  */
254 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
255                               uint32_t interrupt_mask,
256                               uint32_t enabled_irq_mask)
257 {
258         uint32_t new_val;
259
260         assert_spin_locked(&dev_priv->irq_lock);
261
262         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
263                 return;
264
265         new_val = dev_priv->pm_irq_mask;
266         new_val &= ~interrupt_mask;
267         new_val |= (~enabled_irq_mask & interrupt_mask);
268
269         if (new_val != dev_priv->pm_irq_mask) {
270                 dev_priv->pm_irq_mask = new_val;
271                 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
272                 POSTING_READ(GEN8_GT_IMR(2));
273         }
274 }
275
276 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278         bdw_update_pm_irq(dev_priv, mask, mask);
279 }
280
281 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282 {
283         bdw_update_pm_irq(dev_priv, mask, 0);
284 }
285
286 static bool cpt_can_enable_serr_int(struct drm_device *dev)
287 {
288         struct drm_i915_private *dev_priv = dev->dev_private;
289         enum i915_pipe pipe;
290         struct intel_crtc *crtc;
291
292         assert_spin_locked(&dev_priv->irq_lock);
293
294         for_each_pipe(dev_priv, pipe) {
295                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
296
297                 if (crtc->pch_fifo_underrun_disabled)
298                         return false;
299         }
300
301         return true;
302 }
303
304 void i9xx_check_fifo_underruns(struct drm_device *dev)
305 {
306         struct drm_i915_private *dev_priv = dev->dev_private;
307         struct intel_crtc *crtc;
308
309         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
310
311         for_each_intel_crtc(dev, crtc) {
312                 u32 reg = PIPESTAT(crtc->pipe);
313                 u32 pipestat;
314
315                 if (crtc->cpu_fifo_underrun_disabled)
316                         continue;
317
318                 pipestat = I915_READ(reg) & 0xffff0000;
319                 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
320                         continue;
321
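                /*
                 * The underrun status bit is cleared by writing it back as a
                 * one; OR-ing it into the preserved enable bits (the high
                 * word read above) acks the underrun without disturbing the
                 * enables.
                 */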
322                 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
323                 POSTING_READ(reg);
324
325                 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
326         }
327
328         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
329 }
330
331 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
332                                              enum i915_pipe pipe,
333                                              bool enable, bool old)
334 {
335         struct drm_i915_private *dev_priv = dev->dev_private;
336         u32 reg = PIPESTAT(pipe);
337         u32 pipestat = I915_READ(reg) & 0xffff0000;
338
339         assert_spin_locked(&dev_priv->irq_lock);
340
341         if (enable) {
342                 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
343                 POSTING_READ(reg);
344         } else {
345                 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
346                         DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
347         }
348 }
349
350 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
351                                                  enum i915_pipe pipe, bool enable)
352 {
353         struct drm_i915_private *dev_priv = dev->dev_private;
354         uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
355                                           DE_PIPEB_FIFO_UNDERRUN;
356
357         if (enable)
358                 ironlake_enable_display_irq(dev_priv, bit);
359         else
360                 ironlake_disable_display_irq(dev_priv, bit);
361 }
362
363 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
364                                                   enum i915_pipe pipe,
365                                                   bool enable, bool old)
366 {
367         struct drm_i915_private *dev_priv = dev->dev_private;
368         if (enable) {
369                 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
370
371                 if (!ivb_can_enable_err_int(dev))
372                         return;
373
374                 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
375         } else {
376                 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
377
378                 if (old &&
379                     I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
380                         DRM_ERROR("uncleared fifo underrun on pipe %c\n",
381                                   pipe_name(pipe));
382                 }
383         }
384 }
385
386 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
387                                                   enum i915_pipe pipe, bool enable)
388 {
389         struct drm_i915_private *dev_priv = dev->dev_private;
390
391         assert_spin_locked(&dev_priv->irq_lock);
392
393         if (enable)
394                 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
395         else
396                 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
397         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
398         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
399 }
400
401 /**
402  * ibx_display_interrupt_update - update SDEIMR
403  * @dev_priv: driver private
404  * @interrupt_mask: mask of interrupt bits to update
405  * @enabled_irq_mask: mask of interrupt bits to enable
406  */
407 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
408                                          uint32_t interrupt_mask,
409                                          uint32_t enabled_irq_mask)
410 {
411         uint32_t sdeimr = I915_READ(SDEIMR);
412         sdeimr &= ~interrupt_mask;
413         sdeimr |= (~enabled_irq_mask & interrupt_mask);
414
415         assert_spin_locked(&dev_priv->irq_lock);
416
417         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
418                 return;
419
420         I915_WRITE(SDEIMR, sdeimr);
421         POSTING_READ(SDEIMR);
422 }
423 #define ibx_enable_display_interrupt(dev_priv, bits) \
424         ibx_display_interrupt_update((dev_priv), (bits), (bits))
425 #define ibx_disable_display_interrupt(dev_priv, bits) \
426         ibx_display_interrupt_update((dev_priv), (bits), 0)
427
428 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
429                                             enum transcoder pch_transcoder,
430                                             bool enable)
431 {
432         struct drm_i915_private *dev_priv = dev->dev_private;
433         uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
434                        SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
435
436         if (enable)
437                 ibx_enable_display_interrupt(dev_priv, bit);
438         else
439                 ibx_disable_display_interrupt(dev_priv, bit);
440 }
441
442 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
443                                             enum transcoder pch_transcoder,
444                                             bool enable, bool old)
445 {
446         struct drm_i915_private *dev_priv = dev->dev_private;
447
448         if (enable) {
449                 I915_WRITE(SERR_INT,
450                            SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
451
452                 if (!cpt_can_enable_serr_int(dev))
453                         return;
454
455                 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
456         } else {
457                 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
458
459                 if (old && I915_READ(SERR_INT) &
460                     SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
461                         DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
462                                   transcoder_name(pch_transcoder));
463                 }
464         }
465 }
466
467 /**
468  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
469  * @dev: drm device
470  * @pipe: pipe
471  * @enable: true if we want to report FIFO underrun errors, false otherwise
472  *
473  * This function makes us disable or enable CPU fifo underruns for a specific
474  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
475  * reporting for one pipe may also disable all the other CPU error interrupts for
476  * the other pipes, because there's just one interrupt mask/enable
477  * bit for all the pipes.
478  *
479  * Returns the previous state of underrun reporting.
480  */
481 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
482                                                     enum i915_pipe pipe, bool enable)
483 {
484         struct drm_i915_private *dev_priv = dev->dev_private;
485         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
486         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
487         bool old;
488
489         assert_spin_locked(&dev_priv->irq_lock);
490
491         old = !intel_crtc->cpu_fifo_underrun_disabled;
492         intel_crtc->cpu_fifo_underrun_disabled = !enable;
493
494         if (HAS_GMCH_DISPLAY(dev))
495                 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
496         else if (IS_GEN5(dev) || IS_GEN6(dev))
497                 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
498         else if (IS_GEN7(dev))
499                 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
500         else if (IS_GEN8(dev))
501                 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
502
503         return old;
504 }
505
506 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
507                                            enum i915_pipe pipe, bool enable)
508 {
509         struct drm_i915_private *dev_priv = dev->dev_private;
510         bool ret;
511
512         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
513         ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
514         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
515
516         return ret;
517 }
518
519 static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
520                                                   enum i915_pipe pipe)
521 {
522         struct drm_i915_private *dev_priv = dev->dev_private;
523         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
524         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
525
526         return !intel_crtc->cpu_fifo_underrun_disabled;
527 }
528
529 /**
530  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
531  * @dev: drm device
532  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
533  * @enable: true if we want to report FIFO underrun errors, false otherwise
534  *
535  * This function makes us disable or enable PCH fifo underruns for a specific
536  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
537  * underrun reporting for one transcoder may also disable all the other PCH
538  * error interrupts for the other transcoders, because there's just
539  * one interrupt mask/enable bit for all the transcoders.
540  *
541  * Returns the previous state of underrun reporting.
542  */
543 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
544                                            enum transcoder pch_transcoder,
545                                            bool enable)
546 {
547         struct drm_i915_private *dev_priv = dev->dev_private;
548         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
549         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
550         bool old;
551
552         /*
553          * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
554          * has only one pch transcoder A that all pipes can use. To avoid racy
555          * pch transcoder -> pipe lookups from interrupt code simply store the
556          * underrun statistics in crtc A. Since we never expose this anywhere
557          * nor use it outside of the fifo underrun code here using the "wrong"
558          * crtc on LPT won't cause issues.
559          */
560
561         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
562
563         old = !intel_crtc->pch_fifo_underrun_disabled;
564         intel_crtc->pch_fifo_underrun_disabled = !enable;
565
566         if (HAS_PCH_IBX(dev))
567                 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
568         else
569                 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
570
571         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
572         return old;
573 }
574
575 static void
576 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
577                        u32 enable_mask, u32 status_mask)
578 {
579         u32 reg = PIPESTAT(pipe);
580         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
581
582         assert_spin_locked(&dev_priv->irq_lock);
583
584         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
585                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
586                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
587                       pipe_name(pipe), enable_mask, status_mask))
588                 return;
589
590         if ((pipestat & enable_mask) == enable_mask)
591                 return;
592
593         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
594
595         /* Enable the interrupt, clear any pending status */
596         pipestat |= enable_mask | status_mask;
597         I915_WRITE(reg, pipestat);
598         POSTING_READ(reg);
599 }
600
601 static void
602 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
603                         u32 enable_mask, u32 status_mask)
604 {
605         u32 reg = PIPESTAT(pipe);
606         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
607
608         assert_spin_locked(&dev_priv->irq_lock);
609
610         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
611                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
612                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
613                       pipe_name(pipe), enable_mask, status_mask))
614                 return;
615
616         if ((pipestat & enable_mask) == 0)
617                 return;
618
619         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
620
621         pipestat &= ~enable_mask;
622         I915_WRITE(reg, pipestat);
623         POSTING_READ(reg);
624 }
625
626 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
627 {
628         u32 enable_mask = status_mask << 16;
629
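        /*
         * PIPESTAT keeps interrupt enable bits in the high word and the
         * matching status bits in the low word, so the default enable mask is
         * simply the status mask shifted up by 16. Bits that don't follow
         * that pattern (FIFO underrun, the sprite flip done enables) are
         * fixed up below.
         */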
630         /*
631          * On pipe A we don't support the PSR interrupt yet,
632          * on pipe B and C the same bit MBZ.
633          */
634         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
635                 return 0;
636         /*
637          * On pipe B and C we don't support the PSR interrupt yet, on pipe
638          * A the same bit is for perf counters which we don't use either.
639          */
640         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
641                 return 0;
642
643         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
644                          SPRITE0_FLIP_DONE_INT_EN_VLV |
645                          SPRITE1_FLIP_DONE_INT_EN_VLV);
646         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
647                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
648         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
649                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
650
651         return enable_mask;
652 }
653
654 void
655 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
656                      u32 status_mask)
657 {
658         u32 enable_mask;
659
660         if (IS_VALLEYVIEW(dev_priv->dev))
661                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
662                                                            status_mask);
663         else
664                 enable_mask = status_mask << 16;
665         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
666 }
667
668 void
669 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
670                       u32 status_mask)
671 {
672         u32 enable_mask;
673
674         if (IS_VALLEYVIEW(dev_priv->dev))
675                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
676                                                            status_mask);
677         else
678                 enable_mask = status_mask << 16;
679         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
680 }
681
682 /**
683  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
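 *
 * Enables the legacy backlight change (BLC) pipe events so that the interrupt
 * handler can forward them to the ACPI OpRegion (ASLE) handling code.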
684  */
685 static void i915_enable_asle_pipestat(struct drm_device *dev)
686 {
687         struct drm_i915_private *dev_priv = dev->dev_private;
688
689         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
690                 return;
691
692         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
693
694         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
695         if (INTEL_INFO(dev)->gen >= 4)
696                 i915_enable_pipestat(dev_priv, PIPE_A,
697                                      PIPE_LEGACY_BLC_EVENT_STATUS);
698
699         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
700 }
701
702 /**
703  * i915_pipe_enabled - check if a pipe is enabled
704  * @dev: DRM device
705  * @pipe: pipe to check
706  *
707  * Reading certain registers when the pipe is disabled can hang the chip.
708  * Use this routine to make sure the PLL is running and the pipe is active
709  * before reading such registers if unsure.
710  */
711 static int
712 i915_pipe_enabled(struct drm_device *dev, int pipe)
713 {
714         struct drm_i915_private *dev_priv = dev->dev_private;
715
716         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
717                 /* Locking is horribly broken here, but whatever. */
718                 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
719                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
720
721                 return intel_crtc->active;
722         } else {
723                 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
724         }
725 }
726
727 /*
728  * This timing diagram depicts the video signal in and
729  * around the vertical blanking period.
730  *
731  * Assumptions about the fictitious mode used in this example:
732  *  vblank_start >= 3
733  *  vsync_start = vblank_start + 1
734  *  vsync_end = vblank_start + 2
735  *  vtotal = vblank_start + 3
736  *
737  *           start of vblank:
738  *           latch double buffered registers
739  *           increment frame counter (ctg+)
740  *           generate start of vblank interrupt (gen4+)
741  *           |
742  *           |          frame start:
743  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
744  *           |          may be shifted forward 1-3 extra lines via PIPECONF
745  *           |          |
746  *           |          |  start of vsync:
747  *           |          |  generate vsync interrupt
748  *           |          |  |
749  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
750  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
751  * ----va---> <-----------------vb--------------------> <--------va-------------
752  *       |          |       <----vs----->                     |
753  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
754  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
755  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
756  *       |          |                                         |
757  *       last visible pixel                                   first visible pixel
758  *                  |                                         increment frame counter (gen3/4)
759  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
760  *
761  * x  = horizontal active
762  * _  = horizontal blanking
763  * hs = horizontal sync
764  * va = vertical active
765  * vb = vertical blanking
766  * vs = vertical sync
767  * vbs = vblank_start (number)
768  *
769  * Summary:
770  * - most events happen at the start of horizontal sync
771  * - frame start happens at the start of horizontal blank, 1-4 lines
772  *   (depending on PIPECONF settings) after the start of vblank
773  * - gen3/4 pixel and frame counter are synchronized with the start
774  *   of horizontal active on the first line of vertical active
775  */
776
777 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
778 {
779         /* Gen2 doesn't have a hardware frame counter */
780         return 0;
781 }
782
783 /* Called from drm generic code, passed a 'crtc', which
784  * we use as a pipe index
785  */
786 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
787 {
788         struct drm_i915_private *dev_priv = dev->dev_private;
789         unsigned long high_frame;
790         unsigned long low_frame;
791         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
792
793         if (!i915_pipe_enabled(dev, pipe)) {
794                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
795                                 "pipe %c\n", pipe_name(pipe));
796                 return 0;
797         }
798
799         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
800                 struct intel_crtc *intel_crtc =
801                         to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
802                 const struct drm_display_mode *mode =
803                         &intel_crtc->config.adjusted_mode;
804
805                 htotal = mode->crtc_htotal;
806                 hsync_start = mode->crtc_hsync_start;
807                 vbl_start = mode->crtc_vblank_start;
808                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
809                         vbl_start = DIV_ROUND_UP(vbl_start, 2);
810         } else {
811                 enum transcoder cpu_transcoder = (enum transcoder) pipe;
812
813                 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
814                 hsync_start = (I915_READ(HSYNC(cpu_transcoder))  & 0x1fff) + 1;
815                 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
816                 if ((I915_READ(PIPECONF(cpu_transcoder)) &
817                      PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
818                         vbl_start = DIV_ROUND_UP(vbl_start, 2);
819         }
820
821         /* Convert to pixel count */
822         vbl_start *= htotal;
823
824         /* Start of vblank event occurs at start of hsync */
825         vbl_start -= htotal - hsync_start;
826
827         high_frame = PIPEFRAME(pipe);
828         low_frame = PIPEFRAMEPIXEL(pipe);
829
830         /*
831          * High & low register fields aren't synchronized, so make sure
832          * we get a low value that's stable across two reads of the high
833          * register.
834          */
835         do {
836                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
837                 low   = I915_READ(low_frame);
838                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
839         } while (high1 != high2);
840
841         high1 >>= PIPE_FRAME_HIGH_SHIFT;
842         pixel = low & PIPE_PIXEL_MASK;
843         low >>= PIPE_FRAME_LOW_SHIFT;
844
845         /*
846          * The frame counter increments at beginning of active.
847          * Cook up a vblank counter by also checking the pixel
848          * counter against vblank start.
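          * The hardware counter itself is 24 bits wide, assembled here from
          * the high bits in PIPEFRAME and the low byte of PIPEFRAMEPIXEL.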
849          */
850         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
851 }
852
853 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
854 {
855         struct drm_i915_private *dev_priv = dev->dev_private;
856         int reg = PIPE_FRMCOUNT_GM45(pipe);
857
858         if (!i915_pipe_enabled(dev, pipe)) {
859                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
860                                  "pipe %c\n", pipe_name(pipe));
861                 return 0;
862         }
863
864         return I915_READ(reg);
865 }
866
867 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
868 #define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
869
870 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
871 {
872         struct drm_device *dev = crtc->base.dev;
873         struct drm_i915_private *dev_priv = dev->dev_private;
874         const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
875         enum i915_pipe pipe = crtc->pipe;
876         int position, vtotal;
877
878         vtotal = mode->crtc_vtotal;
879         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
880                 vtotal /= 2;
881
882         if (IS_GEN2(dev))
883                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
884         else
885                 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
886
887         /*
888          * See update_scanline_offset() for the details on the
889          * scanline_offset adjustment.
890          */
891         return (position + crtc->scanline_offset) % vtotal;
892 }
893
894 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
895                                     unsigned int flags, int *vpos, int *hpos,
896                                     ktime_t *stime, ktime_t *etime)
897 {
898         struct drm_i915_private *dev_priv = dev->dev_private;
899         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
900         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
901         const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
902         int position;
903         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
904         bool in_vbl = true;
905         int ret = 0;
906
907         if (!intel_crtc->active) {
908                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
909                                  "pipe %c\n", pipe_name(pipe));
910                 return 0;
911         }
912
913         htotal = mode->crtc_htotal;
914         hsync_start = mode->crtc_hsync_start;
915         vtotal = mode->crtc_vtotal;
916         vbl_start = mode->crtc_vblank_start;
917         vbl_end = mode->crtc_vblank_end;
918
919         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
920                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
921                 vbl_end /= 2;
922                 vtotal /= 2;
923         }
924
925         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
926
927         /*
928          * Lock uncore.lock, as we will do multiple timing critical raw
929          * register reads, potentially with preemption disabled, so the
930          * following code must not block on uncore.lock.
931          */
932         lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
933
934         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
935
936         /* Get optional system timestamp before query. */
937         if (stime)
938                 *stime = ktime_get();
939
940         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
941                 /* No obvious pixelcount register. Only query vertical
942                  * scanout position from Display scan line register.
943                  */
944                 position = __intel_get_crtc_scanline(intel_crtc);
945         } else {
946                 /* Have access to pixelcount since start of frame.
947                  * We can split this into vertical and horizontal
948                  * scanout position.
949                  */
950                 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
951
952                 /* convert to pixel counts */
953                 vbl_start *= htotal;
954                 vbl_end *= htotal;
955                 vtotal *= htotal;
956
957                 /*
958                  * In interlaced modes, the pixel counter counts all pixels,
959                  * so one field will have htotal more pixels. In order to avoid
960                  * the reported position from jumping backwards when the pixel
961                  * counter is beyond the length of the shorter field, just
962          * clamp the position to the length of the shorter field. This
963                  * matches how the scanline counter based position works since
964                  * the scanline counter doesn't count the two half lines.
965                  */
966                 if (position >= vtotal)
967                         position = vtotal - 1;
968
969                 /*
970                  * Start of vblank interrupt is triggered at start of hsync,
971                  * just prior to the first active line of vblank. However we
972                  * consider lines to start at the leading edge of horizontal
973                  * active. So, should we get here before we've crossed into
974                  * the horizontal active of the first line in vblank, we would
975          * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
976                  * always add htotal-hsync_start to the current pixel position.
977                  */
978                 position = (position + htotal - hsync_start) % vtotal;
979         }
980
981         /* Get optional system timestamp after query. */
982         if (etime)
983                 *etime = ktime_get();
984
985         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
986
987         lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
988
989         in_vbl = position >= vbl_start && position < vbl_end;
990
991         /*
992          * While in vblank, position will be negative
993          * counting up towards 0 at vbl_end. And outside
994          * vblank, position will be positive counting
995          * up since vbl_end.
996          */
997         if (position >= vbl_start)
998                 position -= vbl_end;
999         else
1000                 position += vtotal - vbl_end;
1001
1002         if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
1003                 *vpos = position;
1004                 *hpos = 0;
1005         } else {
1006                 *vpos = position / htotal;
1007                 *hpos = position - (*vpos * htotal);
1008         }
1009
1010         /* In vblank? */
1011         if (in_vbl)
1012                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1013
1014         return ret;
1015 }
1016
1017 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1018 {
1019         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1020         int position;
1021
1022         lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
1023         position = __intel_get_crtc_scanline(crtc);
1024         lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
1025
1026         return position;
1027 }
1028
1029 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
1030                               int *max_error,
1031                               struct timeval *vblank_time,
1032                               unsigned flags)
1033 {
1034         struct drm_crtc *crtc;
1035
1036         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
1037                 DRM_ERROR("Invalid crtc %d\n", pipe);
1038                 return -EINVAL;
1039         }
1040
1041         /* Get drm_crtc to timestamp: */
1042         crtc = intel_get_crtc_for_pipe(dev, pipe);
1043         if (crtc == NULL) {
1044                 DRM_ERROR("Invalid crtc %d\n", pipe);
1045                 return -EINVAL;
1046         }
1047
1048         if (!crtc->enabled) {
1049                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1050                 return -EBUSY;
1051         }
1052
1053         /* Helper routine in DRM core does all the work: */
1054         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
1055                                                      vblank_time, flags,
1056                                                      crtc,
1057                                                      &to_intel_crtc(crtc)->config.adjusted_mode);
1058 }
1059
1060 static bool intel_hpd_irq_event(struct drm_device *dev,
1061                                 struct drm_connector *connector)
1062 {
1063         enum drm_connector_status old_status;
1064
1065         WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1066         old_status = connector->status;
1067
1068         connector->status = connector->funcs->detect(connector, false);
1069         if (old_status == connector->status)
1070                 return false;
1071
1072         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1073                       connector->base.id,
1074                       connector->name,
1075                       drm_get_connector_status_name(old_status),
1076                       drm_get_connector_status_name(connector->status));
1077
1078         return true;
1079 }
1080
1081 static void i915_digport_work_func(struct work_struct *work)
1082 {
1083         struct drm_i915_private *dev_priv =
1084                 container_of(work, struct drm_i915_private, dig_port_work);
1085         u32 long_port_mask, short_port_mask;
1086         struct intel_digital_port *intel_dig_port;
1087         int i, ret;
1088         u32 old_bits = 0;
1089
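        /*
         * Digital ports report two kinds of hotplug pulses: long pulses for a
         * real connect/disconnect and short pulses for sink events such as
         * DisplayPort link status IRQs. Snapshot both pending masks under the
         * lock and hand them to each port's hpd_pulse() handler.
         */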
1090         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1091         long_port_mask = dev_priv->long_hpd_port_mask;
1092         dev_priv->long_hpd_port_mask = 0;
1093         short_port_mask = dev_priv->short_hpd_port_mask;
1094         dev_priv->short_hpd_port_mask = 0;
1095         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1096
1097         for (i = 0; i < I915_MAX_PORTS; i++) {
1098                 bool valid = false;
1099                 bool long_hpd = false;
1100                 intel_dig_port = dev_priv->hpd_irq_port[i];
1101                 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
1102                         continue;
1103
1104                 if (long_port_mask & (1 << i))  {
1105                         valid = true;
1106                         long_hpd = true;
1107                 } else if (short_port_mask & (1 << i))
1108                         valid = true;
1109
1110                 if (valid) {
1111                         ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
1112                         if (ret == true) {
1113                                 /* if we get true fallback to old school hpd */
1114                                 old_bits |= (1 << intel_dig_port->base.hpd_pin);
1115                         }
1116                 }
1117         }
1118
1119         if (old_bits) {
1120                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1121                 dev_priv->hpd_event_bits |= old_bits;
1122                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1123                 schedule_work(&dev_priv->hotplug_work);
1124         }
1125 }
1126
1127 /*
1128  * Handle hotplug events outside the interrupt handler proper.
1129  */
1130 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
1131
1132 static void i915_hotplug_work_func(struct work_struct *work)
1133 {
1134         struct drm_i915_private *dev_priv =
1135                 container_of(work, struct drm_i915_private, hotplug_work);
1136         struct drm_device *dev = dev_priv->dev;
1137         struct drm_mode_config *mode_config = &dev->mode_config;
1138         struct intel_connector *intel_connector;
1139         struct intel_encoder *intel_encoder;
1140         struct drm_connector *connector;
1141         bool hpd_disabled = false;
1142         bool changed = false;
1143         u32 hpd_event_bits;
1144
1145         mutex_lock(&mode_config->mutex);
1146         DRM_DEBUG_KMS("running encoder hotplug functions\n");
1147
1148         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1149
1150         hpd_event_bits = dev_priv->hpd_event_bits;
1151         dev_priv->hpd_event_bits = 0;
1152         list_for_each_entry(connector, &mode_config->connector_list, head) {
1153                 intel_connector = to_intel_connector(connector);
1154                 if (!intel_connector->encoder)
1155                         continue;
1156                 intel_encoder = intel_connector->encoder;
1157                 if (intel_encoder->hpd_pin > HPD_NONE &&
1158                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
1159                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
1160                         DRM_INFO("HPD interrupt storm detected on connector %s: "
1161                                  "switching from hotplug detection to polling\n",
1162                                 connector->name);
1163                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
1164                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
1165                                 | DRM_CONNECTOR_POLL_DISCONNECT;
1166                         hpd_disabled = true;
1167                 }
1168                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1169                         DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
1170                                       connector->name, intel_encoder->hpd_pin);
1171                 }
1172         }
1173         /* If there were no outputs to poll, polling was disabled; make
1174          * sure it gets re-enabled now that HPD is being switched off for
1175          * some connectors. */
1176         if (hpd_disabled) {
1177                 drm_kms_helper_poll_enable(dev);
1178                 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
1179                                  msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1180         }
1181
1182         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1183
1184         list_for_each_entry(connector, &mode_config->connector_list, head) {
1185                 intel_connector = to_intel_connector(connector);
1186                 if (!intel_connector->encoder)
1187                         continue;
1188                 intel_encoder = intel_connector->encoder;
1189                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1190                         if (intel_encoder->hot_plug)
1191                                 intel_encoder->hot_plug(intel_encoder);
1192                         if (intel_hpd_irq_event(dev, connector))
1193                                 changed = true;
1194                 }
1195         }
1196         mutex_unlock(&mode_config->mutex);
1197
1198         if (changed)
1199                 drm_kms_helper_hotplug_event(dev);
1200 }
1201
1202 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
1203 {
1204         struct drm_i915_private *dev_priv = dev->dev_private;
1205         u32 busy_up, busy_down, max_avg, min_avg;
1206         u8 new_delay;
1207
1208         lockmgr(&mchdev_lock, LK_EXCLUSIVE);
1209
1210         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1211
1212         new_delay = dev_priv->ips.cur_delay;
1213
1214         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1215         busy_up = I915_READ(RCPREVBSYTUPAVG);
1216         busy_down = I915_READ(RCPREVBSYTDNAVG);
1217         max_avg = I915_READ(RCBMAXAVG);
1218         min_avg = I915_READ(RCBMINAVG);
1219
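        /*
         * For Ironlake DRPS a smaller delay value corresponds to a higher
         * frequency, which is why the delay is decremented when busy and
         * ips.max_delay acts as the numerically smallest bound below.
         */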
1220         /* Handle RCS change request from hw */
1221         if (busy_up > max_avg) {
1222                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1223                         new_delay = dev_priv->ips.cur_delay - 1;
1224                 if (new_delay < dev_priv->ips.max_delay)
1225                         new_delay = dev_priv->ips.max_delay;
1226         } else if (busy_down < min_avg) {
1227                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1228                         new_delay = dev_priv->ips.cur_delay + 1;
1229                 if (new_delay > dev_priv->ips.min_delay)
1230                         new_delay = dev_priv->ips.min_delay;
1231         }
1232
1233         if (ironlake_set_drps(dev, new_delay))
1234                 dev_priv->ips.cur_delay = new_delay;
1235
1236         lockmgr(&mchdev_lock, LK_RELEASE);
1237
1238         return;
1239 }
1240
1241 static void notify_ring(struct drm_device *dev,
1242                         struct intel_engine_cs *ring)
1243 {
1244         if (!intel_ring_initialized(ring))
1245                 return;
1246
1247         trace_i915_gem_request_complete(ring);
1248
1249         if (drm_core_check_feature(dev, DRIVER_MODESET))
1250                 intel_notify_mmio_flip(ring);
1251
1252         wake_up_all(&ring->irq_queue);
1253         i915_queue_hangcheck(dev);
1254 }
1255
1256 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1257                             struct intel_rps_ei *rps_ei)
1258 {
1259         u32 cz_ts, cz_freq_khz;
1260         u32 render_count, media_count;
1261         u32 elapsed_render, elapsed_media, elapsed_time;
1262         u32 residency = 0;
1263
1264         cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1265         cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1266
1267         render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1268         media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1269
1270         if (rps_ei->cz_clock == 0) {
1271                 rps_ei->cz_clock = cz_ts;
1272                 rps_ei->render_c0 = render_count;
1273                 rps_ei->media_c0 = media_count;
1274
1275                 return dev_priv->rps.cur_freq;
1276         }
1277
1278         elapsed_time = cz_ts - rps_ei->cz_clock;
1279         rps_ei->cz_clock = cz_ts;
1280
1281         elapsed_render = render_count - rps_ei->render_c0;
1282         rps_ei->render_c0 = render_count;
1283
1284         elapsed_media = media_count - rps_ei->media_c0;
1285         rps_ei->media_c0 = media_count;
1286
1287         /* Convert all the counters into a common unit of milliseconds */
1288         elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1289         elapsed_render /=  cz_freq_khz;
1290         elapsed_media /= cz_freq_khz;
1291
1292         /*
1293          * Calculate overall C0 residency percentage
1294          * only if the elapsed time is non-zero
1295          */
1296         if (elapsed_time) {
1297                 residency =
1298                         ((max(elapsed_render, elapsed_media) * 100)
1299                                 / elapsed_time);
1300         }
1301
1302         return residency;
1303 }
1304
1305 /**
1306  * vlv_calc_delay_from_C0_counters - increase/decrease freq based on GPU
1307  * busyness calculated from the C0 counters of the render & media power wells
1308  * @dev_priv: DRM device private
1309  *
1310  */
1311 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1312 {
1313         u32 residency_C0_up = 0, residency_C0_down = 0;
1314         int new_delay, adj;
1315
1316         dev_priv->rps.ei_interrupt_count++;
1317
1318         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1319
1320
1321         if (dev_priv->rps.up_ei.cz_clock == 0) {
1322                 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1323                 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1324                 return dev_priv->rps.cur_freq;
1325         }
1326
1327
1328         /*
1329          * To throttle down, C0 residency should stay below the down threshold
1330          * for consecutive EI intervals, so the down EI counters are only
1331          * sampled once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
1332          */
1333         if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1334
1335                 dev_priv->rps.ei_interrupt_count = 0;
1336
1337                 residency_C0_down = vlv_c0_residency(dev_priv,
1338                                                      &dev_priv->rps.down_ei);
1339         } else {
1340                 residency_C0_up = vlv_c0_residency(dev_priv,
1341                                                    &dev_priv->rps.up_ei);
1342         }
1343
1344         new_delay = dev_priv->rps.cur_freq;
1345
1346         adj = dev_priv->rps.last_adj;
1347         /* C0 residency is greater than UP threshold. Increase Frequency */
1348         if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1349                 if (adj > 0)
1350                         adj *= 2;
1351                 else
1352                         adj = 1;
1353
1354                 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1355                         new_delay = dev_priv->rps.cur_freq + adj;
1356
1357                 /*
1358                  * For better performance, jump directly
1359                  * to RPe if we're below it.
1360                  */
1361                 if (new_delay < dev_priv->rps.efficient_freq)
1362                         new_delay = dev_priv->rps.efficient_freq;
1363
1364         } else if (!dev_priv->rps.ei_interrupt_count &&
1365                         (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1366                 if (adj < 0)
1367                         adj *= 2;
1368                 else
1369                         adj = -1;
1370                 /*
1371                  * C0 residency has stayed below the down threshold over a
1372                  * period of VLV_INT_COUNT_FOR_DOWN_EI interrupts, so reduce the freq.
1373                  */
1374                 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1375                         new_delay = dev_priv->rps.cur_freq + adj;
1376         }
1377
1378         return new_delay;
1379 }
1380
1381 static void gen6_pm_rps_work(struct work_struct *work)
1382 {
1383         struct drm_i915_private *dev_priv =
1384                 container_of(work, struct drm_i915_private, rps.work);
1385         u32 pm_iir;
1386         int new_delay, adj;
1387
1388         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1389         pm_iir = dev_priv->rps.pm_iir;
1390         dev_priv->rps.pm_iir = 0;
1391         if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1392                 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1393         else {
1394                 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1395                 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1396         }
1397         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1398
1399         /* Make sure we didn't queue anything we're not going to process. */
1400         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1401
1402         if ((pm_iir & dev_priv->pm_rps_events) == 0)
1403                 return;
1404
1405         mutex_lock(&dev_priv->rps.hw_lock);
1406
1407         adj = dev_priv->rps.last_adj;
1408         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1409                 if (adj > 0)
1410                         adj *= 2;
1411                 else {
1412                         /* CHV needs even encode values */
1413                         adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1414                 }
1415                 new_delay = dev_priv->rps.cur_freq + adj;
1416
1417                 /*
1418                  * For better performance, jump directly
1419                  * to RPe if we're below it.
1420                  */
1421                 if (new_delay < dev_priv->rps.efficient_freq)
1422                         new_delay = dev_priv->rps.efficient_freq;
1423         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1424                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1425                         new_delay = dev_priv->rps.efficient_freq;
1426                 else
1427                         new_delay = dev_priv->rps.min_freq_softlimit;
1428                 adj = 0;
1429         } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1430                 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1431         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1432                 if (adj < 0)
1433                         adj *= 2;
1434                 else {
1435                         /* CHV needs even encode values */
1436                         adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1437                 }
1438                 new_delay = dev_priv->rps.cur_freq + adj;
1439         } else { /* unknown event */
1440                 new_delay = dev_priv->rps.cur_freq;
1441         }
1442
1443         /* sysfs frequency interfaces may have snuck in while servicing the
1444          * interrupt
1445          */
1446         new_delay = clamp_t(int, new_delay,
1447                             dev_priv->rps.min_freq_softlimit,
1448                             dev_priv->rps.max_freq_softlimit);
1449
1450         dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1451
1452         if (IS_VALLEYVIEW(dev_priv->dev))
1453                 valleyview_set_rps(dev_priv->dev, new_delay);
1454         else
1455                 gen6_set_rps(dev_priv->dev, new_delay);
1456
1457         mutex_unlock(&dev_priv->rps.hw_lock);
1458 }
1459
1460
1461 /**
1462  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1463  * occurred.
1464  * @work: workqueue struct
1465  *
1466  * Doesn't actually do anything except notify userspace. As a consequence of
1467  * this event, userspace should try to remap the bad rows since statistically
1468  * the same row is likely to go bad again.
1469  */
1470 static void ivybridge_parity_work(struct work_struct *work)
1471 {
1472         struct drm_i915_private *dev_priv =
1473                 container_of(work, struct drm_i915_private, l3_parity.error_work);
1474         u32 error_status, row, bank, subbank;
1475         char *parity_event[6];
1476         uint32_t misccpctl;
1477         uint8_t slice = 0;
1478
1479         /* We must turn off DOP level clock gating to access the L3 registers.
1480          * In order to prevent a get/put style interface, acquire struct mutex
1481          * any time we access those registers.
1482          */
1483         mutex_lock(&dev_priv->dev->struct_mutex);
1484
1485         /* If we've screwed up tracking, just let the interrupt fire again */
1486         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1487                 goto out;
1488
1489         misccpctl = I915_READ(GEN7_MISCCPCTL);
1490         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1491         POSTING_READ(GEN7_MISCCPCTL);
1492
1493         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1494                 u32 reg;
1495
1496                 slice--;
1497                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1498                         break;
1499
1500                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1501
1502                 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1503
1504                 error_status = I915_READ(reg);
1505                 row = GEN7_PARITY_ERROR_ROW(error_status);
1506                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1507                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1508
1509                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1510                 POSTING_READ(reg);
1511
1512                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1513                 parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
1514                 parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
1515                 parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1516                 parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
1517                 parity_event[5] = NULL;
1518
1519 #if 0
1520                 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1521                                    KOBJ_CHANGE, parity_event);
1522 #endif
1523
1524                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1525                           slice, row, bank, subbank);
1526
1527                 kfree(parity_event[4]);
1528                 kfree(parity_event[3]);
1529                 kfree(parity_event[2]);
1530                 kfree(parity_event[1]);
1531         }
1532
1533         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1534
1535 out:
1536         WARN_ON(dev_priv->l3_parity.which_slice);
1537         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1538         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1539         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1540
1541         mutex_unlock(&dev_priv->dev->struct_mutex);
1542 }
1543
1544 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1545 {
1546         struct drm_i915_private *dev_priv = dev->dev_private;
1547
1548         if (!HAS_L3_DPF(dev))
1549                 return;
1550
1551         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1552         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1553         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1554
1555         iir &= GT_PARITY_ERROR(dev);
1556         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1557                 dev_priv->l3_parity.which_slice |= 1 << 1;
1558
1559         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1560                 dev_priv->l3_parity.which_slice |= 1 << 0;
1561
1562         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1563 }
1564
1565 static void ilk_gt_irq_handler(struct drm_device *dev,
1566                                struct drm_i915_private *dev_priv,
1567                                u32 gt_iir)
1568 {
1569         if (gt_iir &
1570             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1571                 notify_ring(dev, &dev_priv->ring[RCS]);
1572         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1573                 notify_ring(dev, &dev_priv->ring[VCS]);
1574 }
1575
1576 static void snb_gt_irq_handler(struct drm_device *dev,
1577                                struct drm_i915_private *dev_priv,
1578                                u32 gt_iir)
1579 {
1580
1581         if (gt_iir &
1582             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1583                 notify_ring(dev, &dev_priv->ring[RCS]);
1584         if (gt_iir & GT_BSD_USER_INTERRUPT)
1585                 notify_ring(dev, &dev_priv->ring[VCS]);
1586         if (gt_iir & GT_BLT_USER_INTERRUPT)
1587                 notify_ring(dev, &dev_priv->ring[BCS]);
1588
1589         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1590                       GT_BSD_CS_ERROR_INTERRUPT |
1591                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1592                 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1593                                   gt_iir);
1594         }
1595
1596         if (gt_iir & GT_PARITY_ERROR(dev))
1597                 ivybridge_parity_error_irq_handler(dev, gt_iir);
1598 }
1599
1600 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1601 {
1602         if ((pm_iir & dev_priv->pm_rps_events) == 0)
1603                 return;
1604
1605         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1606         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1607         gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1608         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1609
1610         queue_work(dev_priv->wq, &dev_priv->rps.work);
1611 }
1612
1613 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1614                                        struct drm_i915_private *dev_priv,
1615                                        u32 master_ctl)
1616 {
1617         struct intel_engine_cs *ring;
1618         u32 rcs, bcs, vcs;
1619         uint32_t tmp = 0;
1620
1621         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1622                 tmp = I915_READ(GEN8_GT_IIR(0));
1623                 if (tmp) {
1624                         I915_WRITE(GEN8_GT_IIR(0), tmp);
1625
1626                         rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1627                         ring = &dev_priv->ring[RCS];
1628                         if (rcs & GT_RENDER_USER_INTERRUPT)
1629                                 notify_ring(dev, ring);
1630                         if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1631                                 intel_execlists_handle_ctx_events(ring);
1632
1633                         bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1634                         ring = &dev_priv->ring[BCS];
1635                         if (bcs & GT_RENDER_USER_INTERRUPT)
1636                                 notify_ring(dev, ring);
1637                         if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1638                                 intel_execlists_handle_ctx_events(ring);
1639                 } else
1640                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1641         }
1642
1643         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1644                 tmp = I915_READ(GEN8_GT_IIR(1));
1645                 if (tmp) {
1646                         I915_WRITE(GEN8_GT_IIR(1), tmp);
1647
1648                         vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1649                         ring = &dev_priv->ring[VCS];
1650                         if (vcs & GT_RENDER_USER_INTERRUPT)
1651                                 notify_ring(dev, ring);
1652                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1653                                 intel_execlists_handle_ctx_events(ring);
1654
1655                         vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1656                         ring = &dev_priv->ring[VCS2];
1657                         if (vcs & GT_RENDER_USER_INTERRUPT)
1658                                 notify_ring(dev, ring);
1659                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1660                                 intel_execlists_handle_ctx_events(ring);
1661                 } else
1662                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1663         }
1664
1665         if (master_ctl & GEN8_GT_PM_IRQ) {
1666                 tmp = I915_READ(GEN8_GT_IIR(2));
1667                 if (tmp & dev_priv->pm_rps_events) {
1668                         I915_WRITE(GEN8_GT_IIR(2),
1669                                    tmp & dev_priv->pm_rps_events);
1670                         gen8_rps_irq_handler(dev_priv, tmp);
1671                 } else
1672                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1673         }
1674
1675         if (master_ctl & GEN8_GT_VECS_IRQ) {
1676                 tmp = I915_READ(GEN8_GT_IIR(3));
1677                 if (tmp) {
1678                         I915_WRITE(GEN8_GT_IIR(3), tmp);
1679
1680                         vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1681                         ring = &dev_priv->ring[VECS];
1682                         if (vcs & GT_RENDER_USER_INTERRUPT)
1683                                 notify_ring(dev, ring);
1684                         if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1685                                 intel_execlists_handle_ctx_events(ring);
1686                 } else
1687                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1688         }
1689
1690 }
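/*
 * Note on the handler above: the four GEN8_GT_IIR banks are walked in turn
 * (0: RCS/BCS, 1: VCS1/VCS2, 2: PM, 3: VECS); each bank is acked by writing
 * the value read back to its IIR before the per-engine user-interrupt and
 * context-switch bits are dispatched, and an empty bank whose master control
 * bit was set is reported as "the master control interrupt lied".
 */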
1691
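/*
 * HPD storm detection, in outline: each hotplug interrupt on a pin bumps
 * hpd_stats[i].hpd_cnt, and the count restarts once the interrupts fall
 * outside an HPD_STORM_DETECT_PERIOD ms window.  More than
 * HPD_STORM_THRESHOLD interrupts inside the window mark the pin
 * HPD_MARK_DISABLED and cause hpd_irq_setup() to be re-run to mask it.
 */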
1692 #define HPD_STORM_DETECT_PERIOD 1000
1693 #define HPD_STORM_THRESHOLD 5
1694
1695 static int pch_port_to_hotplug_shift(enum port port)
1696 {
1697         switch (port) {
1698         case PORT_A:
1699         case PORT_E:
1700         default:
1701                 return -1;
1702         case PORT_B:
1703                 return 0;
1704         case PORT_C:
1705                 return 8;
1706         case PORT_D:
1707                 return 16;
1708         }
1709 }
1710
1711 static int i915_port_to_hotplug_shift(enum port port)
1712 {
1713         switch (port) {
1714         case PORT_A:
1715         case PORT_E:
1716         default:
1717                 return -1;
1718         case PORT_B:
1719                 return 17;
1720         case PORT_C:
1721                 return 19;
1722         case PORT_D:
1723                 return 21;
1724         }
1725 }
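/*
 * The two helpers above give the bit offset of a port's hotplug pulse field
 * (in PCH_PORT_HOTPLUG on PCH-split parts, or in the hotplug trigger word
 * otherwise); intel_hpd_irq_handler() shifts by that amount and tests
 * PORTB_HOTPLUG_LONG_DETECT to tell long pulses from short ones.  -1 means
 * the port has no such field.
 */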
1726
1727 static inline enum port get_port_from_pin(enum hpd_pin pin)
1728 {
1729         switch (pin) {
1730         case HPD_PORT_B:
1731                 return PORT_B;
1732         case HPD_PORT_C:
1733                 return PORT_C;
1734         case HPD_PORT_D:
1735                 return PORT_D;
1736         default:
1737                 return PORT_A; /* no hpd */
1738         }
1739 }
1740
1741 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1742                                          u32 hotplug_trigger,
1743                                          u32 dig_hotplug_reg,
1744                                          const u32 *hpd)
1745 {
1746         struct drm_i915_private *dev_priv = dev->dev_private;
1747         int i;
1748         enum port port;
1749         bool storm_detected = false;
1750         bool queue_dig = false, queue_hp = false;
1751         u32 dig_shift;
1752         u32 dig_port_mask = 0;
1753
1754         if (!hotplug_trigger)
1755                 return;
1756
1757         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1758                          hotplug_trigger, dig_hotplug_reg);
1759
1760         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1761         for (i = 1; i < HPD_NUM_PINS; i++) {
1762                 if (!(hpd[i] & hotplug_trigger))
1763                         continue;
1764
1765                 port = get_port_from_pin(i);
1766                 if (port && dev_priv->hpd_irq_port[port]) {
1767                         bool long_hpd;
1768
1769                         if (HAS_PCH_SPLIT(dev)) {
1770                                 dig_shift = pch_port_to_hotplug_shift(port);
1771                                 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1772                         } else {
1773                                 dig_shift = i915_port_to_hotplug_shift(port);
1774                                 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1775                         }
1776
1777                         DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1778                                          port_name(port),
1779                                          long_hpd ? "long" : "short");
1780                         /* for long HPD pulses we want to have the digital queue happen,
1781                            but we still want HPD storm detection to function. */
1782                         if (long_hpd) {
1783                                 dev_priv->long_hpd_port_mask |= (1 << port);
1784                                 dig_port_mask |= hpd[i];
1785                         } else {
1786                                 /* for short HPD just trigger the digital queue */
1787                                 dev_priv->short_hpd_port_mask |= (1 << port);
1788                                 hotplug_trigger &= ~hpd[i];
1789                         }
1790                         queue_dig = true;
1791                 }
1792         }
1793
1794         for (i = 1; i < HPD_NUM_PINS; i++) {
1795                 if (hpd[i] & hotplug_trigger &&
1796                     dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1797                         /*
1798                          * On GMCH platforms the interrupt mask bits only
1799                          * prevent irq generation, not the setting of the
1800                          * hotplug bits themselves. So only WARN about unexpected
1801                          * interrupts on saner platforms.
1802                          */
1803                         WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1804                                   "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1805                                   hotplug_trigger, i, hpd[i]);
1806
1807                         continue;
1808                 }
1809
1810                 if (!(hpd[i] & hotplug_trigger) ||
1811                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1812                         continue;
1813
1814                 if (!(dig_port_mask & hpd[i])) {
1815                         dev_priv->hpd_event_bits |= (1 << i);
1816                         queue_hp = true;
1817                 }
1818
1819                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1820                                    dev_priv->hpd_stats[i].hpd_last_jiffies
1821                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1822                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1823                         dev_priv->hpd_stats[i].hpd_cnt = 0;
1824                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1825                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1826                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1827                         dev_priv->hpd_event_bits &= ~(1 << i);
1828                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1829                         storm_detected = true;
1830                 } else {
1831                         dev_priv->hpd_stats[i].hpd_cnt++;
1832                         DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1833                                       dev_priv->hpd_stats[i].hpd_cnt);
1834                 }
1835         }
1836
1837         if (storm_detected)
1838                 dev_priv->display.hpd_irq_setup(dev);
1839         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1840
1841         /*
1842          * Our hotplug handler can grab modeset locks (by calling down into the
1843          * fb helpers). Hence it must not be run on our own dev_priv->wq work
1844          * queue, because otherwise the flush_work in the pageflip code will
1845          * deadlock.
1846          */
1847         if (queue_dig)
1848                 schedule_work(&dev_priv->dig_port_work);
1849         if (queue_hp)
1850                 schedule_work(&dev_priv->hotplug_work);
1851 }
1852
1853 static void gmbus_irq_handler(struct drm_device *dev)
1854 {
1855         struct drm_i915_private *dev_priv = dev->dev_private;
1856
1857         wake_up_all(&dev_priv->gmbus_wait_queue);
1858 }
1859
1860 static void dp_aux_irq_handler(struct drm_device *dev)
1861 {
1862         struct drm_i915_private *dev_priv = dev->dev_private;
1863
1864         wake_up_all(&dev_priv->gmbus_wait_queue);
1865 }
1866
1867 #if defined(CONFIG_DEBUG_FS)
1868 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1869                                          uint32_t crc0, uint32_t crc1,
1870                                          uint32_t crc2, uint32_t crc3,
1871                                          uint32_t crc4)
1872 {
1873         struct drm_i915_private *dev_priv = dev->dev_private;
1874         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1875         struct intel_pipe_crc_entry *entry;
1876         int head, tail;
1877
1878         spin_lock(&pipe_crc->lock);
1879
1880         if (!pipe_crc->entries) {
1881                 spin_unlock(&pipe_crc->lock);
1882                 DRM_ERROR("spurious interrupt\n");
1883                 return;
1884         }
1885
1886         head = pipe_crc->head;
1887         tail = pipe_crc->tail;
1888
1889         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1890                 spin_unlock(&pipe_crc->lock);
1891                 DRM_ERROR("CRC buffer overflowing\n");
1892                 return;
1893         }
1894
1895         entry = &pipe_crc->entries[head];
1896
1897         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1898         entry->crc[0] = crc0;
1899         entry->crc[1] = crc1;
1900         entry->crc[2] = crc2;
1901         entry->crc[3] = crc3;
1902         entry->crc[4] = crc4;
1903
1904         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1905         pipe_crc->head = head;
1906
1907         spin_unlock(&pipe_crc->lock);
1908
1909         wake_up_interruptible(&pipe_crc->wq);
1910 }
1911 #else
1912 static inline void
1913 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1914                              uint32_t crc0, uint32_t crc1,
1915                              uint32_t crc2, uint32_t crc3,
1916                              uint32_t crc4) {}
1917 #endif
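/*
 * The CRC samples captured above land in a small power-of-two ring buffer
 * per pipe: head is advanced with "& (INTEL_PIPE_CRC_ENTRIES_NR - 1)" after
 * each entry and waiters on pipe_crc->wq are woken; tail is advanced by the
 * consumer side (not shown here), so CIRC_SPACE() guards against overflow.
 */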
1918
1919
1920 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1921 {
1922         struct drm_i915_private *dev_priv = dev->dev_private;
1923
1924         display_pipe_crc_irq_handler(dev, pipe,
1925                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1926                                      0, 0, 0, 0);
1927 }
1928
1929 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1930 {
1931         struct drm_i915_private *dev_priv = dev->dev_private;
1932
1933         display_pipe_crc_irq_handler(dev, pipe,
1934                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1935                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1936                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1937                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1938                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1939 }
1940
1941 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1942 {
1943         struct drm_i915_private *dev_priv = dev->dev_private;
1944         uint32_t res1, res2;
1945
1946         if (INTEL_INFO(dev)->gen >= 3)
1947                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1948         else
1949                 res1 = 0;
1950
1951         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1952                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1953         else
1954                 res2 = 0;
1955
1956         display_pipe_crc_irq_handler(dev, pipe,
1957                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1958                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1959                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1960                                      res1, res2);
1961 }
1962
1963 /* The RPS events need forcewake, so we add them to a work queue and mask their
1964  * IMR bits until the work is done. Other interrupts can be processed without
1965  * the work queue. */
1966 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1967 {
1968         if (pm_iir & dev_priv->pm_rps_events) {
1969                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1970                 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1971                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1972                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1973
1974                 queue_work(dev_priv->wq, &dev_priv->rps.work);
1975         }
1976
1977         if (HAS_VEBOX(dev_priv->dev)) {
1978                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1979                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1980
1981                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1982                         i915_handle_error(dev_priv->dev, false,
1983                                           "VEBOX CS error interrupt 0x%08x",
1984                                           pm_iir);
1985                 }
1986         }
1987 }
1988
1989 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1990 {
1991         if (!drm_handle_vblank(dev, pipe))
1992                 return false;
1993
1994         return true;
1995 }
1996
1997 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1998 {
1999         struct drm_i915_private *dev_priv = dev->dev_private;
2000         u32 pipe_stats[I915_MAX_PIPES] = { };
2001         int pipe;
2002
2003         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2004         for_each_pipe(dev_priv, pipe) {
2005                 int reg;
2006                 u32 mask, iir_bit = 0;
2007
2008                 /*
2009                  * PIPESTAT bits get signalled even when the interrupt is
2010                  * disabled with the mask bits, and some of the status bits do
2011                  * not generate interrupts at all (like the underrun bit). Hence
2012                  * we need to be careful that we only handle what we want to
2013                  * handle.
2014                  */
2015                 mask = 0;
2016                 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2017                         mask |= PIPE_FIFO_UNDERRUN_STATUS;
2018
2019                 switch (pipe) {
2020                 case PIPE_A:
2021                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2022                         break;
2023                 case PIPE_B:
2024                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2025                         break;
2026                 case PIPE_C:
2027                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2028                         break;
2029                 }
2030                 if (iir & iir_bit)
2031                         mask |= dev_priv->pipestat_irq_mask[pipe];
2032
2033                 if (!mask)
2034                         continue;
2035
2036                 reg = PIPESTAT(pipe);
2037                 mask |= PIPESTAT_INT_ENABLE_MASK;
2038                 pipe_stats[pipe] = I915_READ(reg) & mask;
2039
2040                 /*
2041                  * Clear the PIPE*STAT regs before the IIR
2042                  */
2043                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2044                                         PIPESTAT_INT_STATUS_MASK))
2045                         I915_WRITE(reg, pipe_stats[pipe]);
2046         }
2047         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2048
2049         for_each_pipe(dev_priv, pipe) {
2050                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2051                     intel_pipe_handle_vblank(dev, pipe))
2052                         intel_check_page_flip(dev, pipe);
2053
2054                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2055                         intel_prepare_page_flip(dev, pipe);
2056                         intel_finish_page_flip(dev, pipe);
2057                 }
2058
2059                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2060                         i9xx_pipe_crc_irq_handler(dev, pipe);
2061
2062                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2063                     intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2064                         DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2065         }
2066
2067         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2068                 gmbus_irq_handler(dev);
2069 }
2070
2071 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2072 {
2073         struct drm_i915_private *dev_priv = dev->dev_private;
2074         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2075
2076         if (hotplug_status) {
2077                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2078                 /*
2079                  * Make sure hotplug status is cleared before we clear IIR, or else we
2080                  * may miss hotplug events.
2081                  */
2082                 POSTING_READ(PORT_HOTPLUG_STAT);
2083
2084                 if (IS_G4X(dev)) {
2085                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2086
2087                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2088                 } else {
2089                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2090
2091                         intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2092                 }
2093
2094                 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2095                     hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2096                         dp_aux_irq_handler(dev);
2097         }
2098 }
2099
2100 static irqreturn_t valleyview_irq_handler(void *arg)
2101 {
2102         struct drm_device *dev = arg;
2103         struct drm_i915_private *dev_priv = dev->dev_private;
2104         u32 iir, gt_iir, pm_iir;
2105
2106         while (true) {
2107                 /* Find, clear, then process each source of interrupt */
2108
2109                 gt_iir = I915_READ(GTIIR);
2110                 if (gt_iir)
2111                         I915_WRITE(GTIIR, gt_iir);
2112
2113                 pm_iir = I915_READ(GEN6_PMIIR);
2114                 if (pm_iir)
2115                         I915_WRITE(GEN6_PMIIR, pm_iir);
2116
2117                 iir = I915_READ(VLV_IIR);
2118                 if (iir) {
2119                         /* Consume port before clearing IIR or we'll miss events */
2120                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
2121                                 i9xx_hpd_irq_handler(dev);
2122                         I915_WRITE(VLV_IIR, iir);
2123                 }
2124
2125                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2126                         goto out;
2127
2128                 if (gt_iir)
2129                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2130                 if (pm_iir)
2131                         gen6_rps_irq_handler(dev_priv, pm_iir);
2132                 /* Call regardless, as some status bits might not be
2133                  * signalled in iir */
2134                 valleyview_pipestat_irq_handler(dev, iir);
2135         }
2136
2137 out:
2138         return;
2139 }
2140
2141 static irqreturn_t cherryview_irq_handler(void *arg)
2142 {
2143         struct drm_device *dev = arg;
2144         struct drm_i915_private *dev_priv = dev->dev_private;
2145         u32 master_ctl, iir;
2146
2147         for (;;) {
2148                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2149                 iir = I915_READ(VLV_IIR);
2150
2151                 if (master_ctl == 0 && iir == 0)
2152                         break;
2153
2154
2155                 I915_WRITE(GEN8_MASTER_IRQ, 0);
2156
2157                 /* Find, clear, then process each source of interrupt */
2158
2159                 if (iir) {
2160                         /* Consume port before clearing IIR or we'll miss events */
2161                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
2162                                 i9xx_hpd_irq_handler(dev);
2163                         I915_WRITE(VLV_IIR, iir);
2164                 }
2165
2166                 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2167
2168                 /* Call regardless, as some status bits might not be
2169                  * signalled in iir */
2170                 valleyview_pipestat_irq_handler(dev, iir);
2171
2172                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2173                 POSTING_READ(GEN8_MASTER_IRQ);
2174         }
2175
2176 }
2177
2178 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2179 {
2180         struct drm_i915_private *dev_priv = dev->dev_private;
2181         int pipe;
2182         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2183         u32 dig_hotplug_reg;
2184
2185         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2186         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2187
2188         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2189
2190         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2191                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2192                                SDE_AUDIO_POWER_SHIFT);
2193                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2194                                  port_name(port));
2195         }
2196
2197         if (pch_iir & SDE_AUX_MASK)
2198                 dp_aux_irq_handler(dev);
2199
2200         if (pch_iir & SDE_GMBUS)
2201                 gmbus_irq_handler(dev);
2202
2203         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2204                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2205
2206         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2207                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2208
2209         if (pch_iir & SDE_POISON)
2210                 DRM_ERROR("PCH poison interrupt\n");
2211
2212         if (pch_iir & SDE_FDI_MASK)
2213                 for_each_pipe(dev_priv, pipe)
2214                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2215                                          pipe_name(pipe),
2216                                          I915_READ(FDI_RX_IIR(pipe)));
2217
2218         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2219                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2220
2221         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2222                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2223
2224         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2225                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2226                                                           false))
2227                         DRM_ERROR("PCH transcoder A FIFO underrun\n");
2228
2229         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2230                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2231                                                           false))
2232                         DRM_ERROR("PCH transcoder B FIFO underrun\n");
2233 }
2234
2235 static void ivb_err_int_handler(struct drm_device *dev)
2236 {
2237         struct drm_i915_private *dev_priv = dev->dev_private;
2238         u32 err_int = I915_READ(GEN7_ERR_INT);
2239         enum i915_pipe pipe;
2240
2241         if (err_int & ERR_INT_POISON)
2242                 DRM_ERROR("Poison interrupt\n");
2243
2244         for_each_pipe(dev_priv, pipe) {
2245                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2246                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2247                                                                   false))
2248                                 DRM_ERROR("Pipe %c FIFO underrun\n",
2249                                           pipe_name(pipe));
2250                 }
2251
2252                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2253                         if (IS_IVYBRIDGE(dev))
2254                                 ivb_pipe_crc_irq_handler(dev, pipe);
2255                         else
2256                                 hsw_pipe_crc_irq_handler(dev, pipe);
2257                 }
2258         }
2259
2260         I915_WRITE(GEN7_ERR_INT, err_int);
2261 }
2262
2263 static void cpt_serr_int_handler(struct drm_device *dev)
2264 {
2265         struct drm_i915_private *dev_priv = dev->dev_private;
2266         u32 serr_int = I915_READ(SERR_INT);
2267
2268         if (serr_int & SERR_INT_POISON)
2269                 DRM_ERROR("PCH poison interrupt\n");
2270
2271         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2272                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2273                                                           false))
2274                         DRM_ERROR("PCH transcoder A FIFO underrun\n");
2275
2276         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2277                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2278                                                           false))
2279                         DRM_ERROR("PCH transcoder B FIFO underrun\n");
2280
2281         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2282                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2283                                                           false))
2284                         DRM_ERROR("PCH transcoder C FIFO underrun\n");
2285
2286         I915_WRITE(SERR_INT, serr_int);
2287 }
2288
2289 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2290 {
2291         struct drm_i915_private *dev_priv = dev->dev_private;
2292         int pipe;
2293         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2294         u32 dig_hotplug_reg;
2295
2296         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2297         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2298
2299         intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2300
2301         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2302                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2303                                SDE_AUDIO_POWER_SHIFT_CPT);
2304                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2305                                  port_name(port));
2306         }
2307
2308         if (pch_iir & SDE_AUX_MASK_CPT)
2309                 dp_aux_irq_handler(dev);
2310
2311         if (pch_iir & SDE_GMBUS_CPT)
2312                 gmbus_irq_handler(dev);
2313
2314         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2315                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2316
2317         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2318                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2319
2320         if (pch_iir & SDE_FDI_MASK_CPT)
2321                 for_each_pipe(dev_priv, pipe)
2322                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2323                                          pipe_name(pipe),
2324                                          I915_READ(FDI_RX_IIR(pipe)));
2325
2326         if (pch_iir & SDE_ERROR_CPT)
2327                 cpt_serr_int_handler(dev);
2328 }
2329
2330 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2331 {
2332         struct drm_i915_private *dev_priv = dev->dev_private;
2333         enum i915_pipe pipe;
2334
2335         if (de_iir & DE_AUX_CHANNEL_A)
2336                 dp_aux_irq_handler(dev);
2337
2338         if (de_iir & DE_GSE)
2339                 intel_opregion_asle_intr(dev);
2340
2341         if (de_iir & DE_POISON)
2342                 DRM_ERROR("Poison interrupt\n");
2343
2344         for_each_pipe(dev_priv, pipe) {
2345                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2346                     intel_pipe_handle_vblank(dev, pipe))
2347                         intel_check_page_flip(dev, pipe);
2348
2349                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2350                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2351                                 DRM_ERROR("Pipe %c FIFO underrun\n",
2352                                           pipe_name(pipe));
2353
2354                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2355                         i9xx_pipe_crc_irq_handler(dev, pipe);
2356
2357                 /* plane/pipes map 1:1 on ilk+ */
2358                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2359                         intel_prepare_page_flip(dev, pipe);
2360                         intel_finish_page_flip_plane(dev, pipe);
2361                 }
2362         }
2363
2364         /* check event from PCH */
2365         if (de_iir & DE_PCH_EVENT) {
2366                 u32 pch_iir = I915_READ(SDEIIR);
2367
2368                 if (HAS_PCH_CPT(dev))
2369                         cpt_irq_handler(dev, pch_iir);
2370                 else
2371                         ibx_irq_handler(dev, pch_iir);
2372
2373                 /* should clear PCH hotplug event before clearing CPU irq */
2374                 I915_WRITE(SDEIIR, pch_iir);
2375         }
2376
2377         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2378                 ironlake_rps_change_irq_handler(dev);
2379 }
2380
2381 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2382 {
2383         struct drm_i915_private *dev_priv = dev->dev_private;
2384         enum i915_pipe pipe;
2385
2386         if (de_iir & DE_ERR_INT_IVB)
2387                 ivb_err_int_handler(dev);
2388
2389         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2390                 dp_aux_irq_handler(dev);
2391
2392         if (de_iir & DE_GSE_IVB)
2393                 intel_opregion_asle_intr(dev);
2394
2395         for_each_pipe(dev_priv, pipe) {
2396                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2397                     intel_pipe_handle_vblank(dev, pipe))
2398                         intel_check_page_flip(dev, pipe);
2399
2400                 /* plane/pipes map 1:1 on ilk+ */
2401                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2402                         intel_prepare_page_flip(dev, pipe);
2403                         intel_finish_page_flip_plane(dev, pipe);
2404                 }
2405         }
2406
2407         /* check event from PCH */
2408         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2409                 u32 pch_iir = I915_READ(SDEIIR);
2410
2411                 cpt_irq_handler(dev, pch_iir);
2412
2413                 /* clear PCH hotplug event before clearing CPU irq */
2414                 I915_WRITE(SDEIIR, pch_iir);
2415         }
2416 }
2417
2418 /*
2419  * To handle irqs with the minimum potential races with fresh interrupts, we:
2420  * 1 - Disable Master Interrupt Control.
2421  * 2 - Find the source(s) of the interrupt.
2422  * 3 - Clear the Interrupt Identity bits (IIR).
2423  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2424  * 5 - Re-enable Master Interrupt Control.
2425  */
2426 static irqreturn_t ironlake_irq_handler(void *arg)
2427 {
2428         struct drm_device *dev = arg;
2429         struct drm_i915_private *dev_priv = dev->dev_private;
2430         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2431
2432         /* We get interrupts on unclaimed registers, so check for this before we
2433          * do any I915_{READ,WRITE}. */
2434         intel_uncore_check_errors(dev);
2435
2436         /* disable master interrupt before clearing iir  */
2437         de_ier = I915_READ(DEIER);
2438         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2439         POSTING_READ(DEIER);
2440
2441         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2442          * interrupts will be stored on its back queue, and then we'll be
2443          * able to process them after we restore SDEIER (as soon as we restore
2444          * it, we'll get an interrupt if SDEIIR still has something to process
2445          * due to its back queue). */
2446         if (!HAS_PCH_NOP(dev)) {
2447                 sde_ier = I915_READ(SDEIER);
2448                 I915_WRITE(SDEIER, 0);
2449                 POSTING_READ(SDEIER);
2450         }
2451
2452         /* Find, clear, then process each source of interrupt */
2453
2454         gt_iir = I915_READ(GTIIR);
2455         if (gt_iir) {
2456                 I915_WRITE(GTIIR, gt_iir);
2457                 if (INTEL_INFO(dev)->gen >= 6)
2458                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2459                 else
2460                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2461         }
2462
2463         de_iir = I915_READ(DEIIR);
2464         if (de_iir) {
2465                 I915_WRITE(DEIIR, de_iir);
2466                 if (INTEL_INFO(dev)->gen >= 7)
2467                         ivb_display_irq_handler(dev, de_iir);
2468                 else
2469                         ilk_display_irq_handler(dev, de_iir);
2470         }
2471
2472         if (INTEL_INFO(dev)->gen >= 6) {
2473                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2474                 if (pm_iir) {
2475                         I915_WRITE(GEN6_PMIIR, pm_iir);
2476                         gen6_rps_irq_handler(dev_priv, pm_iir);
2477                 }
2478         }
2479
2480         I915_WRITE(DEIER, de_ier);
2481         POSTING_READ(DEIER);
2482         if (!HAS_PCH_NOP(dev)) {
2483                 I915_WRITE(SDEIER, sde_ier);
2484                 POSTING_READ(SDEIER);
2485         }
2486
2487 }
2488
2489 static irqreturn_t gen8_irq_handler(void *arg)
2490 {
2491         struct drm_device *dev = arg;
2492         struct drm_i915_private *dev_priv = dev->dev_private;
2493         u32 master_ctl;
2494         uint32_t tmp = 0;
2495         enum i915_pipe pipe;
2496
2497         master_ctl = I915_READ(GEN8_MASTER_IRQ);
2498         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2499         if (!master_ctl)
2500                 return;
2501
2502         I915_WRITE(GEN8_MASTER_IRQ, 0);
2503         POSTING_READ(GEN8_MASTER_IRQ);
2504
2505         /* Find, clear, then process each source of interrupt */
2506
2507         gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2508
2509         if (master_ctl & GEN8_DE_MISC_IRQ) {
2510                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2511                 if (tmp) {
2512                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2513                         if (tmp & GEN8_DE_MISC_GSE)
2514                                 intel_opregion_asle_intr(dev);
2515                         else
2516                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2517                 }
2518                 else
2519                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2520         }
2521
2522         if (master_ctl & GEN8_DE_PORT_IRQ) {
2523                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2524                 if (tmp) {
2525                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2526                         if (tmp & GEN8_AUX_CHANNEL_A)
2527                                 dp_aux_irq_handler(dev);
2528                         else
2529                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2530                 }
2531                 else
2532                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2533         }
2534
2535         for_each_pipe(dev_priv, pipe) {
2536                 uint32_t pipe_iir;
2537
2538                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2539                         continue;
2540
2541                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2542                 if (pipe_iir) {
2543                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2544                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2545                             intel_pipe_handle_vblank(dev, pipe))
2546                                 intel_check_page_flip(dev, pipe);
2547
2548                         if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2549                                 intel_prepare_page_flip(dev, pipe);
2550                                 intel_finish_page_flip_plane(dev, pipe);
2551                         }
2552
2553                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2554                                 hsw_pipe_crc_irq_handler(dev, pipe);
2555
2556                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2557                                 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2558                                                                           false))
2559                                         DRM_ERROR("Pipe %c FIFO underrun\n",
2560                                                   pipe_name(pipe));
2561                         }
2562
2563                         if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2564                                 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2565                                           pipe_name(pipe),
2566                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2567                         }
2568                 } else
2569                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2570         }
2571
2572         if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2573                 /*
2574                  * FIXME(BDW): Assume for now that the new interrupt handling
2575                  * scheme also closed the SDE interrupt handling race we've seen
2576                  * on older pch-split platforms. But this needs testing.
2577                  */
2578                 u32 pch_iir = I915_READ(SDEIIR);
2579                 if (pch_iir) {
2580                         I915_WRITE(SDEIIR, pch_iir);
2581                         cpt_irq_handler(dev, pch_iir);
2582                 } else
2583                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2584
2585         }
2586
2587         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2588         POSTING_READ(GEN8_MASTER_IRQ);
2589
2590 }
2591
2592 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2593                                bool reset_completed)
2594 {
2595         struct intel_engine_cs *ring;
2596         int i;
2597
2598         /*
2599          * Notify all waiters for GPU completion events that reset state has
2600          * been changed, and that they need to restart their wait after
2601          * checking for potential errors (and bailing out to drop locks if a gpu
2602          * reset is pending, so that i915_error_work_func can acquire them).
2603          */
2604
2605         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2606         for_each_ring(ring, dev_priv, i)
2607                 wake_up_all(&ring->irq_queue);
2608
2609         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2610         wake_up_all(&dev_priv->pending_flip_queue);
2611
2612         /*
2613          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2614          * reset state is cleared.
2615          */
2616         if (reset_completed)
2617                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2618 }
2619
2620 /**
2621  * i915_error_work_func - do process context error handling work
2622  * @work: work struct
2623  *
2624  * Fire an error uevent so userspace can see that a hang or error
2625  * was detected.
2626  */
2627 static void i915_error_work_func(struct work_struct *work)
2628 {
2629         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2630                                                     work);
2631         struct drm_i915_private *dev_priv =
2632                 container_of(error, struct drm_i915_private, gpu_error);
2633         struct drm_device *dev = dev_priv->dev;
2634 #if 0
2635         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2636         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2637         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2638 #endif
2639         int ret;
2640
2641         /* kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); */
2642
2643         /*
2644          * Note that there's only one work item which does gpu resets, so we
2645          * need not worry about concurrent gpu resets potentially incrementing
2646          * error->reset_counter twice. We only need to take care of another
2647          * racing irq/hangcheck declaring the gpu dead for a second time. A
2648          * quick check for that is good enough: schedule_work ensures the
2649          * correct ordering between hang detection and this work item, and since
2650          * the reset in-progress bit is only ever set by code outside of this
2651          * work we don't need to worry about any other races.
2652          */
2653         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2654                 DRM_DEBUG_DRIVER("resetting chip\n");
2655 #if 0
2656                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2657                                    reset_event);
2658 #endif
2659
2660                 /*
2661                  * In most cases it's guaranteed that we get here with an RPM
2662                  * reference held, for example because there is a pending GPU
2663                  * request that won't finish until the reset is done. This
2664                  * isn't the case at least when we get here by doing a
2665          * simulated reset via debugfs, so get an RPM reference.
2666                  */
2667                 intel_runtime_pm_get(dev_priv);
2668                 /*
2669                  * All state reset _must_ be completed before we update the
2670                  * reset counter, for otherwise waiters might miss the reset
2671                  * pending state and not properly drop locks, resulting in
2672                  * deadlocks with the reset work.
2673                  */
2674                 ret = i915_reset(dev);
2675
2676                 intel_display_handle_reset(dev);
2677
2678                 intel_runtime_pm_put(dev_priv);
2679
2680                 if (ret == 0) {
2681                         /*
2682                          * After all the gem state is reset, increment the reset
2683                          * counter and wake up everyone waiting for the reset to
2684                          * complete.
2685                          *
2686                          * Since unlock operations are a one-sided barrier only,
2687                          * we need to insert a barrier here to order
2688                          * any seqno updates before the counter
2689                          * increment.
2690                          */
2691                         smp_mb__before_atomic();
2692                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2693
2694 #if 0
2695                         kobject_uevent_env(&dev->primary->kdev->kobj,
2696                                            KOBJ_CHANGE, reset_done_event);
2697 #endif
2698                 } else {
2699                         atomic_set_mask(I915_WEDGED, &error->reset_counter);
2700                 }
2701
2702                 /*
2703                  * Note: The wake_up also serves as a memory barrier so that
2704          * waiters see the updated value of the reset counter atomic_t.
2705                  */
2706                 i915_error_wake_up(dev_priv, true);
2707         }
2708 }
2709
2710 static void i915_report_and_clear_eir(struct drm_device *dev)
2711 {
2712         struct drm_i915_private *dev_priv = dev->dev_private;
2713         uint32_t instdone[I915_NUM_INSTDONE_REG];
2714         u32 eir = I915_READ(EIR);
2715         int pipe, i;
2716
2717         if (!eir)
2718                 return;
2719
2720         pr_err("render error detected, EIR: 0x%08x\n", eir);
2721
2722 #if 0
2723         i915_get_extra_instdone(dev, instdone);
2724 #endif
2725
2726         if (IS_G4X(dev)) {
2727                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2728                         u32 ipeir = I915_READ(IPEIR_I965);
2729
2730                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2731                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2732                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2733                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2734                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2735                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2736                         I915_WRITE(IPEIR_I965, ipeir);
2737                         POSTING_READ(IPEIR_I965);
2738                 }
2739                 if (eir & GM45_ERROR_PAGE_TABLE) {
2740                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2741                         pr_err("page table error\n");
2742                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2743                         I915_WRITE(PGTBL_ER, pgtbl_err);
2744                         POSTING_READ(PGTBL_ER);
2745                 }
2746         }
2747
2748         if (!IS_GEN2(dev)) {
2749                 if (eir & I915_ERROR_PAGE_TABLE) {
2750                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2751                         pr_err("page table error\n");
2752                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2753                         I915_WRITE(PGTBL_ER, pgtbl_err);
2754                         POSTING_READ(PGTBL_ER);
2755                 }
2756         }
2757
2758         if (eir & I915_ERROR_MEMORY_REFRESH) {
2759                 pr_err("memory refresh error:\n");
2760                 for_each_pipe(dev_priv, pipe)
2761                         pr_err("pipe %c stat: 0x%08x\n",
2762                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2763                 /* pipestat has already been acked */
2764         }
2765         if (eir & I915_ERROR_INSTRUCTION) {
2766                 pr_err("instruction error\n");
2767                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2768                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2769                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2770                 if (INTEL_INFO(dev)->gen < 4) {
2771                         u32 ipeir = I915_READ(IPEIR);
2772
2773                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2774                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2775                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2776                         I915_WRITE(IPEIR, ipeir);
2777                         POSTING_READ(IPEIR);
2778                 } else {
2779                         u32 ipeir = I915_READ(IPEIR_I965);
2780
2781                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2782                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2783                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2784                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2785                         I915_WRITE(IPEIR_I965, ipeir);
2786                         POSTING_READ(IPEIR_I965);
2787                 }
2788         }
2789
2790         I915_WRITE(EIR, eir);
2791         POSTING_READ(EIR);
2792         eir = I915_READ(EIR);
2793         if (eir) {
2794                 /*
2795                  * some errors might have become stuck;
2796                  * mask them.
2797                  */
2798                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2799                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2800                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2801         }
2802 }
2803
2804 /**
2805  * i915_handle_error - handle an error interrupt
2806  * @dev: drm device
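 * @wedged: set when the error requires a full GPU reset to recover
 * @fmt: printf-style format string describing the error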
2807  *
2808  * Do some basic checking of register state at error interrupt time and
2809  * dump it to the syslog.  Also call i915_capture_error_state() to make
2810  * sure we get a record and make it available in debugfs.  Fire a uevent
2811  * so userspace knows something bad happened (should trigger collection
2812  * of a ring dump etc.).
2813  */
2814 void i915_handle_error(struct drm_device *dev, bool wedged,
2815                        const char *fmt, ...)
2816 {
2817         struct drm_i915_private *dev_priv = dev->dev_private;
2818 #if 0
2819         va_list args;
2820         char error_msg[80];
2821
2822         va_start(args, fmt);
2823         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2824         va_end(args);
2825
2826         i915_capture_error_state(dev, wedged, error_msg);
2827 #endif
2828         i915_report_and_clear_eir(dev);
2829
2830         if (wedged) {
2831                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2832                                 &dev_priv->gpu_error.reset_counter);
2833
2834                 /*
2835                  * Wakeup waiting processes so that the reset work function
2836                  * i915_error_work_func doesn't deadlock trying to grab various
2837                  * locks. By bumping the reset counter first, the woken
2838                  * processes will see a reset in progress and back off,
2839                  * releasing their locks and then wait for the reset completion.
2840                  * We must do this for _all_ gpu waiters that might hold locks
2841                  * that the reset work needs to acquire.
2842                  *
2843                  * Note: The wake_up serves as the required memory barrier to
2844                  * ensure that the waiters see the updated value of the reset
2845                  * counter atomic_t.
2846                  */
2847                 i915_error_wake_up(dev_priv, false);
2848         }
2849
2850         /*
2851          * Our reset work can grab modeset locks (since it needs to reset the
2852          * state of outstanding pageflips). Hence it must not be run on our own
2853          * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2854          * code will deadlock.
2855          */
2856         schedule_work(&dev_priv->gpu_error.work);
2857 }
2858
2859 /* Called from drm generic code, passed 'crtc' which
2860  * we use as a pipe index
2861  */
2862 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2863 {
2864         struct drm_i915_private *dev_priv = dev->dev_private;
2865
2866         if (!i915_pipe_enabled(dev, pipe))
2867                 return -EINVAL;
2868
2869         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2870         if (INTEL_INFO(dev)->gen >= 4)
2871                 i915_enable_pipestat(dev_priv, pipe,
2872                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2873         else
2874                 i915_enable_pipestat(dev_priv, pipe,
2875                                      PIPE_VBLANK_INTERRUPT_STATUS);
2876         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2877
2878         return 0;
2879 }
2880
2881 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2882 {
2883         struct drm_i915_private *dev_priv = dev->dev_private;
2884         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2885                                                      DE_PIPE_VBLANK(pipe);
2886
2887         if (!i915_pipe_enabled(dev, pipe))
2888                 return -EINVAL;
2889
2890         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2891         ironlake_enable_display_irq(dev_priv, bit);
2892         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2893
2894         return 0;
2895 }
2896
2897 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2898 {
2899         struct drm_i915_private *dev_priv = dev->dev_private;
2900
2901         if (!i915_pipe_enabled(dev, pipe))
2902                 return -EINVAL;
2903
2904         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2905         i915_enable_pipestat(dev_priv, pipe,
2906                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2907         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2908
2909         return 0;
2910 }
2911
2912 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2913 {
2914         struct drm_i915_private *dev_priv = dev->dev_private;
2915
2916         if (!i915_pipe_enabled(dev, pipe))
2917                 return -EINVAL;
2918
2919         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2920         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2921         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2922         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2923         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2924         return 0;
2925 }
2926
2927 /* Called from drm generic code, passed 'crtc' which
2928  * we use as a pipe index
2929  */
2930 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2931 {
2932         struct drm_i915_private *dev_priv = dev->dev_private;
2933
2934         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2935         i915_disable_pipestat(dev_priv, pipe,
2936                               PIPE_VBLANK_INTERRUPT_STATUS |
2937                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2938         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2939 }
2940
2941 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2942 {
2943         struct drm_i915_private *dev_priv = dev->dev_private;
2944         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2945                                                      DE_PIPE_VBLANK(pipe);
2946
2947         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2948         ironlake_disable_display_irq(dev_priv, bit);
2949         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2950 }
2951
2952 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2953 {
2954         struct drm_i915_private *dev_priv = dev->dev_private;
2955
2956         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2957         i915_disable_pipestat(dev_priv, pipe,
2958                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2959         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2960 }
2961
2962 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2963 {
2964         struct drm_i915_private *dev_priv = dev->dev_private;
2965
2966         if (!i915_pipe_enabled(dev, pipe))
2967                 return;
2968
2969         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2970         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2971         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2972         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2973         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2974 }
2975
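/* seqno of the most recently submitted request on this ring */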
2976 static u32
2977 ring_last_seqno(struct intel_engine_cs *ring)
2978 {
2979         return list_entry(ring->request_list.prev,
2980                           struct drm_i915_gem_request, list)->seqno;
2981 }
2982
2983 static bool
2984 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2985 {
2986         return (list_empty(&ring->request_list) ||
2987                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2988 }
2989
2990 static bool
2991 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2992 {
2993         if (INTEL_INFO(dev)->gen >= 8) {
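                /* MI command (type 0) with opcode 0x1c is MI_SEMAPHORE_WAIT on gen8+ */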
2994                 return (ipehr >> 23) == 0x1c;
2995         } else {
2996                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2997                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2998                                  MI_SEMAPHORE_REGISTER);
2999         }
3000 }
3001
3002 static struct intel_engine_cs *
3003 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3004 {
3005         struct drm_i915_private *dev_priv = ring->dev->dev_private;
3006         struct intel_engine_cs *signaller;
3007         int i;
3008
3009         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3010                 for_each_ring(signaller, dev_priv, i) {
3011                         if (ring == signaller)
3012                                 continue;
3013
3014                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
3015                                 return signaller;
3016                 }
3017         } else {
3018                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3019
3020                 for_each_ring(signaller, dev_priv, i) {
3021                         if (ring == signaller)
3022                                 continue;
3023
3024                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3025                                 return signaller;
3026                 }
3027         }
3028
3029         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
3030                   ring->id, ipehr, offset);
3031
3032         return NULL;
3033 }
3034
3035 static struct intel_engine_cs *
3036 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3037 {
3038         struct drm_i915_private *dev_priv = ring->dev->dev_private;
3039         u32 cmd, ipehr, head;
3040         u64 offset = 0;
3041         int i, backwards;
3042
3043         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3044         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3045                 return NULL;
3046
3047         /*
3048          * HEAD is likely pointing to the dword after the actual command,
3049          * so scan backwards until we find the MBOX. But limit the scan to
3050          * 4 or 5 dwords, depending on the semaphore wait command size.
3051          * Note that we don't care about ACTHD here since that might
3052          * point at the batch, and semaphores are always emitted into the
3053          * ringbuffer itself.
3054          */
3055         head = I915_READ_HEAD(ring) & HEAD_ADDR;
3056         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3057
3058         for (i = backwards; i; --i) {
3059                 /*
3060                  * Be paranoid and presume the hw has gone off into the wild -
3061                  * our ring is smaller than what the hardware (and hence
3062                  * HEAD_ADDR) allows. Also handles wrap-around.
3063                  */
3064                 head &= ring->buffer->size - 1;
3065
3066                 /* This here seems to blow up */
3067                 cmd = ioread32(ring->buffer->virtual_start + head);
3068                 if (cmd == ipehr)
3069                         break;
3070
3071                 head -= 4;
3072         }
3073
3074         if (!i)
3075                 return NULL;
3076
3077         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
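        /*
         * On gen8+ the wait also carries a 64-bit GGTT address:
         * low dword at head + 8, high dword at head + 12.
         */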
3078         if (INTEL_INFO(ring->dev)->gen >= 8) {
3079                 offset = ioread32(ring->buffer->virtual_start + head + 12);
3080                 offset <<= 32;
3081                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
3082         }
3083         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3084 }
3085
3086 static int semaphore_passed(struct intel_engine_cs *ring)
3087 {
3088         struct drm_i915_private *dev_priv = ring->dev->dev_private;
3089         struct intel_engine_cs *signaller;
3090         u32 seqno;
3091
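        /* Depth of the signaller chain walked so far; cleared each hangcheck pass. */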
3092         ring->hangcheck.deadlock++;
3093
3094         signaller = semaphore_waits_for(ring, &seqno);
3095         if (signaller == NULL)
3096                 return -1;
3097
3098         /* Prevent pathological recursion due to driver bugs */
3099         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3100                 return -1;
3101
3102         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3103                 return 1;
3104
3105         /* cursory check for an unkickable deadlock */
3106         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3107             semaphore_passed(signaller) < 0)
3108                 return -1;
3109
3110         return 0;
3111 }
3112
3113 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3114 {
3115         struct intel_engine_cs *ring;
3116         int i;
3117
3118         for_each_ring(ring, dev_priv, i)
3119                 ring->hangcheck.deadlock = 0;
3120 }
3121
3122 static enum intel_ring_hangcheck_action
3123 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3124 {
3125         struct drm_device *dev = ring->dev;
3126         struct drm_i915_private *dev_priv = dev->dev_private;
3127         u32 tmp;
3128
3129         if (acthd != ring->hangcheck.acthd) {
3130                 if (acthd > ring->hangcheck.max_acthd) {
3131                         ring->hangcheck.max_acthd = acthd;
3132                         return HANGCHECK_ACTIVE;
3133                 }
3134
3135                 return HANGCHECK_ACTIVE_LOOP;
3136         }
3137
3138         if (IS_GEN2(dev))
3139                 return HANGCHECK_HUNG;
3140
3141         /* Is the chip hanging on a WAIT_FOR_EVENT?
3142          * If so we can simply poke the RB_WAIT bit
3143          * and break the hang. This should work on
3144          * all but the second generation chipsets.
3145          */
3146         tmp = I915_READ_CTL(ring);
3147         if (tmp & RING_WAIT) {
3148                 i915_handle_error(dev, false,
3149                                   "Kicking stuck wait on %s",
3150                                   ring->name);
3151                 I915_WRITE_CTL(ring, tmp);
3152                 return HANGCHECK_KICK;
3153         }
3154
3155         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3156                 switch (semaphore_passed(ring)) {
3157                 default:
3158                         return HANGCHECK_HUNG;
3159                 case 1:
3160                         i915_handle_error(dev, false,
3161                                           "Kicking stuck semaphore on %s",
3162                                           ring->name);
3163                         I915_WRITE_CTL(ring, tmp);
3164                         return HANGCHECK_KICK;
3165                 case 0:
3166                         return HANGCHECK_WAIT;
3167                 }
3168         }
3169
3170         return HANGCHECK_HUNG;
3171 }
3172
3173 /**
3174  * This is called when the chip hasn't reported back with completed
3175  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
3176  * if there is no progress, the hangcheck score for that ring is increased.
3177  * Further, acthd is inspected to see if the ring is stuck. If it is,
3178  * we kick the ring. If we see no progress on three subsequent calls
3179  * we assume the chip is wedged and try to fix it by resetting the chip.
3180  */
3181 static void i915_hangcheck_elapsed(unsigned long data)
3182 {
3183         struct drm_device *dev = (struct drm_device *)data;
3184         struct drm_i915_private *dev_priv = dev->dev_private;
3185         struct intel_engine_cs *ring;
3186         int i;
3187         int busy_count = 0, rings_hung = 0;
3188         bool stuck[I915_NUM_RINGS] = { 0 };
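/* hangcheck score increments, in increasing order of severity */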
3189 #define BUSY 1
3190 #define KICK 5
3191 #define HUNG 20
3192
3193         if (!i915.enable_hangcheck)
3194                 return;
3195
3196         for_each_ring(ring, dev_priv, i) {
3197                 u64 acthd;
3198                 u32 seqno;
3199                 bool busy = true;
3200
3201                 semaphore_clear_deadlocks(dev_priv);
3202
3203                 seqno = ring->get_seqno(ring, false);
3204                 acthd = intel_ring_get_active_head(ring);
3205
3206                 if (ring->hangcheck.seqno == seqno) {
3207                         if (ring_idle(ring, seqno)) {
3208                                 ring->hangcheck.action = HANGCHECK_IDLE;
3209
3210                                 if (waitqueue_active(&ring->irq_queue)) {
3211                                         /* Issue a wake-up to catch stuck h/w. */
3212                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3213                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3214                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3215                                                                   ring->name);
3216                                                 else
3217                                                         DRM_INFO("Fake missed irq on %s\n",
3218                                                                  ring->name);
3219                                                 wake_up_all(&ring->irq_queue);
3220                                         }
3221                                         /* Safeguard against driver failure */
3222                                         ring->hangcheck.score += BUSY;
3223                                 } else
3224                                         busy = false;
3225                         } else {
3226                                 /* We always increment the hangcheck score
3227                                  * if the ring is busy and still processing
3228                                  * the same request, so that no single request
3229                                  * can run indefinitely (such as a chain of
3230                                  * batches). The only time we do not increment
3231                                  * the hangcheck score on this ring is if this
3232                                  * ring is in a legitimate wait for another
3233                                  * ring. In that case the waiting ring is a
3234                                  * victim and we want to be sure we catch the
3235                                  * right culprit. Then every time we do kick
3236                                  * the ring, add a small increment to the
3237                                  * score so that we can catch a batch that is
3238                                  * being repeatedly kicked and so responsible
3239                                  * for stalling the machine.
3240                                  */
3241                                 ring->hangcheck.action = ring_stuck(ring,
3242                                                                     acthd);
3243
3244                                 switch (ring->hangcheck.action) {
3245                                 case HANGCHECK_IDLE:
3246                                 case HANGCHECK_WAIT:
3247                                 case HANGCHECK_ACTIVE:
3248                                         break;
3249                                 case HANGCHECK_ACTIVE_LOOP:
3250                                         ring->hangcheck.score += BUSY;
3251                                         break;
3252                                 case HANGCHECK_KICK:
3253                                         ring->hangcheck.score += KICK;
3254                                         break;
3255                                 case HANGCHECK_HUNG:
3256                                         ring->hangcheck.score += HUNG;
3257                                         stuck[i] = true;
3258                                         break;
3259                                 }
3260                         }
3261                 } else {
3262                         ring->hangcheck.action = HANGCHECK_ACTIVE;
3263
3264                         /* Gradually reduce the count so that we catch DoS
3265                          * attempts across multiple batches.
3266                          */
3267                         if (ring->hangcheck.score > 0)
3268                                 ring->hangcheck.score--;
3269
3270                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3271                 }
3272
3273                 ring->hangcheck.seqno = seqno;
3274                 ring->hangcheck.acthd = acthd;
3275                 busy_count += busy;
3276         }
3277
3278         for_each_ring(ring, dev_priv, i) {
3279                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3280                         DRM_INFO("%s on %s\n",
3281                                  stuck[i] ? "stuck" : "no progress",
3282                                  ring->name);
3283                         rings_hung++;
3284                 }
3285         }
3286
3287         if (rings_hung)
3288                 return i915_handle_error(dev, true, "Ring hung");
3289
3290         if (busy_count)
3291                 /* Reset timer in case the chip hangs without another request
3292                  * being added */
3293                 i915_queue_hangcheck(dev);
3294 }
3295
3296 void i915_queue_hangcheck(struct drm_device *dev)
3297 {
3298         struct drm_i915_private *dev_priv = dev->dev_private;
3299         if (!i915.enable_hangcheck)
3300                 return;
3301
3302         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3303                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3304 }
3305
3306 static void ibx_irq_reset(struct drm_device *dev)
3307 {
3308         struct drm_i915_private *dev_priv = dev->dev_private;
3309
3310         if (HAS_PCH_NOP(dev))
3311                 return;
3312
3313         GEN5_IRQ_RESET(SDE);
3314
3315         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3316                 I915_WRITE(SERR_INT, 0xffffffff);
3317 }
3318
3319 /*
3320  * SDEIER is also touched by the interrupt handler to work around missed PCH
3321  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3322  * instead we unconditionally enable all PCH interrupt sources here, but then
3323  * only unmask them as needed with SDEIMR.
3324  *
3325  * This function needs to be called before interrupts are enabled.
3326  */
3327 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3328 {
3329         struct drm_i915_private *dev_priv = dev->dev_private;
3330
3331         if (HAS_PCH_NOP(dev))
3332                 return;
3333
3334         WARN_ON(I915_READ(SDEIER) != 0);
3335         I915_WRITE(SDEIER, 0xffffffff);
3336         POSTING_READ(SDEIER);
3337 }
3338
3339 static void gen5_gt_irq_reset(struct drm_device *dev)
3340 {
3341         struct drm_i915_private *dev_priv = dev->dev_private;
3342
3343         GEN5_IRQ_RESET(GT);
3344         if (INTEL_INFO(dev)->gen >= 6)
3345                 GEN5_IRQ_RESET(GEN6_PM);
3346 }
3347
3348 /* drm_dma.h hooks
3349 */
3350 static void ironlake_irq_reset(struct drm_device *dev)
3351 {
3352         struct drm_i915_private *dev_priv = dev->dev_private;
3353
3354         I915_WRITE(HWSTAM, 0xffffffff);
3355
3356         GEN5_IRQ_RESET(DE);
3357         if (IS_GEN7(dev))
3358                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3359
3360         gen5_gt_irq_reset(dev);
3361
3362         ibx_irq_reset(dev);
3363 }
3364
3365 static void valleyview_irq_preinstall(struct drm_device *dev)
3366 {
3367         struct drm_i915_private *dev_priv = dev->dev_private;
3368         int pipe;
3369
3370         /* VLV magic */
3371         I915_WRITE(VLV_IMR, 0);
3372         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3373         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3374         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3375
3376         /* and GT */
3377         I915_WRITE(GTIIR, I915_READ(GTIIR));
3378         I915_WRITE(GTIIR, I915_READ(GTIIR));
3379
3380         gen5_gt_irq_reset(dev);
3381
3382         I915_WRITE(DPINVGTT, 0xff);
3383
3384         I915_WRITE(PORT_HOTPLUG_EN, 0);
3385         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3386         for_each_pipe(dev_priv, pipe)
3387                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3388         I915_WRITE(VLV_IIR, 0xffffffff);
3389         I915_WRITE(VLV_IMR, 0xffffffff);
3390         I915_WRITE(VLV_IER, 0x0);
3391         POSTING_READ(VLV_IER);
3392 }
3393
3394 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3395 {
3396         GEN8_IRQ_RESET_NDX(GT, 0);
3397         GEN8_IRQ_RESET_NDX(GT, 1);
3398         GEN8_IRQ_RESET_NDX(GT, 2);
3399         GEN8_IRQ_RESET_NDX(GT, 3);
3400 }
3401
3402 static void gen8_irq_reset(struct drm_device *dev)
3403 {
3404         struct drm_i915_private *dev_priv = dev->dev_private;
3405         int pipe;
3406
3407         I915_WRITE(GEN8_MASTER_IRQ, 0);
3408         POSTING_READ(GEN8_MASTER_IRQ);
3409
3410         gen8_gt_irq_reset(dev_priv);
3411
3412         for_each_pipe(dev_priv, pipe)
3413                 if (intel_display_power_enabled(dev_priv,
3414                                                 POWER_DOMAIN_PIPE(pipe)))
3415                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3416
3417         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3418         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3419         GEN5_IRQ_RESET(GEN8_PCU_);
3420
3421         ibx_irq_reset(dev);
3422 }
3423
3424 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3425 {
3426         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3427
3428         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3429         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3430                           ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3431         GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3432                           ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3433         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3434 }
3435
3436 static void cherryview_irq_preinstall(struct drm_device *dev)
3437 {
3438         struct drm_i915_private *dev_priv = dev->dev_private;
3439         int pipe;
3440
3441         I915_WRITE(GEN8_MASTER_IRQ, 0);
3442         POSTING_READ(GEN8_MASTER_IRQ);
3443
3444         gen8_gt_irq_reset(dev_priv);
3445
3446         GEN5_IRQ_RESET(GEN8_PCU_);
3447
3448         POSTING_READ(GEN8_PCU_IIR);
3449
3450         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3451
3452         I915_WRITE(PORT_HOTPLUG_EN, 0);
3453         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3454
3455         for_each_pipe(dev_priv, pipe)
3456                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3457
3458         I915_WRITE(VLV_IMR, 0xffffffff);
3459         I915_WRITE(VLV_IER, 0x0);
3460         I915_WRITE(VLV_IIR, 0xffffffff);
3461         POSTING_READ(VLV_IIR);
3462 }
3463
3464 static void ibx_hpd_irq_setup(struct drm_device *dev)
3465 {
3466         struct drm_i915_private *dev_priv = dev->dev_private;
3467         struct intel_encoder *intel_encoder;
3468         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3469
3470         if (HAS_PCH_IBX(dev)) {
3471                 hotplug_irqs = SDE_HOTPLUG_MASK;
3472                 for_each_intel_encoder(dev, intel_encoder)
3473                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3474                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3475         } else {
3476                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3477                 for_each_intel_encoder(dev, intel_encoder)
3478                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3479                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3480         }
3481
3482         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3483
3484         /*
3485          * Enable digital hotplug on the PCH, and configure the DP short pulse
3486          * duration to 2ms (which is the minimum in the Display Port spec)
3487          *
3488          * This register is the same on all known PCH chips.
3489          */
3490         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3491         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3492         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3493         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3494         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3495         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3496 }
3497
3498 static void ibx_irq_postinstall(struct drm_device *dev)
3499 {
3500         struct drm_i915_private *dev_priv = dev->dev_private;
3501         u32 mask;
3502
3503         if (HAS_PCH_NOP(dev))
3504                 return;
3505
3506         if (HAS_PCH_IBX(dev))
3507                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3508         else
3509                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3510
3511         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3512         I915_WRITE(SDEIMR, ~mask);
3513 }
3514
3515 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3516 {
3517         struct drm_i915_private *dev_priv = dev->dev_private;
3518         u32 pm_irqs, gt_irqs;
3519
3520         pm_irqs = gt_irqs = 0;
3521
3522         dev_priv->gt_irq_mask = ~0;
3523         if (HAS_L3_DPF(dev)) {
3524                 /* L3 parity interrupt is always unmasked. */
3525                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3526                 gt_irqs |= GT_PARITY_ERROR(dev);
3527         }
3528
3529         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3530         if (IS_GEN5(dev)) {
3531                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3532                            ILK_BSD_USER_INTERRUPT;
3533         } else {
3534                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3535         }
3536
3537         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3538
3539         if (INTEL_INFO(dev)->gen >= 6) {
3540                 pm_irqs |= dev_priv->pm_rps_events;
3541
3542                 if (HAS_VEBOX(dev))
3543                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3544
3545                 dev_priv->pm_irq_mask = 0xffffffff;
3546                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3547         }
3548 }
3549
3550 static int ironlake_irq_postinstall(struct drm_device *dev)
3551 {
3552         struct drm_i915_private *dev_priv = dev->dev_private;
3553         u32 display_mask, extra_mask;
3554
3555         if (INTEL_INFO(dev)->gen >= 7) {
3556                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3557                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3558                                 DE_PLANEB_FLIP_DONE_IVB |
3559                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3560                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3561                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3562         } else {
3563                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3564                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3565                                 DE_AUX_CHANNEL_A |
3566                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3567                                 DE_POISON);
3568                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3569                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3570         }
3571
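        /*
         * display_mask bits are unmasked right away; extra_mask bits are
         * enabled in IER but stay masked in IMR until explicitly unmasked,
         * e.g. when vblank interrupts are requested.
         */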
3572         dev_priv->irq_mask = ~display_mask;
3573
3574         I915_WRITE(HWSTAM, 0xeffe);
3575
3576         ibx_irq_pre_postinstall(dev);
3577
3578         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3579
3580         gen5_gt_irq_postinstall(dev);
3581
3582         ibx_irq_postinstall(dev);
3583
3584         if (IS_IRONLAKE_M(dev)) {
3585                 /* Enable PCU event interrupts
3586                  *
3587                  * spinlocking not required here for correctness since interrupt
3588                  * setup is guaranteed to run in single-threaded context. But we
3589                  * need it to make the assert_spin_locked happy. */
3590                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3591                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3592                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3593         }
3594
3595         return 0;
3596 }
3597
3598 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3599 {
3600         u32 pipestat_mask;
3601         u32 iir_mask;
3602
3603         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3604                         PIPE_FIFO_UNDERRUN_STATUS;
3605
3606         I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3607         I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3608         POSTING_READ(PIPESTAT(PIPE_A));
3609
3610         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3611                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3612
3613         i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3614                                                PIPE_GMBUS_INTERRUPT_STATUS);
3615         i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3616
3617         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3618                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3619                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3620         dev_priv->irq_mask &= ~iir_mask;
3621
3622         I915_WRITE(VLV_IIR, iir_mask);
3623         I915_WRITE(VLV_IIR, iir_mask);
3624         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3625         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3626         POSTING_READ(VLV_IER);
3627 }
3628
3629 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3630 {
3631         u32 pipestat_mask;
3632         u32 iir_mask;
3633
3634         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3635                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3636                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3637
3638         dev_priv->irq_mask |= iir_mask;
3639         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3640         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3641         I915_WRITE(VLV_IIR, iir_mask);
3642         I915_WRITE(VLV_IIR, iir_mask);
3643         POSTING_READ(VLV_IIR);
3644
3645         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3646                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3647
3648         i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3649                                                 PIPE_GMBUS_INTERRUPT_STATUS);
3650         i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3651
3652         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3653                         PIPE_FIFO_UNDERRUN_STATUS;
3654         I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3655         I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3656         POSTING_READ(PIPESTAT(PIPE_A));
3657 }
3658
3659 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3660 {
3661         assert_spin_locked(&dev_priv->irq_lock);
3662
3663         if (dev_priv->display_irqs_enabled)
3664                 return;
3665
3666         dev_priv->display_irqs_enabled = true;
3667
3668         if (dev_priv->dev->irq_enabled)
3669                 valleyview_display_irqs_install(dev_priv);
3670 }
3671
3672 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3673 {
3674         assert_spin_locked(&dev_priv->irq_lock);
3675
3676         if (!dev_priv->display_irqs_enabled)
3677                 return;
3678
3679         dev_priv->display_irqs_enabled = false;
3680
3681         if (dev_priv->dev->irq_enabled)
3682                 valleyview_display_irqs_uninstall(dev_priv);
3683 }
3684
3685 static int valleyview_irq_postinstall(struct drm_device *dev)
3686 {
3687         struct drm_i915_private *dev_priv = dev->dev_private;
3688
3689         dev_priv->irq_mask = ~0;
3690
3691         I915_WRITE(PORT_HOTPLUG_EN, 0);
3692         POSTING_READ(PORT_HOTPLUG_EN);
3693
3694         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3695         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3696         I915_WRITE(VLV_IIR, 0xffffffff);
3697         POSTING_READ(VLV_IER);
3698
3699         /* Interrupt setup is already guaranteed to be single-threaded; this is
3700          * just to make the assert_spin_locked check happy. */
3701         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3702         if (dev_priv->display_irqs_enabled)
3703                 valleyview_display_irqs_install(dev_priv);
3704         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3705
3706         I915_WRITE(VLV_IIR, 0xffffffff);
3707         I915_WRITE(VLV_IIR, 0xffffffff);
3708
3709         gen5_gt_irq_postinstall(dev);
3710
3711         /* ack & enable invalid PTE error interrupts */
3712 #if 0 /* FIXME: add support to irq handler for checking these bits */
3713         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3714         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3715 #endif
3716
3717         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3718
3719         return 0;
3720 }
3721
3722 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3723 {
3724         /* These are interrupts we'll toggle with the ring mask register */
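        /*
         * One entry per GT interrupt bank: 0 = RCS/BCS, 1 = VCS1/VCS2,
         * 2 = PM (programmed with the PM masks below), 3 = VECS.
         */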
3725         uint32_t gt_interrupts[] = {
3726                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3727                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3728                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3729                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3730                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3731                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3732                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3733                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3734                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3735                 0,
3736                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3737                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3738                 };
3739
3740         dev_priv->pm_irq_mask = 0xffffffff;
3741         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3742         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3743         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3744         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3745 }
3746
3747 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3748 {
3749         uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3750                 GEN8_PIPE_CDCLK_CRC_DONE |
3751                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3752         uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3753                 GEN8_PIPE_FIFO_UNDERRUN;
3754         int pipe;
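        /* Vblank and FIFO underrun are enabled in IER but stay masked in IMR until requested. */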
3755         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3756         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3757         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3758
3759         for_each_pipe(dev_priv, pipe)
3760                 if (intel_display_power_enabled(dev_priv,
3761                                 POWER_DOMAIN_PIPE(pipe)))
3762                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3763                                           dev_priv->de_irq_mask[pipe],
3764                                           de_pipe_enables);
3765
3766         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3767 }
3768
3769 static int gen8_irq_postinstall(struct drm_device *dev)
3770 {
3771         struct drm_i915_private *dev_priv = dev->dev_private;
3772
3773         ibx_irq_pre_postinstall(dev);
3774
3775         gen8_gt_irq_postinstall(dev_priv);
3776         gen8_de_irq_postinstall(dev_priv);
3777
3778         ibx_irq_postinstall(dev);
3779
3780         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3781         POSTING_READ(GEN8_MASTER_IRQ);
3782
3783         return 0;
3784 }
3785
3786 static int cherryview_irq_postinstall(struct drm_device *dev)
3787 {
3788         struct drm_i915_private *dev_priv = dev->dev_private;
3789         u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3790                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3791                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3792                 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3793         u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3794                 PIPE_CRC_DONE_INTERRUPT_STATUS;
3795         int pipe;
3796
3797         /*
3798          * Leave vblank interrupts masked initially.  enable/disable will
3799          * toggle them based on usage.
3800          */
3801         dev_priv->irq_mask = ~enable_mask;
3802
3803         for_each_pipe(dev_priv, pipe)
3804                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3805
3806         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3807         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3808         for_each_pipe(dev_priv, pipe)
3809                 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3810         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3811
3812         I915_WRITE(VLV_IIR, 0xffffffff);
3813         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3814         I915_WRITE(VLV_IER, enable_mask);
3815
3816         gen8_gt_irq_postinstall(dev_priv);
3817
3818         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3819         POSTING_READ(GEN8_MASTER_IRQ);
3820
3821         return 0;
3822 }
3823
3824 static void gen8_irq_uninstall(struct drm_device *dev)
3825 {
3826         struct drm_i915_private *dev_priv = dev->dev_private;
3827
3828         if (!dev_priv)
3829                 return;
3830
3831         gen8_irq_reset(dev);
3832 }
3833
3834 static void valleyview_irq_uninstall(struct drm_device *dev)
3835 {
3836         struct drm_i915_private *dev_priv = dev->dev_private;
3837         int pipe;
3838
3839         if (!dev_priv)
3840                 return;
3841
3842         I915_WRITE(VLV_MASTER_IER, 0);
3843
3844         for_each_pipe(dev_priv, pipe)
3845                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3846
3847         I915_WRITE(HWSTAM, 0xffffffff);
3848         I915_WRITE(PORT_HOTPLUG_EN, 0);
3849         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3850
3851         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3852         if (dev_priv->display_irqs_enabled)
3853                 valleyview_display_irqs_uninstall(dev_priv);
3854         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3855
3856         dev_priv->irq_mask = 0;
3857
3858         I915_WRITE(VLV_IIR, 0xffffffff);
3859         I915_WRITE(VLV_IMR, 0xffffffff);
3860         I915_WRITE(VLV_IER, 0x0);
3861         POSTING_READ(VLV_IER);
3862 }
3863
3864 static void cherryview_irq_uninstall(struct drm_device *dev)
3865 {
3866         struct drm_i915_private *dev_priv = dev->dev_private;
3867         int pipe;
3868
3869         if (!dev_priv)
3870                 return;
3871
3872         I915_WRITE(GEN8_MASTER_IRQ, 0);
3873         POSTING_READ(GEN8_MASTER_IRQ);
3874
3875 #define GEN8_IRQ_FINI_NDX(type, which)                          \
3876 do {                                                            \
3877         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);       \
3878         I915_WRITE(GEN8_##type##_IER(which), 0);                \
3879         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
3880         POSTING_READ(GEN8_##type##_IIR(which));                 \
3881         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
3882 } while (0)
3883
3884 #define GEN8_IRQ_FINI(type)                             \
3885 do {                                                    \
3886         I915_WRITE(GEN8_##type##_IMR, 0xffffffff);      \
3887         I915_WRITE(GEN8_##type##_IER, 0);               \
3888         I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
3889         POSTING_READ(GEN8_##type##_IIR);                \
3890         I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
3891 } while (0)
3892
3893         GEN8_IRQ_FINI_NDX(GT, 0);
3894         GEN8_IRQ_FINI_NDX(GT, 1);
3895         GEN8_IRQ_FINI_NDX(GT, 2);
3896         GEN8_IRQ_FINI_NDX(GT, 3);
3897
3898         GEN8_IRQ_FINI(PCU);
3899
3900 #undef GEN8_IRQ_FINI
3901 #undef GEN8_IRQ_FINI_NDX
3902
3903         I915_WRITE(PORT_HOTPLUG_EN, 0);
3904         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3905
3906         for_each_pipe(dev_priv, pipe)
3907                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3908
3909         I915_WRITE(VLV_IMR, 0xffffffff);
3910         I915_WRITE(VLV_IER, 0x0);
3911         I915_WRITE(VLV_IIR, 0xffffffff);
3912         POSTING_READ(VLV_IIR);
3913 }
3914
3915 static void ironlake_irq_uninstall(struct drm_device *dev)
3916 {
3917         struct drm_i915_private *dev_priv = dev->dev_private;
3918
3919         if (!dev_priv)
3920                 return;
3921
3922         ironlake_irq_reset(dev);
3923 }
3924
3925 static void i8xx_irq_preinstall(struct drm_device * dev)
3926 {
3927         struct drm_i915_private *dev_priv = dev->dev_private;
3928         int pipe;
3929
3930         for_each_pipe(dev_priv, pipe)
3931                 I915_WRITE(PIPESTAT(pipe), 0);
3932         I915_WRITE16(IMR, 0xffff);
3933         I915_WRITE16(IER, 0x0);
3934         POSTING_READ16(IER);
3935 }
3936
3937 static int i8xx_irq_postinstall(struct drm_device *dev)
3938 {
3939         struct drm_i915_private *dev_priv = dev->dev_private;
3940
3941         I915_WRITE16(EMR,
3942                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3943
3944         /* Unmask the interrupts that we always want on. */
3945         dev_priv->irq_mask =
3946                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3947                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3948                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3949                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3950                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3951         I915_WRITE16(IMR, dev_priv->irq_mask);
3952
3953         I915_WRITE16(IER,
3954                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3955                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3956                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3957                      I915_USER_INTERRUPT);
3958         POSTING_READ16(IER);
3959
3960         /* Interrupt setup is already guaranteed to be single-threaded; this is
3961          * just to make the assert_spin_locked check happy. */
3962         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3963         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3964         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3965         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3966
3967         return 0;
3968 }
3969
3970 /*
3971  * Returns true when a page flip has completed.
3972  */
3973 static bool i8xx_handle_vblank(struct drm_device *dev,
3974                                int plane, int pipe, u32 iir)
3975 {
3976         struct drm_i915_private *dev_priv = dev->dev_private;
3977         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3978
3979         if (!intel_pipe_handle_vblank(dev, pipe))
3980                 return false;
3981
3982         if ((iir & flip_pending) == 0)
3983                 goto check_page_flip;
3984
3985         intel_prepare_page_flip(dev, plane);
3986
3987         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3988          * to '0' on the following vblank, i.e. IIR has the Pendingflip
3989          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3990          * the flip is completed (no longer pending). Since this doesn't raise
3991          * an interrupt per se, we watch for the change at vblank.
3992          */
3993         if (I915_READ16(ISR) & flip_pending)
3994                 goto check_page_flip;
3995
3996         intel_finish_page_flip(dev, pipe);
3997         return true;
3998
3999 check_page_flip:
4000         intel_check_page_flip(dev, pipe);
4001         return false;
4002 }
4003
4004 static irqreturn_t i8xx_irq_handler(void *arg)
4005 {
4006         struct drm_device *dev = arg;
4007         struct drm_i915_private *dev_priv = dev->dev_private;
4008         u16 iir, new_iir;
4009         u32 pipe_stats[2];
4010         int pipe;
4011         u16 flip_mask =
4012                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4013                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4014
4015         iir = I915_READ16(IIR);
4016         if (iir == 0)
4017                 return;
4018
4019         while (iir & ~flip_mask) {
4020                 /* Can't rely on pipestat interrupt bit in iir as it might
4021                  * have been cleared after the pipestat interrupt was received.
4022                  * It doesn't set the bit in iir again, but it still produces
4023                  * interrupts (for non-MSI).
4024                  */
4025                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4026                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4027                         i915_handle_error(dev, false,
4028                                           "Command parser error, iir 0x%08x",
4029                                           iir);
4030
4031                 for_each_pipe(dev_priv, pipe) {
4032                         int reg = PIPESTAT(pipe);
4033                         pipe_stats[pipe] = I915_READ(reg);
4034
4035                         /*
4036                          * Clear the PIPE*STAT regs before the IIR
4037                          */
4038                         if (pipe_stats[pipe] & 0x8000ffff)
4039                                 I915_WRITE(reg, pipe_stats[pipe]);
4040                 }
4041                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4042
4043                 I915_WRITE16(IIR, iir & ~flip_mask);
4044                 new_iir = I915_READ16(IIR); /* Flush posted writes */
4045
4046                 i915_update_dri1_breadcrumb(dev);
4047
4048                 if (iir & I915_USER_INTERRUPT)
4049                         notify_ring(dev, &dev_priv->ring[RCS]);
4050
4051                 for_each_pipe(dev_priv, pipe) {
4052                         int plane = pipe;
4053                         if (HAS_FBC(dev))
4054                                 plane = !plane;
4055
4056                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4057                             i8xx_handle_vblank(dev, plane, pipe, iir))
4058                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4059
4060                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4061                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4062
4063                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4064                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4065                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4066                 }
4067
4068                 iir = new_iir;
4069         }
4070
4071 }
4072
4073 static void i8xx_irq_uninstall(struct drm_device * dev)
4074 {
4075         struct drm_i915_private *dev_priv = dev->dev_private;
4076         int pipe;
4077
4078         for_each_pipe(dev_priv, pipe) {
4079                 /* Clear enable bits; then clear status bits */
4080                 I915_WRITE(PIPESTAT(pipe), 0);
4081                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4082         }
4083         I915_WRITE16(IMR, 0xffff);
4084         I915_WRITE16(IER, 0x0);
4085         I915_WRITE16(IIR, I915_READ16(IIR));
4086 }
4087
4088 static void i915_irq_preinstall(struct drm_device * dev)
4089 {
4090         struct drm_i915_private *dev_priv = dev->dev_private;
4091         int pipe;
4092
4093         if (I915_HAS_HOTPLUG(dev)) {
4094                 I915_WRITE(PORT_HOTPLUG_EN, 0);
4095                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4096         }
4097
4098         I915_WRITE16(HWSTAM, 0xeffe);
4099         for_each_pipe(dev_priv, pipe)
4100                 I915_WRITE(PIPESTAT(pipe), 0);
4101         I915_WRITE(IMR, 0xffffffff);
4102         I915_WRITE(IER, 0x0);
4103         POSTING_READ(IER);
4104 }
4105
4106 static int i915_irq_postinstall(struct drm_device *dev)
4107 {
4108         struct drm_i915_private *dev_priv = dev->dev_private;
4109         u32 enable_mask;
4110
4111         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4112
4113         /* Unmask the interrupts that we always want on. */
4114         dev_priv->irq_mask =
4115                 ~(I915_ASLE_INTERRUPT |
4116                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4117                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4118                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4119                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4120                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4121
4122         enable_mask =
4123                 I915_ASLE_INTERRUPT |
4124                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4125                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4126                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4127                 I915_USER_INTERRUPT;
4128
4129         if (I915_HAS_HOTPLUG(dev)) {
4130                 I915_WRITE(PORT_HOTPLUG_EN, 0);
4131                 POSTING_READ(PORT_HOTPLUG_EN);
4132
4133                 /* Enable in IER... */
4134                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4135                 /* and unmask in IMR */
4136                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4137         }
4138
4139         I915_WRITE(IMR, dev_priv->irq_mask);
4140         I915_WRITE(IER, enable_mask);
4141         POSTING_READ(IER);
4142
4143         i915_enable_asle_pipestat(dev);
4144
4145         /* Interrupt setup is already guaranteed to be single-threaded; this is
4146          * just to keep the assert_spin_locked check happy. */
4147         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4148         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4149         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4150         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4151
4152         return 0;
4153 }
4154
4155 /*
4156  * Returns true when a page flip has completed.
4157  */
4158 static bool i915_handle_vblank(struct drm_device *dev,
4159                                int plane, int pipe, u32 iir)
4160 {
4161         struct drm_i915_private *dev_priv = dev->dev_private;
4162         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4163
4164         if (!intel_pipe_handle_vblank(dev, pipe))
4165                 return false;
4166
4167         if ((iir & flip_pending) == 0)
4168                 goto check_page_flip;
4169
4170         intel_prepare_page_flip(dev, plane);
4171
4172         /* We detect FlipDone by looking for the change in PendingFlip from '1'
4173          * to '0' on the following vblank: IIR still has PendingFlip asserted
4174          * from the MI_DISPLAY_FLIP, but ISR has deasserted it, meaning the
4175          * flip has completed (it is no longer pending). Since this doesn't
4176          * raise an interrupt per se, we watch for the change at vblank.
4177          */
4178         if (I915_READ(ISR) & flip_pending)
4179                 goto check_page_flip;
4180
4181         intel_finish_page_flip(dev, pipe);
4182         return true;
4183
4184 check_page_flip:
4185         intel_check_page_flip(dev, pipe);
4186         return false;
4187 }
4188
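/*
 * Gen3 interrupt handler. Loops until IIR (ignoring any flip-pending
 * bits still being tracked) reads back zero, dispatching hotplug,
 * ring, vblank/page-flip, CRC and FIFO-underrun events on each pass.
 */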
4189 static irqreturn_t i915_irq_handler(void *arg)
4190 {
4191         struct drm_device *dev = arg;
4192         struct drm_i915_private *dev_priv = dev->dev_private;
4193         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4194         u32 flip_mask =
4195                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4196                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4197         int pipe;
4198
4199         iir = I915_READ(IIR);
4200         do {
4201                 bool irq_received = (iir & ~flip_mask) != 0;
4202                 bool blc_event = false;
4203
4204                 /* Can't rely on pipestat interrupt bit in iir as it might
4205                  * have been cleared after the pipestat interrupt was received.
4206                  * It doesn't set the bit in iir again, but it still produces
4207                  * interrupts (for non-MSI).
4208                  */
4209                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4210                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4211                         i915_handle_error(dev, false,
4212                                           "Command parser error, iir 0x%08x",
4213                                           iir);
4214
4215                 for_each_pipe(dev_priv, pipe) {
4216                         int reg = PIPESTAT(pipe);
4217                         pipe_stats[pipe] = I915_READ(reg);
4218
4219                         /* Clear the PIPE*STAT regs before the IIR */
4220                         if (pipe_stats[pipe] & 0x8000ffff) {
4221                                 I915_WRITE(reg, pipe_stats[pipe]);
4222                                 irq_received = true;
4223                         }
4224                 }
4225                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4226
4227                 if (!irq_received)
4228                         break;
4229
4230                 /* Consume port events first.  Then clear IIR, or we'll miss events */
4231                 if (I915_HAS_HOTPLUG(dev) &&
4232                     iir & I915_DISPLAY_PORT_INTERRUPT)
4233                         i9xx_hpd_irq_handler(dev);
4234
4235                 I915_WRITE(IIR, iir & ~flip_mask);
4236                 new_iir = I915_READ(IIR); /* Flush posted writes */
4237
4238                 if (iir & I915_USER_INTERRUPT)
4239                         notify_ring(dev, &dev_priv->ring[RCS]);
4240
4241                 for_each_pipe(dev_priv, pipe) {
4242                         int plane = pipe;
4243                         if (HAS_FBC(dev))
4244                                 plane = !plane;
4245
4246                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4247                             i915_handle_vblank(dev, plane, pipe, iir))
4248                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4249
4250                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4251                                 blc_event = true;
4252
4253                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4254                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4255
4256                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4257                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4258                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4259                 }
4260
4261                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4262                         intel_opregion_asle_intr(dev);
4263
4264                 /* With MSI, interrupts are only generated when iir
4265                  * transitions from zero to nonzero.  If another bit got
4266                  * set while we were handling the existing iir bits, then
4267                  * we would never get another interrupt.
4268                  *
4269                  * This is fine on non-MSI as well, as if we hit this path
4270                  * we avoid exiting the interrupt handler only to generate
4271                  * another one.
4272                  *
4273                  * Note that for MSI this could cause a stray interrupt report
4274                  * if an interrupt landed in the time between writing IIR and
4275                  * the posting read.  This should be rare enough to never
4276                  * trigger the 99% of 100,000 interrupts test for disabling
4277                  * stray interrupts.
4278                  */
4279                 iir = new_iir;
4280         } while (iir & ~flip_mask);
4281
4282         i915_update_dri1_breadcrumb(dev);
4283
4284 }
4285
4286 static void i915_irq_uninstall(struct drm_device * dev)
4287 {
4288         struct drm_i915_private *dev_priv = dev->dev_private;
4289         int pipe;
4290
4291         if (I915_HAS_HOTPLUG(dev)) {
4292                 I915_WRITE(PORT_HOTPLUG_EN, 0);
4293                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4294         }
4295
4296         I915_WRITE16(HWSTAM, 0xffff);
4297         for_each_pipe(dev_priv, pipe) {
4298                 /* Clear enable bits; then clear status bits */
4299                 I915_WRITE(PIPESTAT(pipe), 0);
4300                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4301         }
4302         I915_WRITE(IMR, 0xffffffff);
4303         I915_WRITE(IER, 0x0);
4304
4305         I915_WRITE(IIR, I915_READ(IIR));
4306 }
4307
4308 static void i965_irq_preinstall(struct drm_device * dev)
4309 {
4310         struct drm_i915_private *dev_priv = dev->dev_private;
4311         int pipe;
4312
4313         I915_WRITE(PORT_HOTPLUG_EN, 0);
4314         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4315
4316         I915_WRITE(HWSTAM, 0xeffe);
4317         for_each_pipe(dev_priv, pipe)
4318                 I915_WRITE(PIPESTAT(pipe), 0);
4319         I915_WRITE(IMR, 0xffffffff);
4320         I915_WRITE(IER, 0x0);
4321         POSTING_READ(IER);
4322 }
4323
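/*
 * Gen4/G4X post-install: build the IMR/IER masks (adding the BSD ring
 * interrupt on G4X), enable the GMBUS and CRC pipe status bits, and
 * program EMR with the chipset-specific error mask.
 */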
4324 static int i965_irq_postinstall(struct drm_device *dev)
4325 {
4326         struct drm_i915_private *dev_priv = dev->dev_private;
4327         u32 enable_mask;
4328         u32 error_mask;
4329
4330         /* Unmask the interrupts that we always want on. */
4331         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4332                                I915_DISPLAY_PORT_INTERRUPT |
4333                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4334                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4335                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4336                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4337                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4338
4339         enable_mask = ~dev_priv->irq_mask;
4340         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4341                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4342         enable_mask |= I915_USER_INTERRUPT;
4343
4344         if (IS_G4X(dev))
4345                 enable_mask |= I915_BSD_USER_INTERRUPT;
4346
4347         /* Interrupt setup is already guaranteed to be single-threaded; this is
4348          * just to keep the assert_spin_locked check happy. */
4349         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4350         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4351         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4352         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4353         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4354
4355         /*
4356          * Enable some error detection, note the instruction error mask
4357          * bit is reserved, so we leave it masked.
4358          */
4359         if (IS_G4X(dev)) {
4360                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4361                                GM45_ERROR_MEM_PRIV |
4362                                GM45_ERROR_CP_PRIV |
4363                                I915_ERROR_MEMORY_REFRESH);
4364         } else {
4365                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4366                                I915_ERROR_MEMORY_REFRESH);
4367         }
4368         I915_WRITE(EMR, error_mask);
4369
4370         I915_WRITE(IMR, dev_priv->irq_mask);
4371         I915_WRITE(IER, enable_mask);
4372         POSTING_READ(IER);
4373
4374         I915_WRITE(PORT_HOTPLUG_EN, 0);
4375         POSTING_READ(PORT_HOTPLUG_EN);
4376
4377         i915_enable_asle_pipestat(dev);
4378
4379         return 0;
4380 }
4381
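/*
 * Program PORT_HOTPLUG_EN from the per-pin hotplug state.  Used on
 * platforms with the legacy hotplug register and called with the irq
 * lock held.
 */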
4382 static void i915_hpd_irq_setup(struct drm_device *dev)
4383 {
4384         struct drm_i915_private *dev_priv = dev->dev_private;
4385         struct intel_encoder *intel_encoder;
4386         u32 hotplug_en;
4387
4388         assert_spin_locked(&dev_priv->irq_lock);
4389
4390         if (I915_HAS_HOTPLUG(dev)) {
4391                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4392                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4393                 /* Note HDMI and DP share hotplug bits */
4394                 /* enable bits are the same for all generations */
4395                 for_each_intel_encoder(dev, intel_encoder)
4396                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4397                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4398                 /* Programming the CRT detection parameters tends
4399                  * to generate a spurious hotplug event about three
4400                  * seconds later.  So just do it once.
4401                  */
4402                 if (IS_G4X(dev))
4403                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4404                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4405                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4406
4407                 /* Ignore TV since it's buggy */
4408                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4409         }
4410 }
4411
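/*
 * Gen4/G4X interrupt handler; same structure as the gen3 handler above,
 * with display port hotplug, the BSD ring and GMBUS events added.
 */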
4412 static irqreturn_t i965_irq_handler(void *arg)
4413 {
4414         struct drm_device *dev = arg;
4415         struct drm_i915_private *dev_priv = dev->dev_private;
4416         u32 iir, new_iir;
4417         u32 pipe_stats[I915_MAX_PIPES];
4418         int pipe;
4419         u32 flip_mask =
4420                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4421                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4422
4423         iir = I915_READ(IIR);
4424
4425         for (;;) {
4426                 bool irq_received = (iir & ~flip_mask) != 0;
4427                 bool blc_event = false;
4428
4429                 /* Can't rely on pipestat interrupt bit in iir as it might
4430                  * have been cleared after the pipestat interrupt was received.
4431                  * It doesn't set the bit in iir again, but it still produces
4432                  * interrupts (for non-MSI).
4433                  */
4434                 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4435                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4436                         i915_handle_error(dev, false,
4437                                           "Command parser error, iir 0x%08x",
4438                                           iir);
4439
4440                 for_each_pipe(dev_priv, pipe) {
4441                         int reg = PIPESTAT(pipe);
4442                         pipe_stats[pipe] = I915_READ(reg);
4443
4444                         /*
4445                          * Clear the PIPE*STAT regs before the IIR
4446                          */
4447                         if (pipe_stats[pipe] & 0x8000ffff) {
4448                                 I915_WRITE(reg, pipe_stats[pipe]);
4449                                 irq_received = true;
4450                         }
4451                 }
4452                 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4453
4454                 if (!irq_received)
4455                         break;
4456
4457                 /* Consume port events first.  Then clear IIR, or we'll miss events */
4458                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4459                         i9xx_hpd_irq_handler(dev);
4460
4461                 I915_WRITE(IIR, iir & ~flip_mask);
4462                 new_iir = I915_READ(IIR); /* Flush posted writes */
4463
4464                 if (iir & I915_USER_INTERRUPT)
4465                         notify_ring(dev, &dev_priv->ring[RCS]);
4466                 if (iir & I915_BSD_USER_INTERRUPT)
4467                         notify_ring(dev, &dev_priv->ring[VCS]);
4468
4469                 for_each_pipe(dev_priv, pipe) {
4470                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4471                             i915_handle_vblank(dev, pipe, pipe, iir))
4472                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4473
4474                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4475                                 blc_event = true;
4476
4477                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4478                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4479
4480                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4481                             intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4482                                 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4483                 }
4484
4485                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4486                         intel_opregion_asle_intr(dev);
4487
4488                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4489                         gmbus_irq_handler(dev);
4490
4491                 /* With MSI, interrupts are only generated when iir
4492                  * transitions from zero to nonzero.  If another bit got
4493                  * set while we were handling the existing iir bits, then
4494                  * we would never get another interrupt.
4495                  *
4496                  * This is fine on non-MSI as well, as if we hit this path
4497                  * we avoid exiting the interrupt handler only to generate
4498                  * another one.
4499                  *
4500                  * Note that for MSI this could cause a stray interrupt report
4501                  * if an interrupt landed in the time between writing IIR and
4502                  * the posting read.  This should be rare enough to never
4503                  * trigger the 99% of 100,000 interrupts test for disabling
4504                  * stray interrupts.
4505                  */
4506                 iir = new_iir;
4507         }
4508
4509         i915_update_dri1_breadcrumb(dev);
4510
4511 }
4512
4513 static void i965_irq_uninstall(struct drm_device * dev)
4514 {
4515         struct drm_i915_private *dev_priv = dev->dev_private;
4516         int pipe;
4517
4518         if (!dev_priv)
4519                 return;
4520
4521         I915_WRITE(PORT_HOTPLUG_EN, 0);
4522         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4523
4524         I915_WRITE(HWSTAM, 0xffffffff);
4525         for_each_pipe(dev_priv, pipe)
4526                 I915_WRITE(PIPESTAT(pipe), 0);
4527         I915_WRITE(IMR, 0xffffffff);
4528         I915_WRITE(IER, 0x0);
4529
4530         for_each_pipe(dev_priv, pipe)
4531                 I915_WRITE(PIPESTAT(pipe),
4532                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4533         I915_WRITE(IIR, I915_READ(IIR));
4534 }
4535
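/*
 * Delayed work that re-arms HPD pins which were previously disabled
 * (e.g. by hotplug interrupt storm detection), restoring each affected
 * connector's polling mode before reprogramming the hardware.
 */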
4536 static void intel_hpd_irq_reenable(struct work_struct *work)
4537 {
4538         struct drm_i915_private *dev_priv =
4539                 container_of(work, typeof(*dev_priv),
4540                              hotplug_reenable_work.work);
4541         struct drm_device *dev = dev_priv->dev;
4542         struct drm_mode_config *mode_config = &dev->mode_config;
4543         int i;
4544
4545         intel_runtime_pm_get(dev_priv);
4546
4547         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4548         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4549                 struct drm_connector *connector;
4550
4551                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4552                         continue;
4553
4554                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4555
4556                 list_for_each_entry(connector, &mode_config->connector_list, head) {
4557                         struct intel_connector *intel_connector = to_intel_connector(connector);
4558
4559                         if (intel_connector->encoder->hpd_pin == i) {
4560                                 if (connector->polled != intel_connector->polled)
4561                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4562                                                          connector->name);
4563                                 connector->polled = intel_connector->polled;
4564                                 if (!connector->polled)
4565                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4566                         }
4567                 }
4568         }
4569         if (dev_priv->display.hpd_irq_setup)
4570                 dev_priv->display.hpd_irq_setup(dev);
4571         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4572
4573         intel_runtime_pm_put(dev_priv);
4574 }
4575
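/*
 * One-time IRQ setup: initialize the work items and hangcheck timer,
 * pick the vblank counter implementation, and select the
 * per-generation IRQ entry points and hotplug setup hook.
 */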
4576 void intel_irq_init(struct drm_device *dev)
4577 {
4578         struct drm_i915_private *dev_priv = dev->dev_private;
4579
4580         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4581         INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4582         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4583         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4584         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4585
4586         /* Track which RPS events are enabled on this platform */
4587         if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
4588                 /* WaGsvRC0ResidencyMethod:vlv */
4589                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4590         else
4591                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4592
4593         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4594                     i915_hangcheck_elapsed,
4595                     (unsigned long) dev);
4596         INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4597                           intel_hpd_irq_reenable);
4598
4599         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4600
4601         /* Haven't installed the IRQ handler yet */
4602         dev_priv->pm._irqs_disabled = true;
4603
4604         if (IS_GEN2(dev)) {
4605                 dev->max_vblank_count = 0;
4606                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4607         } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
4608                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4609                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4610         } else {
4611                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4612                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4613         }
4614
4615         /*
4616          * Opt out of the vblank disable timer on everything except gen2.
4617          * Gen2 doesn't have a hardware frame counter and so depends on
4618          * vblank interrupts to produce sane vblank sequence numbers.
4619          */
4620         if (!IS_GEN2(dev))
4621                 dev->vblank_disable_immediate = true;
4622
4623         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4624                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4625                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4626         }
4627
4628         if (IS_CHERRYVIEW(dev)) {
4629                 dev->driver->irq_handler = cherryview_irq_handler;
4630                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4631                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4632                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4633                 dev->driver->enable_vblank = valleyview_enable_vblank;
4634                 dev->driver->disable_vblank = valleyview_disable_vblank;
4635                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4636         } else if (IS_VALLEYVIEW(dev)) {
4637                 dev->driver->irq_handler = valleyview_irq_handler;
4638                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4639                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4640                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4641                 dev->driver->enable_vblank = valleyview_enable_vblank;
4642                 dev->driver->disable_vblank = valleyview_disable_vblank;
4643                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4644         } else if (IS_GEN8(dev)) {
4645                 dev->driver->irq_handler = gen8_irq_handler;
4646                 dev->driver->irq_preinstall = gen8_irq_reset;
4647                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4648                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4649                 dev->driver->enable_vblank = gen8_enable_vblank;
4650                 dev->driver->disable_vblank = gen8_disable_vblank;
4651                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4652         } else if (HAS_PCH_SPLIT(dev)) {
4653                 dev->driver->irq_handler = ironlake_irq_handler;
4654                 dev->driver->irq_preinstall = ironlake_irq_reset;
4655                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4656                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4657                 dev->driver->enable_vblank = ironlake_enable_vblank;
4658                 dev->driver->disable_vblank = ironlake_disable_vblank;
4659                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4660         } else {
4661                 if (INTEL_INFO(dev)->gen == 2) {
4662                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4663                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4664                         dev->driver->irq_handler = i8xx_irq_handler;
4665                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4666                 } else if (INTEL_INFO(dev)->gen == 3) {
4667                         dev->driver->irq_preinstall = i915_irq_preinstall;
4668                         dev->driver->irq_postinstall = i915_irq_postinstall;
4669                         dev->driver->irq_uninstall = i915_irq_uninstall;
4670                         dev->driver->irq_handler = i915_irq_handler;
4671                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4672                 } else {
4673                         dev->driver->irq_preinstall = i965_irq_preinstall;
4674                         dev->driver->irq_postinstall = i965_irq_postinstall;
4675                         dev->driver->irq_uninstall = i965_irq_uninstall;
4676                         dev->driver->irq_handler = i965_irq_handler;
4677                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4678                 }
4679                 dev->driver->enable_vblank = i915_enable_vblank;
4680                 dev->driver->disable_vblank = i915_disable_vblank;
4681         }
4682 }
4683
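/*
 * (Re)initialize hotplug detection: mark every HPD pin enabled, reset
 * connector polling, and reprogram the hardware under the irq lock.
 */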
4684 void intel_hpd_init(struct drm_device *dev)
4685 {
4686         struct drm_i915_private *dev_priv = dev->dev_private;
4687         struct drm_mode_config *mode_config = &dev->mode_config;
4688         struct drm_connector *connector;
4689         int i;
4690
4691         for (i = 1; i < HPD_NUM_PINS; i++) {
4692                 dev_priv->hpd_stats[i].hpd_cnt = 0;
4693                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4694         }
4695         list_for_each_entry(connector, &mode_config->connector_list, head) {
4696                 struct intel_connector *intel_connector = to_intel_connector(connector);
4697                 connector->polled = intel_connector->polled;
4698                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4699                         connector->polled = DRM_CONNECTOR_POLL_HPD;
4700         }
4701
4702         /* Interrupt setup is already guaranteed to be single-threaded; this is
4703          * just to keep the assert_spin_locked checks happy. */
4704         lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4705         if (dev_priv->display.hpd_irq_setup)
4706                 dev_priv->display.hpd_irq_setup(dev);
4707         lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4708 }
4709
4710 /* Disable interrupts so we can allow runtime PM. */
4711 void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4712 {
4713         struct drm_i915_private *dev_priv = dev->dev_private;
4714
4715         dev->driver->irq_uninstall(dev);
4716         dev_priv->pm._irqs_disabled = true;
4717 }
4718
4719 /* Restore interrupts so we can recover from runtime PM. */
4720 void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4721 {
4722         struct drm_i915_private *dev_priv = dev->dev_private;
4723
4724         dev_priv->pm._irqs_disabled = false;
4725         dev->driver->irq_preinstall(dev);
4726         dev->driver->irq_postinstall(dev);
4727 }