sys/dev/drm/i915/intel_display.c (dragonfly.git, commit 63a3be9a13b1389a7a1c3903548d0efe22f90df6)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <ddb/ddb.h>
28 #include <sys/limits.h>
29
30 #include <drm/drmP.h>
31 #include <drm/drm_edid.h>
32 #include "intel_drv.h"
33 #include <drm/i915_drm.h>
34 #include "i915_drv.h"
35 #include <drm/drm_dp_helper.h>
36 #include <drm/drm_crtc_helper.h>
37
38 #include <linux/err.h>
39
40 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
41 static void intel_increase_pllclock(struct drm_crtc *crtc);
42 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
43
44 typedef struct {
45         /* given values */
46         int n;
47         int m1, m2;
48         int p1, p2;
49         /* derived values */
50         int     dot;
51         int     vco;
52         int     m;
53         int     p;
54 } intel_clock_t;
55
56 typedef struct {
57         int     min, max;
58 } intel_range_t;
59
60 typedef struct {
61         int     dot_limit;
62         int     p2_slow, p2_fast;
63 } intel_p2_t;
64
65 #define INTEL_P2_NUM                  2
66 typedef struct intel_limit intel_limit_t;
67 struct intel_limit {
68         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
69         intel_p2_t          p2;
70         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
71                         int, int, intel_clock_t *, intel_clock_t *);
72 };
73
74 /* FDI */
75 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
76
77 int
78 intel_pch_rawclk(struct drm_device *dev)
79 {
80         struct drm_i915_private *dev_priv = dev->dev_private;
81
82         WARN_ON(!HAS_PCH_SPLIT(dev));
83
84         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
85 }
86
87 static bool
88 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
89                     int target, int refclk, intel_clock_t *match_clock,
90                     intel_clock_t *best_clock);
91 static bool
92 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
93                         int target, int refclk, intel_clock_t *match_clock,
94                         intel_clock_t *best_clock);
95
96 static bool
97 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
98                       int target, int refclk, intel_clock_t *match_clock,
99                       intel_clock_t *best_clock);
100 static bool
101 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
102                            int target, int refclk, intel_clock_t *match_clock,
103                            intel_clock_t *best_clock);
104
105 static bool
106 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
107                         int target, int refclk, intel_clock_t *match_clock,
108                         intel_clock_t *best_clock);
109
110 static inline u32 /* units of 100MHz */
111 intel_fdi_link_freq(struct drm_device *dev)
112 {
113         if (IS_GEN5(dev)) {
114                 struct drm_i915_private *dev_priv = dev->dev_private;
115                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
116         } else
117                 return 27;
118 }
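/*
 * Editor's note (not in the original source): the value returned above is in
 * units of 100 MHz, per the comment on this function.  A return of 27 thus
 * corresponds to a 2.7 GHz FDI link clock, consistent with IRONLAKE_FDI_FREQ
 * (2700000, in kHz) defined earlier in this file.
 */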
119
120 static const intel_limit_t intel_limits_i8xx_dvo = {
121         .dot = { .min = 25000, .max = 350000 },
122         .vco = { .min = 930000, .max = 1400000 },
123         .n = { .min = 3, .max = 16 },
124         .m = { .min = 96, .max = 140 },
125         .m1 = { .min = 18, .max = 26 },
126         .m2 = { .min = 6, .max = 16 },
127         .p = { .min = 4, .max = 128 },
128         .p1 = { .min = 2, .max = 33 },
129         .p2 = { .dot_limit = 165000,
130                 .p2_slow = 4, .p2_fast = 2 },
131         .find_pll = intel_find_best_PLL,
132 };
133
134 static const intel_limit_t intel_limits_i8xx_lvds = {
135         .dot = { .min = 25000, .max = 350000 },
136         .vco = { .min = 930000, .max = 1400000 },
137         .n = { .min = 3, .max = 16 },
138         .m = { .min = 96, .max = 140 },
139         .m1 = { .min = 18, .max = 26 },
140         .m2 = { .min = 6, .max = 16 },
141         .p = { .min = 4, .max = 128 },
142         .p1 = { .min = 1, .max = 6 },
143         .p2 = { .dot_limit = 165000,
144                 .p2_slow = 14, .p2_fast = 7 },
145         .find_pll = intel_find_best_PLL,
146 };
147
148 static const intel_limit_t intel_limits_i9xx_sdvo = {
149         .dot = { .min = 20000, .max = 400000 },
150         .vco = { .min = 1400000, .max = 2800000 },
151         .n = { .min = 1, .max = 6 },
152         .m = { .min = 70, .max = 120 },
153         .m1 = { .min = 8, .max = 18 },
154         .m2 = { .min = 3, .max = 7 },
155         .p = { .min = 5, .max = 80 },
156         .p1 = { .min = 1, .max = 8 },
157         .p2 = { .dot_limit = 200000,
158                 .p2_slow = 10, .p2_fast = 5 },
159         .find_pll = intel_find_best_PLL,
160 };
161
162 static const intel_limit_t intel_limits_i9xx_lvds = {
163         .dot = { .min = 20000, .max = 400000 },
164         .vco = { .min = 1400000, .max = 2800000 },
165         .n = { .min = 1, .max = 6 },
166         .m = { .min = 70, .max = 120 },
167         .m1 = { .min = 8, .max = 18 },
168         .m2 = { .min = 3, .max = 7 },
169         .p = { .min = 7, .max = 98 },
170         .p1 = { .min = 1, .max = 8 },
171         .p2 = { .dot_limit = 112000,
172                 .p2_slow = 14, .p2_fast = 7 },
173         .find_pll = intel_find_best_PLL,
174 };
175
176
177 static const intel_limit_t intel_limits_g4x_sdvo = {
178         .dot = { .min = 25000, .max = 270000 },
179         .vco = { .min = 1750000, .max = 3500000},
180         .n = { .min = 1, .max = 4 },
181         .m = { .min = 104, .max = 138 },
182         .m1 = { .min = 17, .max = 23 },
183         .m2 = { .min = 5, .max = 11 },
184         .p = { .min = 10, .max = 30 },
185         .p1 = { .min = 1, .max = 3},
186         .p2 = { .dot_limit = 270000,
187                 .p2_slow = 10,
188                 .p2_fast = 10
189         },
190         .find_pll = intel_g4x_find_best_PLL,
191 };
192
193 static const intel_limit_t intel_limits_g4x_hdmi = {
194         .dot = { .min = 22000, .max = 400000 },
195         .vco = { .min = 1750000, .max = 3500000},
196         .n = { .min = 1, .max = 4 },
197         .m = { .min = 104, .max = 138 },
198         .m1 = { .min = 16, .max = 23 },
199         .m2 = { .min = 5, .max = 11 },
200         .p = { .min = 5, .max = 80 },
201         .p1 = { .min = 1, .max = 8},
202         .p2 = { .dot_limit = 165000,
203                 .p2_slow = 10, .p2_fast = 5 },
204         .find_pll = intel_g4x_find_best_PLL,
205 };
206
207 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
208         .dot = { .min = 20000, .max = 115000 },
209         .vco = { .min = 1750000, .max = 3500000 },
210         .n = { .min = 1, .max = 3 },
211         .m = { .min = 104, .max = 138 },
212         .m1 = { .min = 17, .max = 23 },
213         .m2 = { .min = 5, .max = 11 },
214         .p = { .min = 28, .max = 112 },
215         .p1 = { .min = 2, .max = 8 },
216         .p2 = { .dot_limit = 0,
217                 .p2_slow = 14, .p2_fast = 14
218         },
219         .find_pll = intel_g4x_find_best_PLL,
220 };
221
222 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
223         .dot = { .min = 80000, .max = 224000 },
224         .vco = { .min = 1750000, .max = 3500000 },
225         .n = { .min = 1, .max = 3 },
226         .m = { .min = 104, .max = 138 },
227         .m1 = { .min = 17, .max = 23 },
228         .m2 = { .min = 5, .max = 11 },
229         .p = { .min = 14, .max = 42 },
230         .p1 = { .min = 2, .max = 6 },
231         .p2 = { .dot_limit = 0,
232                 .p2_slow = 7, .p2_fast = 7
233         },
234         .find_pll = intel_g4x_find_best_PLL,
235 };
236
237 static const intel_limit_t intel_limits_g4x_display_port = {
238         .dot = { .min = 161670, .max = 227000 },
239         .vco = { .min = 1750000, .max = 3500000},
240         .n = { .min = 1, .max = 2 },
241         .m = { .min = 97, .max = 108 },
242         .m1 = { .min = 0x10, .max = 0x12 },
243         .m2 = { .min = 0x05, .max = 0x06 },
244         .p = { .min = 10, .max = 20 },
245         .p1 = { .min = 1, .max = 2},
246         .p2 = { .dot_limit = 0,
247                 .p2_slow = 10, .p2_fast = 10 },
248         .find_pll = intel_find_pll_g4x_dp,
249 };
250
251 static const intel_limit_t intel_limits_pineview_sdvo = {
252         .dot = { .min = 20000, .max = 400000},
253         .vco = { .min = 1700000, .max = 3500000 },
254         /* Pineview's Ncounter is a ring counter */
255         .n = { .min = 3, .max = 6 },
256         .m = { .min = 2, .max = 256 },
257         /* Pineview only has one combined m divider, which we treat as m2. */
258         .m1 = { .min = 0, .max = 0 },
259         .m2 = { .min = 0, .max = 254 },
260         .p = { .min = 5, .max = 80 },
261         .p1 = { .min = 1, .max = 8 },
262         .p2 = { .dot_limit = 200000,
263                 .p2_slow = 10, .p2_fast = 5 },
264         .find_pll = intel_find_best_PLL,
265 };
266
267 static const intel_limit_t intel_limits_pineview_lvds = {
268         .dot = { .min = 20000, .max = 400000 },
269         .vco = { .min = 1700000, .max = 3500000 },
270         .n = { .min = 3, .max = 6 },
271         .m = { .min = 2, .max = 256 },
272         .m1 = { .min = 0, .max = 0 },
273         .m2 = { .min = 0, .max = 254 },
274         .p = { .min = 7, .max = 112 },
275         .p1 = { .min = 1, .max = 8 },
276         .p2 = { .dot_limit = 112000,
277                 .p2_slow = 14, .p2_fast = 14 },
278         .find_pll = intel_find_best_PLL,
279 };
280
281 /* Ironlake / Sandybridge
282  *
283  * We calculate clock using (register_value + 2) for N/M1/M2, so here
284  * the range value for them is (actual_value - 2).
285  */
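/*
 * Editor's note (illustration, values taken from the tables below): with the
 * (register_value + 2) convention described above, the Ironlake DAC limit
 * .m1 = { .min = 12, .max = 22 } corresponds to actual M1 divisors of 14..24,
 * and .n = { .min = 1, .max = 5 } to actual N divisors of 3..7.
 */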
286 static const intel_limit_t intel_limits_ironlake_dac = {
287         .dot = { .min = 25000, .max = 350000 },
288         .vco = { .min = 1760000, .max = 3510000 },
289         .n = { .min = 1, .max = 5 },
290         .m = { .min = 79, .max = 127 },
291         .m1 = { .min = 12, .max = 22 },
292         .m2 = { .min = 5, .max = 9 },
293         .p = { .min = 5, .max = 80 },
294         .p1 = { .min = 1, .max = 8 },
295         .p2 = { .dot_limit = 225000,
296                 .p2_slow = 10, .p2_fast = 5 },
297         .find_pll = intel_g4x_find_best_PLL,
298 };
299
300 static const intel_limit_t intel_limits_ironlake_single_lvds = {
301         .dot = { .min = 25000, .max = 350000 },
302         .vco = { .min = 1760000, .max = 3510000 },
303         .n = { .min = 1, .max = 3 },
304         .m = { .min = 79, .max = 118 },
305         .m1 = { .min = 12, .max = 22 },
306         .m2 = { .min = 5, .max = 9 },
307         .p = { .min = 28, .max = 112 },
308         .p1 = { .min = 2, .max = 8 },
309         .p2 = { .dot_limit = 225000,
310                 .p2_slow = 14, .p2_fast = 14 },
311         .find_pll = intel_g4x_find_best_PLL,
312 };
313
314 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
315         .dot = { .min = 25000, .max = 350000 },
316         .vco = { .min = 1760000, .max = 3510000 },
317         .n = { .min = 1, .max = 3 },
318         .m = { .min = 79, .max = 127 },
319         .m1 = { .min = 12, .max = 22 },
320         .m2 = { .min = 5, .max = 9 },
321         .p = { .min = 14, .max = 56 },
322         .p1 = { .min = 2, .max = 8 },
323         .p2 = { .dot_limit = 225000,
324                 .p2_slow = 7, .p2_fast = 7 },
325         .find_pll = intel_g4x_find_best_PLL,
326 };
327
328 /* LVDS 100MHz refclk limits. */
329 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
330         .dot = { .min = 25000, .max = 350000 },
331         .vco = { .min = 1760000, .max = 3510000 },
332         .n = { .min = 1, .max = 2 },
333         .m = { .min = 79, .max = 126 },
334         .m1 = { .min = 12, .max = 22 },
335         .m2 = { .min = 5, .max = 9 },
336         .p = { .min = 28, .max = 112 },
337         .p1 = { .min = 2, .max = 8 },
338         .p2 = { .dot_limit = 225000,
339                 .p2_slow = 14, .p2_fast = 14 },
340         .find_pll = intel_g4x_find_best_PLL,
341 };
342
343 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
344         .dot = { .min = 25000, .max = 350000 },
345         .vco = { .min = 1760000, .max = 3510000 },
346         .n = { .min = 1, .max = 3 },
347         .m = { .min = 79, .max = 126 },
348         .m1 = { .min = 12, .max = 22 },
349         .m2 = { .min = 5, .max = 9 },
350         .p = { .min = 14, .max = 42 },
351         .p1 = { .min = 2, .max = 6 },
352         .p2 = { .dot_limit = 225000,
353                 .p2_slow = 7, .p2_fast = 7 },
354         .find_pll = intel_g4x_find_best_PLL,
355 };
356
357 static const intel_limit_t intel_limits_ironlake_display_port = {
358         .dot = { .min = 25000, .max = 350000 },
359         .vco = { .min = 1760000, .max = 3510000},
360         .n = { .min = 1, .max = 2 },
361         .m = { .min = 81, .max = 90 },
362         .m1 = { .min = 12, .max = 22 },
363         .m2 = { .min = 5, .max = 9 },
364         .p = { .min = 10, .max = 20 },
365         .p1 = { .min = 1, .max = 2},
366         .p2 = { .dot_limit = 0,
367                 .p2_slow = 10, .p2_fast = 10 },
368         .find_pll = intel_find_pll_ironlake_dp,
369 };
370
371 static const intel_limit_t intel_limits_vlv_dac = {
372         .dot = { .min = 25000, .max = 270000 },
373         .vco = { .min = 4000000, .max = 6000000 },
374         .n = { .min = 1, .max = 7 },
375         .m = { .min = 22, .max = 450 }, /* guess */
376         .m1 = { .min = 2, .max = 3 },
377         .m2 = { .min = 11, .max = 156 },
378         .p = { .min = 10, .max = 30 },
379         .p1 = { .min = 2, .max = 3 },
380         .p2 = { .dot_limit = 270000,
381                 .p2_slow = 2, .p2_fast = 20 },
382         .find_pll = intel_vlv_find_best_pll,
383 };
384
385 static const intel_limit_t intel_limits_vlv_hdmi = {
386         .dot = { .min = 20000, .max = 165000 },
387         .vco = { .min = 4000000, .max = 5994000},
388         .n = { .min = 1, .max = 7 },
389         .m = { .min = 60, .max = 300 }, /* guess */
390         .m1 = { .min = 2, .max = 3 },
391         .m2 = { .min = 11, .max = 156 },
392         .p = { .min = 10, .max = 30 },
393         .p1 = { .min = 2, .max = 3 },
394         .p2 = { .dot_limit = 270000,
395                 .p2_slow = 2, .p2_fast = 20 },
396         .find_pll = intel_vlv_find_best_pll,
397 };
398
399 static const intel_limit_t intel_limits_vlv_dp = {
400         .dot = { .min = 25000, .max = 270000 },
401         .vco = { .min = 4000000, .max = 6000000 },
402         .n = { .min = 1, .max = 7 },
403         .m = { .min = 22, .max = 450 },
404         .m1 = { .min = 2, .max = 3 },
405         .m2 = { .min = 11, .max = 156 },
406         .p = { .min = 10, .max = 30 },
407         .p1 = { .min = 2, .max = 3 },
408         .p2 = { .dot_limit = 270000,
409                 .p2_slow = 2, .p2_fast = 20 },
410         .find_pll = intel_vlv_find_best_pll,
411 };
412
413 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
414 {
415         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
416
417         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
418                 DRM_ERROR("DPIO idle wait timed out\n");
419                 return 0;
420         }
421
422         I915_WRITE(DPIO_REG, reg);
423         I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
424                    DPIO_BYTE);
425         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
426                 DRM_ERROR("DPIO read wait timed out\n");
427                 return 0;
428         }
429
430         return I915_READ(DPIO_DATA);
431 }
432
433 static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
434                              u32 val)
435 {
436         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
437
438         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
439                 DRM_ERROR("DPIO idle wait timed out\n");
440                 return;
441         }
442
443         I915_WRITE(DPIO_DATA, val);
444         I915_WRITE(DPIO_REG, reg);
445         I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
446                    DPIO_BYTE);
447         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
448                 DRM_ERROR("DPIO write wait timed out\n");
449 }
450
451 static void vlv_init_dpio(struct drm_device *dev)
452 {
453         struct drm_i915_private *dev_priv = dev->dev_private;
454
455         /* Reset the DPIO config */
456         I915_WRITE(DPIO_CTL, 0);
457         POSTING_READ(DPIO_CTL);
458         I915_WRITE(DPIO_CTL, 1);
459         POSTING_READ(DPIO_CTL);
460 }
461
462 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
463                                                 int refclk)
464 {
465         struct drm_device *dev = crtc->dev;
466         const intel_limit_t *limit;
467
468         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
469                 if (intel_is_dual_link_lvds(dev)) {
470                         /* LVDS dual channel */
471                         if (refclk == 100000)
472                                 limit = &intel_limits_ironlake_dual_lvds_100m;
473                         else
474                                 limit = &intel_limits_ironlake_dual_lvds;
475                 } else {
476                         if (refclk == 100000)
477                                 limit = &intel_limits_ironlake_single_lvds_100m;
478                         else
479                                 limit = &intel_limits_ironlake_single_lvds;
480                 }
481         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
482                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
483                 limit = &intel_limits_ironlake_display_port;
484         else
485                 limit = &intel_limits_ironlake_dac;
486
487         return limit;
488 }
489
490 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
491 {
492         struct drm_device *dev = crtc->dev;
493         const intel_limit_t *limit;
494
495         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
496                 if (intel_is_dual_link_lvds(dev))
497                         /* LVDS with dual channel */
498                         limit = &intel_limits_g4x_dual_channel_lvds;
499                 else
500                         /* LVDS with single channel */
501                         limit = &intel_limits_g4x_single_channel_lvds;
502         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
503                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
504                 limit = &intel_limits_g4x_hdmi;
505         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
506                 limit = &intel_limits_g4x_sdvo;
507         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
508                 limit = &intel_limits_g4x_display_port;
509         } else /* The option is for other outputs */
510                 limit = &intel_limits_i9xx_sdvo;
511
512         return limit;
513 }
514
515 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
516 {
517         struct drm_device *dev = crtc->dev;
518         const intel_limit_t *limit;
519
520         if (HAS_PCH_SPLIT(dev))
521                 limit = intel_ironlake_limit(crtc, refclk);
522         else if (IS_G4X(dev)) {
523                 limit = intel_g4x_limit(crtc);
524         } else if (IS_PINEVIEW(dev)) {
525                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
526                         limit = &intel_limits_pineview_lvds;
527                 else
528                         limit = &intel_limits_pineview_sdvo;
529         } else if (IS_VALLEYVIEW(dev)) {
530                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
531                         limit = &intel_limits_vlv_dac;
532                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
533                         limit = &intel_limits_vlv_hdmi;
534                 else
535                         limit = &intel_limits_vlv_dp;
536         } else if (!IS_GEN2(dev)) {
537                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
538                         limit = &intel_limits_i9xx_lvds;
539                 else
540                         limit = &intel_limits_i9xx_sdvo;
541         } else {
542                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
543                         limit = &intel_limits_i8xx_lvds;
544                 else
545                         limit = &intel_limits_i8xx_dvo;
546         }
547         return limit;
548 }
549
550 /* m1 is reserved as 0 in Pineview, n is a ring counter */
551 static void pineview_clock(int refclk, intel_clock_t *clock)
552 {
553         clock->m = clock->m2 + 2;
554         clock->p = clock->p1 * clock->p2;
555         clock->vco = refclk * clock->m / clock->n;
556         clock->dot = clock->vco / clock->p;
557 }
558
559 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
560 {
561         if (IS_PINEVIEW(dev)) {
562                 pineview_clock(refclk, clock);
563                 return;
564         }
565         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
566         clock->p = clock->p1 * clock->p2;
567         clock->vco = refclk * clock->m / (clock->n + 2);
568         clock->dot = clock->vco / clock->p;
569 }
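/*
 * Editor's note (worked example with illustrative divisor values only): for a
 * non-Pineview part with refclk = 96000 kHz and n = 2, m1 = 14, m2 = 6,
 * p1 = 2, p2 = 5, the formulas above give
 *
 *   m   = 5 * (14 + 2) + (6 + 2)   = 88
 *   p   = 2 * 5                    = 10
 *   vco = 96000 * 88 / (2 + 2)     = 2112000 kHz
 *   dot = 2112000 / 10             = 211200 kHz
 *
 * which lands inside every range of intel_limits_i9xx_sdvo defined above.
 */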
570
571 /**
572  * Returns whether any output on the specified pipe is of the specified type
573  */
574 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
575 {
576         struct drm_device *dev = crtc->dev;
577         struct intel_encoder *encoder;
578
579         for_each_encoder_on_crtc(dev, crtc, encoder)
580                 if (encoder->type == type)
581                         return true;
582
583         return false;
584 }
585
586 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
587 /**
588  * Returns whether the given set of divisors are valid for a given refclk with
589  * the given connectors.
590  */
591
592 static bool intel_PLL_is_valid(struct drm_device *dev,
593                                const intel_limit_t *limit,
594                                const intel_clock_t *clock)
595 {
596         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
597                 INTELPllInvalid("p1 out of range\n");
598         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
599                 INTELPllInvalid("p out of range\n");
600         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
601                 INTELPllInvalid("m2 out of range\n");
602         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
603                 INTELPllInvalid("m1 out of range\n");
604         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
605                 INTELPllInvalid("m1 <= m2\n");
606         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
607                 INTELPllInvalid("m out of range\n");
608         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
609                 INTELPllInvalid("n out of range\n");
610         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
611                 INTELPllInvalid("vco out of range\n");
612         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
613          * connector, etc., rather than just a single range.
614          */
615         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
616                 INTELPllInvalid("dot out of range\n");
617
618         return true;
619 }
620
621 static bool
622 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
623                     int target, int refclk, intel_clock_t *match_clock,
624                     intel_clock_t *best_clock)
625
626 {
627         struct drm_device *dev = crtc->dev;
628         intel_clock_t clock;
629         int err = target;
630
631         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
632                 /*
633                  * For LVDS just rely on its current settings for dual-channel.
634                  * We haven't figured out how to reliably set up different
635                  * single/dual channel state, if we even can.
636                  */
637                 if (intel_is_dual_link_lvds(dev))
638                         clock.p2 = limit->p2.p2_fast;
639                 else
640                         clock.p2 = limit->p2.p2_slow;
641         } else {
642                 if (target < limit->p2.dot_limit)
643                         clock.p2 = limit->p2.p2_slow;
644                 else
645                         clock.p2 = limit->p2.p2_fast;
646         }
647
648         memset(best_clock, 0, sizeof(*best_clock));
649
650         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
651              clock.m1++) {
652                 for (clock.m2 = limit->m2.min;
653                      clock.m2 <= limit->m2.max; clock.m2++) {
654                         /* m1 is always 0 in Pineview */
655                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
656                                 break;
657                         for (clock.n = limit->n.min;
658                              clock.n <= limit->n.max; clock.n++) {
659                                 for (clock.p1 = limit->p1.min;
660                                         clock.p1 <= limit->p1.max; clock.p1++) {
661                                         int this_err;
662
663                                         intel_clock(dev, refclk, &clock);
664                                         if (!intel_PLL_is_valid(dev, limit,
665                                                                 &clock))
666                                                 continue;
667                                         if (match_clock &&
668                                             clock.p != match_clock->p)
669                                                 continue;
670
671                                         this_err = abs(clock.dot - target);
672                                         if (this_err < err) {
673                                                 *best_clock = clock;
674                                                 err = this_err;
675                                         }
676                                 }
677                         }
678                 }
679         }
680
681         return (err != target);
682 }
683
684 static bool
685 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
686                         int target, int refclk, intel_clock_t *match_clock,
687                         intel_clock_t *best_clock)
688 {
689         struct drm_device *dev = crtc->dev;
690         intel_clock_t clock;
691         int max_n;
692         bool found;
693         /* approximately equals target * 0.00585 */
694         int err_most = (target >> 8) + (target >> 9);
695         found = false;
696
697         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
698                 int lvds_reg;
699
700                 if (HAS_PCH_SPLIT(dev))
701                         lvds_reg = PCH_LVDS;
702                 else
703                         lvds_reg = LVDS;
704                 if (intel_is_dual_link_lvds(dev))
705                         clock.p2 = limit->p2.p2_fast;
706                 else
707                         clock.p2 = limit->p2.p2_slow;
708         } else {
709                 if (target < limit->p2.dot_limit)
710                         clock.p2 = limit->p2.p2_slow;
711                 else
712                         clock.p2 = limit->p2.p2_fast;
713         }
714
715         memset(best_clock, 0, sizeof(*best_clock));
716         max_n = limit->n.max;
717         /* based on hardware requirement, prefer smaller n to precision */
718         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
719                 /* based on hardware requirement, prefere larger m1,m2 */
720                 for (clock.m1 = limit->m1.max;
721                      clock.m1 >= limit->m1.min; clock.m1--) {
722                         for (clock.m2 = limit->m2.max;
723                              clock.m2 >= limit->m2.min; clock.m2--) {
724                                 for (clock.p1 = limit->p1.max;
725                                      clock.p1 >= limit->p1.min; clock.p1--) {
726                                         int this_err;
727
728                                         intel_clock(dev, refclk, &clock);
729                                         if (!intel_PLL_is_valid(dev, limit,
730                                                                 &clock))
731                                                 continue;
732                                         if (match_clock &&
733                                             clock.p != match_clock->p)
734                                                 continue;
735
736                                         this_err = abs(clock.dot - target);
737                                         if (this_err < err_most) {
738                                                 *best_clock = clock;
739                                                 err_most = this_err;
740                                                 max_n = clock.n;
741                                                 found = true;
742                                         }
743                                 }
744                         }
745                 }
746         }
747         return found;
748 }
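/*
 * Editor's note: the err_most seed above, (target >> 8) + (target >> 9),
 * works out to roughly target * 3/512, i.e. target * 0.00586, which is what
 * the "approximately equals target * 0.00585" comment refers to; candidates
 * are only accepted while they beat that ~0.6% error budget.
 */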
749
750 static bool
751 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
752                            int target, int refclk, intel_clock_t *match_clock,
753                            intel_clock_t *best_clock)
754 {
755         struct drm_device *dev = crtc->dev;
756         intel_clock_t clock;
757
758         if (target < 200000) {
759                 clock.n = 1;
760                 clock.p1 = 2;
761                 clock.p2 = 10;
762                 clock.m1 = 12;
763                 clock.m2 = 9;
764         } else {
765                 clock.n = 2;
766                 clock.p1 = 1;
767                 clock.p2 = 10;
768                 clock.m1 = 14;
769                 clock.m2 = 8;
770         }
771         intel_clock(dev, refclk, &clock);
772         memcpy(best_clock, &clock, sizeof(intel_clock_t));
773         return true;
774 }
775
776 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
777 static bool
778 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
779                       int target, int refclk, intel_clock_t *match_clock,
780                       intel_clock_t *best_clock)
781 {
782         intel_clock_t clock;
783         if (target < 200000) {
784                 clock.p1 = 2;
785                 clock.p2 = 10;
786                 clock.n = 2;
787                 clock.m1 = 23;
788                 clock.m2 = 8;
789         } else {
790                 clock.p1 = 1;
791                 clock.p2 = 10;
792                 clock.n = 1;
793                 clock.m1 = 14;
794                 clock.m2 = 2;
795         }
796         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
797         clock.p = (clock.p1 * clock.p2);
798         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
799         clock.vco = 0;
800         memcpy(best_clock, &clock, sizeof(intel_clock_t));
801         return true;
802 }
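/*
 * Editor's note (sanity check using the formulas from intel_clock above):
 * the low-frequency branch gives
 *
 *   m   = 5 * (23 + 2) + (8 + 2)       = 135
 *   p   = 2 * 10                       = 20
 *   dot = 96000 * 135 / (2 + 2) / 20   = 162000 kHz
 *
 * i.e. the 162 MHz DisplayPort link rate mentioned in the comment above; the
 * high-frequency branch works out to 268800 kHz for the 270 MHz case.
 */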
803
804 static bool
805 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
806                         int target, int refclk, intel_clock_t *match_clock,
807                         intel_clock_t *best_clock)
808 {
809         u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
810         u32 m, n, fastclk;
811         u32 updrate, minupdate, fracbits, p;
812         unsigned long bestppm, ppm, absppm;
813         int dotclk, flag;
814
815         flag = 0;
816         dotclk = target * 1000;
817         bestppm = 1000000;
818         ppm = absppm = 0;
819         fastclk = dotclk / (2*100);
820         updrate = 0;
821         minupdate = 19200;
822         fracbits = 1;
823         n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
824         bestm1 = bestm2 = bestp1 = bestp2 = 0;
825
826         /* based on hardware requirement, prefer smaller n to precision */
827         for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
828                 updrate = refclk / n;
829                 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
830                         for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
831                                 if (p2 > 10)
832                                         p2 = p2 - 1;
833                                 p = p1 * p2;
834                                 /* based on hardware requirement, prefer bigger m1,m2 values */
835                                 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
836                                         m2 = (((2*(fastclk * p * n / m1 )) +
837                                                refclk) / (2*refclk));
838                                         m = m1 * m2;
839                                         vco = updrate * m;
840                                         if (vco >= limit->vco.min && vco < limit->vco.max) {
841                                                 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
842                                                 absppm = (ppm > 0) ? ppm : (-ppm);
843                                                 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
844                                                         bestppm = 0;
845                                                         flag = 1;
846                                                 }
847                                                 if (absppm < bestppm - 10) {
848                                                         bestppm = absppm;
849                                                         flag = 1;
850                                                 }
851                                                 if (flag) {
852                                                         bestn = n;
853                                                         bestm1 = m1;
854                                                         bestm2 = m2;
855                                                         bestp1 = p1;
856                                                         bestp2 = p2;
857                                                         flag = 0;
858                                                 }
859                                         }
860                                 }
861                         }
862                 }
863         }
864         best_clock->n = bestn;
865         best_clock->m1 = bestm1;
866         best_clock->m2 = bestm2;
867         best_clock->p1 = bestp1;
868         best_clock->p2 = bestp2;
869
870         return true;
871 }
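/*
 * Editor's note (reading aid, not authoritative): the m2 expression in the
 * loop above,
 *
 *   m2 = (2 * (fastclk * p * n / m1) + refclk) / (2 * refclk)
 *
 * is the usual integer idiom for rounding fastclk * p * n / (m1 * refclk) to
 * the nearest whole number (up to the inner truncating division), so m2 is
 * chosen to bring vco / p as close as possible to fastclk for the current
 * n, p1, p2 and m1; the ppm test then scores how close that gets.
 */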
872
873 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
874                                              enum i915_pipe pipe)
875 {
876         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
877         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
878
879         return intel_crtc->cpu_transcoder;
880 }
881
882 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
883 {
884         struct drm_i915_private *dev_priv = dev->dev_private;
885         u32 frame, frame_reg = PIPEFRAME(pipe);
886
887         frame = I915_READ(frame_reg);
888
889         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
890                 DRM_DEBUG_KMS("vblank wait timed out\n");
891 }
892
893 /**
894  * intel_wait_for_vblank - wait for vblank on a given pipe
895  * @dev: drm device
896  * @pipe: pipe to wait for
897  *
898  * Wait for vblank to occur on a given pipe.  Needed for various bits of
899  * mode setting code.
900  */
901 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
902 {
903         struct drm_i915_private *dev_priv = dev->dev_private;
904         int pipestat_reg = PIPESTAT(pipe);
905
906         if (INTEL_INFO(dev)->gen >= 5) {
907                 ironlake_wait_for_vblank(dev, pipe);
908                 return;
909         }
910
911         /* Clear existing vblank status. Note this will clear any other
912          * sticky status fields as well.
913          *
914          * This races with i915_driver_irq_handler() with the result
915          * that either function could miss a vblank event.  Here it is not
916          * fatal, as we will either wait upon the next vblank interrupt or
917          * timeout.  Generally speaking intel_wait_for_vblank() is only
918          * called during modeset at which time the GPU should be idle and
919          * should *not* be performing page flips and thus not waiting on
920          * vblanks...
921          * Currently, the result of us stealing a vblank from the irq
922          * handler is that a single frame will be skipped during swapbuffers.
923          */
924         I915_WRITE(pipestat_reg,
925                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
926
927         /* Wait for vblank interrupt bit to set */
928         if (wait_for(I915_READ(pipestat_reg) &
929                      PIPE_VBLANK_INTERRUPT_STATUS,
930                      50))
931                 DRM_DEBUG_KMS("vblank wait timed out\n");
932 }
933
934 /*
935  * intel_wait_for_pipe_off - wait for pipe to turn off
936  * @dev: drm device
937  * @pipe: pipe to wait for
938  *
939  * After disabling a pipe, we can't wait for vblank in the usual way,
940  * spinning on the vblank interrupt status bit, since we won't actually
941  * see an interrupt when the pipe is disabled.
942  *
943  * On Gen4 and above:
944  *   wait for the pipe register state bit to turn off
945  *
946  * Otherwise:
947  *   wait for the display line value to settle (it usually
948  *   ends up stopping at the start of the next frame).
949  *
950  */
951 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
952 {
953         struct drm_i915_private *dev_priv = dev->dev_private;
954         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
955                                                                       pipe);
956
957         if (INTEL_INFO(dev)->gen >= 4) {
958                 int reg = PIPECONF(cpu_transcoder);
959
960                 /* Wait for the Pipe State to go off */
961                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
962                              100))
963                         WARN(1, "pipe_off wait timed out\n");
964         } else {
965                 u32 last_line, line_mask;
966                 int reg = PIPEDSL(pipe);
967                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
968
969                 if (IS_GEN2(dev))
970                         line_mask = DSL_LINEMASK_GEN2;
971                 else
972                         line_mask = DSL_LINEMASK_GEN3;
973
974                 /* Wait for the display line to settle */
975                 do {
976                         last_line = I915_READ(reg) & line_mask;
977                         mdelay(5);
978                 } while (((I915_READ(reg) & line_mask) != last_line) &&
979                          time_after(timeout, jiffies));
980                 if (time_after(jiffies, timeout))
981                         WARN(1, "pipe_off wait timed out\n");
982         }
983 }
984
985 /*
986  * ibx_digital_port_connected - is the specified port connected?
987  * @dev_priv: i915 private structure
988  * @port: the port to test
989  *
990  * Returns true if @port is connected, false otherwise.
991  */
992 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
993                                 struct intel_digital_port *port)
994 {
995         u32 bit;
996
997         if (HAS_PCH_IBX(dev_priv->dev)) {
998                 switch(port->port) {
999                 case PORT_B:
1000                         bit = SDE_PORTB_HOTPLUG;
1001                         break;
1002                 case PORT_C:
1003                         bit = SDE_PORTC_HOTPLUG;
1004                         break;
1005                 case PORT_D:
1006                         bit = SDE_PORTD_HOTPLUG;
1007                         break;
1008                 default:
1009                         return true;
1010                 }
1011         } else {
1012                 switch(port->port) {
1013                 case PORT_B:
1014                         bit = SDE_PORTB_HOTPLUG_CPT;
1015                         break;
1016                 case PORT_C:
1017                         bit = SDE_PORTC_HOTPLUG_CPT;
1018                         break;
1019                 case PORT_D:
1020                         bit = SDE_PORTD_HOTPLUG_CPT;
1021                         break;
1022                 default:
1023                         return true;
1024                 }
1025         }
1026
1027         return I915_READ(SDEISR) & bit;
1028 }
1029
1030 static const char *state_string(bool enabled)
1031 {
1032         return enabled ? "on" : "off";
1033 }
1034
1035 /* Only for pre-ILK configs */
1036 static void assert_pll(struct drm_i915_private *dev_priv,
1037                        enum i915_pipe pipe, bool state)
1038 {
1039         int reg;
1040         u32 val;
1041         bool cur_state;
1042
1043         reg = DPLL(pipe);
1044         val = I915_READ(reg);
1045         cur_state = !!(val & DPLL_VCO_ENABLE);
1046         WARN(cur_state != state,
1047              "PLL state assertion failure (expected %s, current %s)\n",
1048              state_string(state), state_string(cur_state));
1049 }
1050 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
1051 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
1052
1053 /* For ILK+ */
1054 static void assert_pch_pll(struct drm_i915_private *dev_priv,
1055                            struct intel_pch_pll *pll,
1056                            struct intel_crtc *crtc,
1057                            bool state)
1058 {
1059         u32 val;
1060         bool cur_state;
1061
1062         if (HAS_PCH_LPT(dev_priv->dev)) {
1063                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1064                 return;
1065         }
1066
1067         if (WARN (!pll,
1068                   "asserting PCH PLL %s with no PLL\n", state_string(state)))
1069                 return;
1070
1071         val = I915_READ(pll->pll_reg);
1072         cur_state = !!(val & DPLL_VCO_ENABLE);
1073         WARN(cur_state != state,
1074              "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
1075              pll->pll_reg, state_string(state), state_string(cur_state), val);
1076
1077         /* Make sure the selected PLL is correctly attached to the transcoder */
1078         if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
1079                 u32 pch_dpll;
1080
1081                 pch_dpll = I915_READ(PCH_DPLL_SEL);
1082                 cur_state = pll->pll_reg == _PCH_DPLL_B;
1083                 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1084                           "PLL[%d] not attached to this transcoder %d: %08x\n",
1085                           cur_state, crtc->pipe, pch_dpll)) {
1086                         cur_state = !!(val >> (4*crtc->pipe + 3));
1087                         WARN(cur_state != state,
1088                              "PLL[%d] not %s on this transcoder %d: %08x\n",
1089                              pll->pll_reg == _PCH_DPLL_B,
1090                              state_string(state),
1091                              crtc->pipe,
1092                              val);
1093                 }
1094         }
1095 }
1096 #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
1097 #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
1098
1099 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1100                           enum i915_pipe pipe, bool state)
1101 {
1102         int reg;
1103         u32 val;
1104         bool cur_state;
1105         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1106                                                                       pipe);
1107
1108         if (HAS_DDI(dev_priv->dev)) {
1109                 /* DDI does not have a specific FDI_TX register */
1110                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1111                 val = I915_READ(reg);
1112                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1113         } else {
1114                 reg = FDI_TX_CTL(pipe);
1115                 val = I915_READ(reg);
1116                 cur_state = !!(val & FDI_TX_ENABLE);
1117         }
1118         WARN(cur_state != state,
1119              "FDI TX state assertion failure (expected %s, current %s)\n",
1120              state_string(state), state_string(cur_state));
1121 }
1122 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1123 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1124
1125 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1126                           enum i915_pipe pipe, bool state)
1127 {
1128         int reg;
1129         u32 val;
1130         bool cur_state;
1131
1132         reg = FDI_RX_CTL(pipe);
1133         val = I915_READ(reg);
1134         cur_state = !!(val & FDI_RX_ENABLE);
1135         WARN(cur_state != state,
1136              "FDI RX state assertion failure (expected %s, current %s)\n",
1137              state_string(state), state_string(cur_state));
1138 }
1139 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1140 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1141
1142 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1143                                       enum i915_pipe pipe)
1144 {
1145         int reg;
1146         u32 val;
1147
1148         /* ILK FDI PLL is always enabled */
1149         if (dev_priv->info->gen == 5)
1150                 return;
1151
1152         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1153         if (HAS_DDI(dev_priv->dev))
1154                 return;
1155
1156         reg = FDI_TX_CTL(pipe);
1157         val = I915_READ(reg);
1158         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1159 }
1160
1161 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1162                                       enum i915_pipe pipe)
1163 {
1164         int reg;
1165         u32 val;
1166
1167         reg = FDI_RX_CTL(pipe);
1168         val = I915_READ(reg);
1169         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1170 }
1171
1172 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1173                                   enum i915_pipe pipe)
1174 {
1175         int pp_reg, lvds_reg;
1176         u32 val;
1177         enum i915_pipe panel_pipe = PIPE_A;
1178         bool locked = true;
1179
1180         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1181                 pp_reg = PCH_PP_CONTROL;
1182                 lvds_reg = PCH_LVDS;
1183         } else {
1184                 pp_reg = PP_CONTROL;
1185                 lvds_reg = LVDS;
1186         }
1187
1188         val = I915_READ(pp_reg);
1189         if (!(val & PANEL_POWER_ON) ||
1190             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1191                 locked = false;
1192
1193         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1194                 panel_pipe = PIPE_B;
1195
1196         WARN(panel_pipe == pipe && locked,
1197              "panel assertion failure, pipe %c regs locked\n",
1198              pipe_name(pipe));
1199 }
1200
1201 void assert_pipe(struct drm_i915_private *dev_priv,
1202                  enum i915_pipe pipe, bool state)
1203 {
1204         int reg;
1205         u32 val;
1206         bool cur_state;
1207         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1208                                                                       pipe);
1209
1210         /* if we need the pipe A quirk it must be always on */
1211         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1212                 state = true;
1213
1214         if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
1215             !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
1216                 cur_state = false;
1217         } else {
1218                 reg = PIPECONF(cpu_transcoder);
1219                 val = I915_READ(reg);
1220                 cur_state = !!(val & PIPECONF_ENABLE);
1221         }
1222
1223         WARN(cur_state != state,
1224              "pipe %c assertion failure (expected %s, current %s)\n",
1225              pipe_name(pipe), state_string(state), state_string(cur_state));
1226 }
1227
1228 static void assert_plane(struct drm_i915_private *dev_priv,
1229                          enum plane plane, bool state)
1230 {
1231         int reg;
1232         u32 val;
1233         bool cur_state;
1234
1235         reg = DSPCNTR(plane);
1236         val = I915_READ(reg);
1237         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1238         WARN(cur_state != state,
1239              "plane %c assertion failure (expected %s, current %s)\n",
1240              plane_name(plane), state_string(state), state_string(cur_state));
1241 }
1242
1243 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1244 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1245
1246 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1247                                    enum i915_pipe pipe)
1248 {
1249         int reg, i;
1250         u32 val;
1251         int cur_pipe;
1252
1253         /* Planes are fixed to pipes on ILK+ */
1254         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1255                 reg = DSPCNTR(pipe);
1256                 val = I915_READ(reg);
1257                 WARN((val & DISPLAY_PLANE_ENABLE),
1258                      "plane %c assertion failure, should be disabled but not\n",
1259                      plane_name(pipe));
1260                 return;
1261         }
1262
1263         /* Need to check both planes against the pipe */
1264         for (i = 0; i < 2; i++) {
1265                 reg = DSPCNTR(i);
1266                 val = I915_READ(reg);
1267                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1268                         DISPPLANE_SEL_PIPE_SHIFT;
1269                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1270                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1271                      plane_name(i), pipe_name(pipe));
1272         }
1273 }
1274
1275 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1276 {
1277         u32 val;
1278         bool enabled;
1279
1280         if (HAS_PCH_LPT(dev_priv->dev)) {
1281                 DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1282                 return;
1283         }
1284
1285         val = I915_READ(PCH_DREF_CONTROL);
1286         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1287                             DREF_SUPERSPREAD_SOURCE_MASK));
1288         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1289 }
1290
1291 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1292                                        enum i915_pipe pipe)
1293 {
1294         int reg;
1295         u32 val;
1296         bool enabled;
1297
1298         reg = TRANSCONF(pipe);
1299         val = I915_READ(reg);
1300         enabled = !!(val & TRANS_ENABLE);
1301         WARN(enabled,
1302              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1303              pipe_name(pipe));
1304 }
1305
1306 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1307                             enum i915_pipe pipe, u32 port_sel, u32 val)
1308 {
1309         if ((val & DP_PORT_EN) == 0)
1310                 return false;
1311
1312         if (HAS_PCH_CPT(dev_priv->dev)) {
1313                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1314                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1315                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1316                         return false;
1317         } else {
1318                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1319                         return false;
1320         }
1321         return true;
1322 }
1323
1324 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1325                               enum i915_pipe pipe, u32 val)
1326 {
1327         if ((val & PORT_ENABLE) == 0)
1328                 return false;
1329
1330         if (HAS_PCH_CPT(dev_priv->dev)) {
1331                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1332                         return false;
1333         } else {
1334                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1335                         return false;
1336         }
1337         return true;
1338 }
1339
1340 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1341                               enum i915_pipe pipe, u32 val)
1342 {
1343         if ((val & LVDS_PORT_EN) == 0)
1344                 return false;
1345
1346         if (HAS_PCH_CPT(dev_priv->dev)) {
1347                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1348                         return false;
1349         } else {
1350                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1351                         return false;
1352         }
1353         return true;
1354 }
1355
1356 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1357                               enum i915_pipe pipe, u32 val)
1358 {
1359         if ((val & ADPA_DAC_ENABLE) == 0)
1360                 return false;
1361         if (HAS_PCH_CPT(dev_priv->dev)) {
1362                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1363                         return false;
1364         } else {
1365                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1366                         return false;
1367         }
1368         return true;
1369 }
1370
1371 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1372                                    enum i915_pipe pipe, int reg, u32 port_sel)
1373 {
1374         u32 val = I915_READ(reg);
1375         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1376              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1377              reg, pipe_name(pipe));
1378
1379         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1380              && (val & DP_PIPEB_SELECT),
1381              "IBX PCH dp port still using transcoder B\n");
1382 }
1383
1384 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1385                                      enum i915_pipe pipe, int reg)
1386 {
1387         u32 val = I915_READ(reg);
1388         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1389              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1390              reg, pipe_name(pipe));
1391
1392         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
1393              && (val & SDVO_PIPE_B_SELECT),
1394              "IBX PCH hdmi port still using transcoder B\n");
1395 }
1396
1397 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1398                                       enum i915_pipe pipe)
1399 {
1400         int reg;
1401         u32 val;
1402
1403         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1404         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1405         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1406
1407         reg = PCH_ADPA;
1408         val = I915_READ(reg);
1409         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1410              "PCH VGA enabled on transcoder %c, should be disabled\n",
1411              pipe_name(pipe));
1412
1413         reg = PCH_LVDS;
1414         val = I915_READ(reg);
1415         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1416              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1417              pipe_name(pipe));
1418
1419         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1420         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1421         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1422 }
1423
1424 /**
1425  * intel_enable_pll - enable a PLL
1426  * @dev_priv: i915 private structure
1427  * @pipe: pipe PLL to enable
1428  *
1429  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1430  * make sure the PLL reg is writable first though, since the panel write
1431  * protect mechanism may be enabled.
1432  *
1433  * Note!  This is for pre-ILK only.
1434  *
1435  * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1436  */
1437 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1438 {
1439         int reg;
1440         u32 val;
1441
1442         /* No really, not for ILK+ */
1443         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
1444
1445         /* PLL is protected by panel, make sure we can write it */
1446         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1447                 assert_panel_unlocked(dev_priv, pipe);
1448
1449         reg = DPLL(pipe);
1450         val = I915_READ(reg);
1451         val |= DPLL_VCO_ENABLE;
1452
1453         /* We do this three times for luck */
1454         I915_WRITE(reg, val);
1455         POSTING_READ(reg);
1456         udelay(150); /* wait for warmup */
1457         I915_WRITE(reg, val);
1458         POSTING_READ(reg);
1459         udelay(150); /* wait for warmup */
1460         I915_WRITE(reg, val);
1461         POSTING_READ(reg);
1462         udelay(150); /* wait for warmup */
1463 }
1464
1465 /**
1466  * intel_disable_pll - disable a PLL
1467  * @dev_priv: i915 private structure
1468  * @pipe: pipe PLL to disable
1469  *
1470  * Disable the PLL for @pipe, making sure the pipe is off first.
1471  *
1472  * Note!  This is for pre-ILK only.
1473  */
1474 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1475 {
1476         int reg;
1477         u32 val;
1478
1479         /* Don't disable the pipe A PLL if the pipe A force quirk requires it */
1480         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1481                 return;
1482
1483         /* Make sure the pipe isn't still relying on us */
1484         assert_pipe_disabled(dev_priv, pipe);
1485
1486         reg = DPLL(pipe);
1487         val = I915_READ(reg);
1488         val &= ~DPLL_VCO_ENABLE;
1489         I915_WRITE(reg, val);
1490         POSTING_READ(reg);
1491 }
1492
1493 /* SBI access */
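/*
 * The sideband interface (SBI) helpers below implement a simple polled
 * handshake: wait for SBI_CTL_STAT to drop SBI_BUSY, load the target
 * register into the upper half of SBI_ADDR (plus SBI_DATA for writes),
 * kick off the transaction by setting SBI_BUSY together with the
 * destination/opcode bits, and then poll until both SBI_BUSY and
 * SBI_RESPONSE_FAIL read back as clear.
 */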
1494 static void
1495 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1496                 enum intel_sbi_destination destination)
1497 {
1498         u32 tmp;
1499
1500         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1501
1502         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1503                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1504                 return;
1505         }
1506
1507         I915_WRITE(SBI_ADDR, (reg << 16));
1508         I915_WRITE(SBI_DATA, value);
1509
1510         if (destination == SBI_ICLK)
1511                 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1512         else
1513                 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1514         I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1515
1516         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1517                                 100)) {
1518                 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1519                 return;
1520         }
1521 }
1522
1523 static u32
1524 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1525                enum intel_sbi_destination destination)
1526 {
1527         u32 value = 0;
1528
1529         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1530
1531         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1532                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1533                 return 0;
1534         }
1535
1536         I915_WRITE(SBI_ADDR, (reg << 16));
1537
1538         if (destination == SBI_ICLK)
1539                 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1540         else
1541                 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1542         I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1543
1544         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1545                                 100)) {
1546                 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1547                 return 0;
1548         }
1549
1550         return I915_READ(SBI_DATA);
1551 }
1552
1553 /**
1554  * ironlake_enable_pch_pll - enable PCH PLL
1555  * @intel_crtc: CRTC whose shared PCH PLL (intel_crtc->pch_pll) should be
1556  *              enabled
1557  *
1558  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1559  * drives the transcoder clock.
1560  */
1561 static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
1562 {
1563         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1564         struct intel_pch_pll *pll;
1565         int reg;
1566         u32 val;
1567
1568         /* PCH PLLs only available on ILK, SNB and IVB */
1569         BUG_ON(dev_priv->info->gen < 5);
1570         pll = intel_crtc->pch_pll;
1571         if (pll == NULL)
1572                 return;
1573
1574         if (WARN_ON(pll->refcount == 0))
1575                 return;
1576
1577         DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1578                       pll->pll_reg, pll->active, pll->on,
1579                       intel_crtc->base.base.id);
1580
1581         /* PCH refclock must be enabled first */
1582         assert_pch_refclk_enabled(dev_priv);
1583
1584         if (pll->active++ && pll->on) {
1585                 assert_pch_pll_enabled(dev_priv, pll, NULL);
1586                 return;
1587         }
1588
1589         DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1590
1591         reg = pll->pll_reg;
1592         val = I915_READ(reg);
1593         val |= DPLL_VCO_ENABLE;
1594         I915_WRITE(reg, val);
1595         POSTING_READ(reg);
1596         udelay(200);
1597
1598         pll->on = true;
1599 }
1600
1601 static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1602 {
1603         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1604         struct intel_pch_pll *pll = intel_crtc->pch_pll;
1605         int reg;
1606         u32 val;
1607
1608         /* PCH only available on ILK+ */
1609         BUG_ON(dev_priv->info->gen < 5);
1610         if (pll == NULL)
1611                 return;
1612
1613         if (WARN_ON(pll->refcount == 0))
1614                 return;
1615
1616         DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1617                       pll->pll_reg, pll->active, pll->on,
1618                       intel_crtc->base.base.id);
1619
1620         if (WARN_ON(pll->active == 0)) {
1621                 assert_pch_pll_disabled(dev_priv, pll, NULL);
1622                 return;
1623         }
1624
1625         if (--pll->active) {
1626                 assert_pch_pll_enabled(dev_priv, pll, NULL);
1627                 return;
1628         }
1629
1630         DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1631
1632         /* Make sure transcoder isn't still depending on us */
1633         assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1634
1635         reg = pll->pll_reg;
1636         val = I915_READ(reg);
1637         val &= ~DPLL_VCO_ENABLE;
1638         I915_WRITE(reg, val);
1639         POSTING_READ(reg);
1640         udelay(200);
1641
1642         pll->on = false;
1643 }
1644
1645 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1646                                            enum i915_pipe pipe)
1647 {
1648         struct drm_device *dev = dev_priv->dev;
1649         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1650         uint32_t reg, val, pipeconf_val;
1651
1652         /* PCH only available on ILK+ */
1653         BUG_ON(dev_priv->info->gen < 5);
1654
1655         /* Make sure PCH DPLL is enabled */
1656         assert_pch_pll_enabled(dev_priv,
1657                                to_intel_crtc(crtc)->pch_pll,
1658                                to_intel_crtc(crtc));
1659
1660         /* FDI must be feeding us bits for PCH ports */
1661         assert_fdi_tx_enabled(dev_priv, pipe);
1662         assert_fdi_rx_enabled(dev_priv, pipe);
1663
1664         if (HAS_PCH_CPT(dev)) {
1665                 /* Workaround: Set the timing override bit before enabling the
1666                  * pch transcoder. */
1667                 reg = TRANS_CHICKEN2(pipe);
1668                 val = I915_READ(reg);
1669                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1670                 I915_WRITE(reg, val);
1671         }
1672
1673         reg = TRANSCONF(pipe);
1674         val = I915_READ(reg);
1675         pipeconf_val = I915_READ(PIPECONF(pipe));
1676
1677         if (HAS_PCH_IBX(dev_priv->dev)) {
1678                 /*
1679                  * make the BPC in transcoder be consistent with
1680                  * that in pipeconf reg.
1681                  */
1682                 val &= ~PIPECONF_BPC_MASK;
1683                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1684         }
1685
1686         val &= ~TRANS_INTERLACE_MASK;
1687         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1688                 if (HAS_PCH_IBX(dev_priv->dev) &&
1689                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1690                         val |= TRANS_LEGACY_INTERLACED_ILK;
1691                 else
1692                         val |= TRANS_INTERLACED;
1693         } else
1694                 val |= TRANS_PROGRESSIVE;
1695
1696         I915_WRITE(reg, val | TRANS_ENABLE);
1697         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1698                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1699 }
1700
1701 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1702                                       enum transcoder cpu_transcoder)
1703 {
1704         u32 val, pipeconf_val;
1705
1706         /* PCH only available on ILK+ */
1707         BUG_ON(dev_priv->info->gen < 5);
1708
1709         /* FDI must be feeding us bits for PCH ports */
1710         assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1711         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1712
1713         /* Workaround: set timing override bit. */
1714         val = I915_READ(_TRANSA_CHICKEN2);
1715         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1716         I915_WRITE(_TRANSA_CHICKEN2, val);
1717
1718         val = TRANS_ENABLE;
1719         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1720
1721         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1722             PIPECONF_INTERLACED_ILK)
1723                 val |= TRANS_INTERLACED;
1724         else
1725                 val |= TRANS_PROGRESSIVE;
1726
1727         I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1728         if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1729                 DRM_ERROR("Failed to enable PCH transcoder\n");
1730 }
1731
1732 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1733                                             enum i915_pipe pipe)
1734 {
1735         struct drm_device *dev = dev_priv->dev;
1736         uint32_t reg, val;
1737
1738         /* FDI relies on the transcoder */
1739         assert_fdi_tx_disabled(dev_priv, pipe);
1740         assert_fdi_rx_disabled(dev_priv, pipe);
1741
1742         /* Ports must be off as well */
1743         assert_pch_ports_disabled(dev_priv, pipe);
1744
1745         reg = TRANSCONF(pipe);
1746         val = I915_READ(reg);
1747         val &= ~TRANS_ENABLE;
1748         I915_WRITE(reg, val);
1749         /* wait for PCH transcoder off, transcoder state */
1750         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1751                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1752
1753         if (!HAS_PCH_IBX(dev)) {
1754                 /* Workaround: Clear the timing override chicken bit again. */
1755                 reg = TRANS_CHICKEN2(pipe);
1756                 val = I915_READ(reg);
1757                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1758                 I915_WRITE(reg, val);
1759         }
1760 }
1761
1762 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1763 {
1764         u32 val;
1765
1766         val = I915_READ(_TRANSACONF);
1767         val &= ~TRANS_ENABLE;
1768         I915_WRITE(_TRANSACONF, val);
1769         /* wait for PCH transcoder off, transcoder state */
1770         if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1771                 DRM_ERROR("Failed to disable PCH transcoder\n");
1772
1773         /* Workaround: clear timing override bit. */
1774         val = I915_READ(_TRANSA_CHICKEN2);
1775         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1776         I915_WRITE(_TRANSA_CHICKEN2, val);
1777 }
1778
1779 /**
1780  * intel_enable_pipe - enable a pipe, asserting requirements
1781  * @dev_priv: i915 private structure
1782  * @pipe: pipe to enable
1783  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1784  *
1785  * Enable @pipe, making sure that various hardware specific requirements
1786  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1787  *
1788  * @pipe should be %PIPE_A or %PIPE_B.
1789  *
1790  * Will wait until the pipe is actually running (i.e. first vblank) before
1791  * returning.
1792  */
1793 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
1794                               bool pch_port)
1795 {
1796         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1797                                                                       pipe);
1798         enum i915_pipe pch_transcoder;
1799         int reg;
1800         u32 val;
1801
1802         if (HAS_PCH_LPT(dev_priv->dev))
1803                 pch_transcoder = TRANSCODER_A;
1804         else
1805                 pch_transcoder = pipe;
1806
1807         /*
1808          * A pipe without a PLL won't actually be able to drive bits from
1809          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1810          * need the check.
1811          */
1812         if (!HAS_PCH_SPLIT(dev_priv->dev))
1813                 assert_pll_enabled(dev_priv, pipe);
1814         else {
1815                 if (pch_port) {
1816                         /* if driving the PCH, we need FDI enabled */
1817                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1818                         assert_fdi_tx_pll_enabled(dev_priv,
1819                                                   (enum i915_pipe) cpu_transcoder);
1820                 }
1821                 /* FIXME: assert CPU port conditions for SNB+ */
1822         }
1823
1824         reg = PIPECONF(cpu_transcoder);
1825         val = I915_READ(reg);
1826         if (val & PIPECONF_ENABLE)
1827                 return;
1828
1829         I915_WRITE(reg, val | PIPECONF_ENABLE);
1830         intel_wait_for_vblank(dev_priv->dev, pipe);
1831 }
1832
1833 /**
1834  * intel_disable_pipe - disable a pipe, asserting requirements
1835  * @dev_priv: i915 private structure
1836  * @pipe: pipe to disable
1837  *
1838  * Disable @pipe, making sure that various hardware specific requirements
1839  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1840  *
1841  * @pipe should be %PIPE_A or %PIPE_B.
1842  *
1843  * Will wait until the pipe has shut down before returning.
1844  */
1845 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1846                                enum i915_pipe pipe)
1847 {
1848         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1849                                                                       pipe);
1850         int reg;
1851         u32 val;
1852
1853         /*
1854          * Make sure planes won't keep trying to pump pixels to us,
1855          * or we might hang the display.
1856          */
1857         assert_planes_disabled(dev_priv, pipe);
1858
1859         /* Don't disable pipe A if the pipe A force quirk requires it */
1860         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1861                 return;
1862
1863         reg = PIPECONF(cpu_transcoder);
1864         val = I915_READ(reg);
1865         if ((val & PIPECONF_ENABLE) == 0)
1866                 return;
1867
1868         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1869         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1870 }
1871
1872 /*
1873  * Plane regs are double buffered, going from enabled->disabled needs a
1874  * trigger in order to latch.  The display address reg provides this.
1875  */
1876 void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1877                                       enum plane plane)
1878 {
1879         if (dev_priv->info->gen >= 4)
1880                 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1881         else
1882                 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1883 }
1884
1885 /**
1886  * intel_enable_plane - enable a display plane on a given pipe
1887  * @dev_priv: i915 private structure
1888  * @plane: plane to enable
1889  * @pipe: pipe being fed
1890  *
1891  * Enable @plane on @pipe, making sure that @pipe is running first.
1892  */
1893 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1894                                enum plane plane, enum i915_pipe pipe)
1895 {
1896         int reg;
1897         u32 val;
1898
1899         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1900         assert_pipe_enabled(dev_priv, pipe);
1901
1902         reg = DSPCNTR(plane);
1903         val = I915_READ(reg);
1904         if (val & DISPLAY_PLANE_ENABLE)
1905                 return;
1906
1907         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1908         intel_flush_display_plane(dev_priv, plane);
1909         intel_wait_for_vblank(dev_priv->dev, pipe);
1910 }
1911
1912 /**
1913  * intel_disable_plane - disable a display plane
1914  * @dev_priv: i915 private structure
1915  * @plane: plane to disable
1916  * @pipe: pipe consuming the data
1917  *
1918  * Disable @plane; should be an independent operation.
1919  */
1920 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1921                                 enum plane plane, enum i915_pipe pipe)
1922 {
1923         int reg;
1924         u32 val;
1925
1926         reg = DSPCNTR(plane);
1927         val = I915_READ(reg);
1928         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1929                 return;
1930
1931         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1932         intel_flush_display_plane(dev_priv, plane);
1933         intel_wait_for_vblank(dev_priv->dev, pipe);
1934 }
1935
1936 int
1937 intel_pin_and_fence_fb_obj(struct drm_device *dev,
1938                            struct drm_i915_gem_object *obj,
1939                            struct intel_ring_buffer *pipelined)
1940 {
1941         struct drm_i915_private *dev_priv = dev->dev_private;
1942         u32 alignment;
1943         int ret;
1944
1945         switch (obj->tiling_mode) {
1946         case I915_TILING_NONE:
1947                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1948                         alignment = 128 * 1024;
1949                 else if (INTEL_INFO(dev)->gen >= 4)
1950                         alignment = 4 * 1024;
1951                 else
1952                         alignment = 64 * 1024;
1953                 break;
1954         case I915_TILING_X:
1955                 /* pin() will align the object as required by fence */
1956                 alignment = 0;
1957                 break;
1958         case I915_TILING_Y:
1959                 /* FIXME: Is this true? */
1960                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1961                 return -EINVAL;
1962         default:
1963                 BUG();
1964         }
1965
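        /*
         * Pin and fence with GEM waits made non-interruptible; the flag is
         * restored on both the success and the error paths below.
         */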
1966         dev_priv->mm.interruptible = false;
1967         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1968         if (ret)
1969                 goto err_interruptible;
1970
1971         /* Install a fence for tiled scan-out. Pre-i965 always needs a
1972          * fence, whereas 965+ only requires a fence if using
1973          * framebuffer compression.  For simplicity, we always install
1974          * a fence as the cost is not that onerous.
1975          */
1976         ret = i915_gem_object_get_fence(obj);
1977         if (ret)
1978                 goto err_unpin;
1979
1980         i915_gem_object_pin_fence(obj);
1981
1982         dev_priv->mm.interruptible = true;
1983         return 0;
1984
1985 err_unpin:
1986         i915_gem_object_unpin(obj);
1987 err_interruptible:
1988         dev_priv->mm.interruptible = true;
1989         return ret;
1990 }
1991
1992 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1993 {
1994         i915_gem_object_unpin_fence(obj);
1995         i915_gem_object_unpin(obj);
1996 }
1997
1998 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1999  * is assumed to be a power-of-two. */
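/*
 * Worked example (illustrative values only): with tiling enabled, cpp = 4
 * and pitch = 8192, a pixel at (x, y) = (700, 20) sits in tile row
 * 20 / 8 = 2 and tile column 700 / (512 / 4) = 5, so the function returns
 * 2 * 8192 * 8 + 5 * 4096 = 151552 and adjusts (x, y) to (60, 4).
 */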
2000 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2001                                              unsigned int tiling_mode,
2002                                              unsigned int cpp,
2003                                              unsigned int pitch)
2004 {
2005         if (tiling_mode != I915_TILING_NONE) {
2006                 unsigned int tile_rows, tiles;
2007
2008                 tile_rows = *y / 8;
2009                 *y %= 8;
2010
2011                 tiles = *x / (512/cpp);
2012                 *x %= 512/cpp;
2013
2014                 return tile_rows * pitch * 8 + tiles * 4096;
2015         } else {
2016                 unsigned int offset;
2017
2018                 offset = *y * pitch + *x * cpp;
2019                 *y = 0;
2020                 *x = (offset & 4095) / cpp;
2021                 return offset & -4096;
2022         }
2023 }
2024
2025 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2026                              int x, int y)
2027 {
2028         struct drm_device *dev = crtc->dev;
2029         struct drm_i915_private *dev_priv = dev->dev_private;
2030         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2031         struct intel_framebuffer *intel_fb;
2032         struct drm_i915_gem_object *obj;
2033         int plane = intel_crtc->plane;
2034         unsigned long linear_offset;
2035         u32 dspcntr;
2036         u32 reg;
2037
2038         switch (plane) {
2039         case 0:
2040         case 1:
2041                 break;
2042         default:
2043                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2044                 return -EINVAL;
2045         }
2046
2047         intel_fb = to_intel_framebuffer(fb);
2048         obj = intel_fb->obj;
2049
2050         reg = DSPCNTR(plane);
2051         dspcntr = I915_READ(reg);
2052         /* Mask out pixel format bits in case we change it */
2053         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2054         switch (fb->pixel_format) {
2055         case DRM_FORMAT_C8:
2056                 dspcntr |= DISPPLANE_8BPP;
2057                 break;
2058         case DRM_FORMAT_XRGB1555:
2059         case DRM_FORMAT_ARGB1555:
2060                 dspcntr |= DISPPLANE_BGRX555;
2061                 break;
2062         case DRM_FORMAT_RGB565:
2063                 dspcntr |= DISPPLANE_BGRX565;
2064                 break;
2065         case DRM_FORMAT_XRGB8888:
2066         case DRM_FORMAT_ARGB8888:
2067                 dspcntr |= DISPPLANE_BGRX888;
2068                 break;
2069         case DRM_FORMAT_XBGR8888:
2070         case DRM_FORMAT_ABGR8888:
2071                 dspcntr |= DISPPLANE_RGBX888;
2072                 break;
2073         case DRM_FORMAT_XRGB2101010:
2074         case DRM_FORMAT_ARGB2101010:
2075                 dspcntr |= DISPPLANE_BGRX101010;
2076                 break;
2077         case DRM_FORMAT_XBGR2101010:
2078         case DRM_FORMAT_ABGR2101010:
2079                 dspcntr |= DISPPLANE_RGBX101010;
2080                 break;
2081         default:
2082                 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2083                 return -EINVAL;
2084         }
2085
2086         if (INTEL_INFO(dev)->gen >= 4) {
2087                 if (obj->tiling_mode != I915_TILING_NONE)
2088                         dspcntr |= DISPPLANE_TILED;
2089                 else
2090                         dspcntr &= ~DISPPLANE_TILED;
2091         }
2092
2093         I915_WRITE(reg, dspcntr);
2094
2095         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2096
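        /*
         * Gen4+ parts take a coarsely aligned base address in DSPSURF plus
         * the remaining (x, y) offset via DSPTILEOFF/DSPLINOFF, so split the
         * linear offset accordingly; older parts simply add the full linear
         * offset to DSPADDR.
         */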
2097         if (INTEL_INFO(dev)->gen >= 4) {
2098                 intel_crtc->dspaddr_offset =
2099                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2100                                                        fb->bits_per_pixel / 8,
2101                                                        fb->pitches[0]);
2102                 linear_offset -= intel_crtc->dspaddr_offset;
2103         } else {
2104                 intel_crtc->dspaddr_offset = linear_offset;
2105         }
2106
2107         DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2108                       obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2109         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2110         if (INTEL_INFO(dev)->gen >= 4) {
2111                 I915_MODIFY_DISPBASE(DSPSURF(plane),
2112                                      obj->gtt_offset + intel_crtc->dspaddr_offset);
2113                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2114                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2115         } else
2116                 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
2117         POSTING_READ(reg);
2118
2119         return 0;
2120 }
2121
2122 static int ironlake_update_plane(struct drm_crtc *crtc,
2123                                  struct drm_framebuffer *fb, int x, int y)
2124 {
2125         struct drm_device *dev = crtc->dev;
2126         struct drm_i915_private *dev_priv = dev->dev_private;
2127         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2128         struct intel_framebuffer *intel_fb;
2129         struct drm_i915_gem_object *obj;
2130         int plane = intel_crtc->plane;
2131         unsigned long linear_offset;
2132         u32 dspcntr;
2133         u32 reg;
2134
2135         switch (plane) {
2136         case 0:
2137         case 1:
2138         case 2:
2139                 break;
2140         default:
2141                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2142                 return -EINVAL;
2143         }
2144
2145         intel_fb = to_intel_framebuffer(fb);
2146         obj = intel_fb->obj;
2147
2148         reg = DSPCNTR(plane);
2149         dspcntr = I915_READ(reg);
2150         /* Mask out pixel format bits in case we change it */
2151         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2152         switch (fb->pixel_format) {
2153         case DRM_FORMAT_C8:
2154                 dspcntr |= DISPPLANE_8BPP;
2155                 break;
2156         case DRM_FORMAT_RGB565:
2157                 dspcntr |= DISPPLANE_BGRX565;
2158                 break;
2159         case DRM_FORMAT_XRGB8888:
2160         case DRM_FORMAT_ARGB8888:
2161                 dspcntr |= DISPPLANE_BGRX888;
2162                 break;
2163         case DRM_FORMAT_XBGR8888:
2164         case DRM_FORMAT_ABGR8888:
2165                 dspcntr |= DISPPLANE_RGBX888;
2166                 break;
2167         case DRM_FORMAT_XRGB2101010:
2168         case DRM_FORMAT_ARGB2101010:
2169                 dspcntr |= DISPPLANE_BGRX101010;
2170                 break;
2171         case DRM_FORMAT_XBGR2101010:
2172         case DRM_FORMAT_ABGR2101010:
2173                 dspcntr |= DISPPLANE_RGBX101010;
2174                 break;
2175         default:
2176                 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2177                 return -EINVAL;
2178         }
2179
2180         if (obj->tiling_mode != I915_TILING_NONE)
2181                 dspcntr |= DISPPLANE_TILED;
2182         else
2183                 dspcntr &= ~DISPPLANE_TILED;
2184
2185         /* Trickle feed must be disabled on ILK+ display planes */
2186         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2187
2188         I915_WRITE(reg, dspcntr);
2189
2190         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2191         intel_crtc->dspaddr_offset =
2192                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2193                                                fb->bits_per_pixel / 8,
2194                                                fb->pitches[0]);
2195         linear_offset -= intel_crtc->dspaddr_offset;
2196
2197         DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2198                       obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2199         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2200         I915_MODIFY_DISPBASE(DSPSURF(plane),
2201                              obj->gtt_offset + intel_crtc->dspaddr_offset);
2202         if (IS_HASWELL(dev)) {
2203                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2204         } else {
2205                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2206                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2207         }
2208         POSTING_READ(reg);
2209
2210         return 0;
2211 }
2212
2213 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2214 static int
2215 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2216                            int x, int y, enum mode_set_atomic state)
2217 {
2218         struct drm_device *dev = crtc->dev;
2219         struct drm_i915_private *dev_priv = dev->dev_private;
2220
2221         if (dev_priv->display.disable_fbc)
2222                 dev_priv->display.disable_fbc(dev);
2223         intel_increase_pllclock(crtc);
2224
2225         return dev_priv->display.update_plane(crtc, fb, x, y);
2226 }
2227
2228 static int
2229 intel_finish_fb(struct drm_framebuffer *old_fb)
2230 {
2231         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2232         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2233         bool was_interruptible = dev_priv->mm.interruptible;
2234         int ret;
2235
2236         /* Big Hammer, we also need to ensure that any pending
2237          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2238          * current scanout is retired before unpinning the old
2239          * framebuffer.
2240          *
2241          * This should only fail upon a hung GPU, in which case we
2242          * can safely continue.
2243          */
2244         dev_priv->mm.interruptible = false;
2245         ret = i915_gem_object_finish_gpu(obj);
2246         dev_priv->mm.interruptible = was_interruptible;
2247
2248         return ret;
2249 }
2250
2251 static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2252 {
2253         struct drm_device *dev = crtc->dev;
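        /*
         * The #if 0 blocks keep the Linux master_priv/sarea path for
         * reference; this port reads the SAREA pointer straight from
         * dev_priv->sarea_priv instead.
         */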
2254 #if 0
2255         struct drm_i915_master_private *master_priv;
2256 #else
2257         drm_i915_private_t *dev_priv = dev->dev_private;
2258 #endif
2259         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2260
2261 #if 0
2262         if (!dev->primary->master)
2263                 return;
2264
2265         master_priv = dev->primary->master->driver_priv;
2266         if (!master_priv->sarea_priv)
2267                 return;
2268 #else
2269         if (!dev_priv->sarea_priv)
2270                 return;
2271 #endif
2272
2273         switch (intel_crtc->pipe) {
2274         case 0:
2275 #if 0
2276                 master_priv->sarea_priv->pipeA_x = x;
2277                 master_priv->sarea_priv->pipeA_y = y;
2278 #else
2279                 dev_priv->sarea_priv->planeA_x = x;
2280                 dev_priv->sarea_priv->planeA_y = y;
2281 #endif
2282                 break;
2283         case 1:
2284 #if 0
2285                 master_priv->sarea_priv->pipeB_x = x;
2286                 master_priv->sarea_priv->pipeB_y = y;
2287 #else
2288                 dev_priv->sarea_priv->planeB_x = x;
2289                 dev_priv->sarea_priv->planeB_y = y;
2290 #endif
2291                 break;
2292         default:
2293                 break;
2294         }
2295 }
2296
2297 static int
2298 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2299                     struct drm_framebuffer *fb)
2300 {
2301         struct drm_device *dev = crtc->dev;
2302         struct drm_i915_private *dev_priv = dev->dev_private;
2303         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2304         struct drm_framebuffer *old_fb;
2305         int ret;
2306
2307         /* no fb bound */
2308         if (!fb) {
2309                 DRM_ERROR("No FB bound\n");
2310                 return 0;
2311         }
2312
2313         if (intel_crtc->plane > dev_priv->num_pipe) {
2314                 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2315                                 intel_crtc->plane,
2316                                 dev_priv->num_pipe);
2317                 return -EINVAL;
2318         }
2319
2320         mutex_lock(&dev->struct_mutex);
2321         ret = intel_pin_and_fence_fb_obj(dev,
2322                                          to_intel_framebuffer(fb)->obj,
2323                                          NULL);
2324         if (ret != 0) {
2325                 mutex_unlock(&dev->struct_mutex);
2326                 DRM_ERROR("pin & fence failed\n");
2327                 return ret;
2328         }
2329
2330         if (crtc->fb)
2331                 intel_finish_fb(crtc->fb);
2332
2333         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2334         if (ret) {
2335                 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2336                 mutex_unlock(&dev->struct_mutex);
2337                 DRM_ERROR("failed to update base address\n");
2338                 return ret;
2339         }
2340
2341         old_fb = crtc->fb;
2342         crtc->fb = fb;
2343         crtc->x = x;
2344         crtc->y = y;
2345
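        /*
         * Wait for a vblank before unpinning the old framebuffer so the
         * display has latched the new base address and is no longer
         * scanning out of the old object.
         */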
2346         if (old_fb) {
2347                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2348                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2349         }
2350
2351         intel_update_fbc(dev);
2352         mutex_unlock(&dev->struct_mutex);
2353
2354         intel_crtc_update_sarea_pos(crtc, x, y);
2355
2356         return 0;
2357 }
2358
2359 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2360 {
2361         struct drm_device *dev = crtc->dev;
2362         struct drm_i915_private *dev_priv = dev->dev_private;
2363         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2364         int pipe = intel_crtc->pipe;
2365         u32 reg, temp;
2366
2367         /* enable normal train */
2368         reg = FDI_TX_CTL(pipe);
2369         temp = I915_READ(reg);
2370         if (IS_IVYBRIDGE(dev)) {
2371                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2372                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2373         } else {
2374                 temp &= ~FDI_LINK_TRAIN_NONE;
2375                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2376         }
2377         I915_WRITE(reg, temp);
2378
2379         reg = FDI_RX_CTL(pipe);
2380         temp = I915_READ(reg);
2381         if (HAS_PCH_CPT(dev)) {
2382                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2383                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2384         } else {
2385                 temp &= ~FDI_LINK_TRAIN_NONE;
2386                 temp |= FDI_LINK_TRAIN_NONE;
2387         }
2388         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2389
2390         /* wait one idle pattern time */
2391         POSTING_READ(reg);
2392         udelay(1000);
2393
2394         /* IVB wants error correction enabled */
2395         if (IS_IVYBRIDGE(dev))
2396                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2397                            FDI_FE_ERRC_ENABLE);
2398 }
2399
2400 static void ivb_modeset_global_resources(struct drm_device *dev)
2401 {
2402         struct drm_i915_private *dev_priv = dev->dev_private;
2403         struct intel_crtc *pipe_B_crtc =
2404                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2405         struct intel_crtc *pipe_C_crtc =
2406                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2407         uint32_t temp;
2408
2409         /* When everything is off, disable FDI C so that we can enable FDI B
2410          * with all lanes. XXX: This misses the case where a pipe is not using
2411          * any PCH resources and so doesn't need any FDI lanes. */
2412         if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2413                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2414                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2415
2416                 temp = I915_READ(SOUTH_CHICKEN1);
2417                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2418                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2419                 I915_WRITE(SOUTH_CHICKEN1, temp);
2420         }
2421 }
2422
2423 /* The FDI link training functions for ILK/Ibexpeak. */
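/*
 * The sequence below: unmask the RX lock status bits, enable the CPU FDI
 * transmitter and PCH FDI receiver with training pattern 1, poll FDI_RX_IIR
 * for bit lock, then switch both ends to training pattern 2 and poll for
 * symbol lock.  intel_fdi_normal_train() above is what later moves the link
 * from the training patterns to the normal/idle pattern.
 */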
2424 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2425 {
2426         struct drm_device *dev = crtc->dev;
2427         struct drm_i915_private *dev_priv = dev->dev_private;
2428         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2429         int pipe = intel_crtc->pipe;
2430         int plane = intel_crtc->plane;
2431         u32 reg, temp, tries;
2432
2433         /* FDI needs bits from pipe & plane first */
2434         assert_pipe_enabled(dev_priv, pipe);
2435         assert_plane_enabled(dev_priv, plane);
2436
2437         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2438            for train result */
2439         reg = FDI_RX_IMR(pipe);
2440         temp = I915_READ(reg);
2441         temp &= ~FDI_RX_SYMBOL_LOCK;
2442         temp &= ~FDI_RX_BIT_LOCK;
2443         I915_WRITE(reg, temp);
2444         I915_READ(reg);
2445         udelay(150);
2446
2447         /* enable CPU FDI TX and PCH FDI RX */
2448         reg = FDI_TX_CTL(pipe);
2449         temp = I915_READ(reg);
2450         temp &= ~(7 << 19);
2451         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2452         temp &= ~FDI_LINK_TRAIN_NONE;
2453         temp |= FDI_LINK_TRAIN_PATTERN_1;
2454         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2455
2456         reg = FDI_RX_CTL(pipe);
2457         temp = I915_READ(reg);
2458         temp &= ~FDI_LINK_TRAIN_NONE;
2459         temp |= FDI_LINK_TRAIN_PATTERN_1;
2460         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2461
2462         POSTING_READ(reg);
2463         udelay(150);
2464
2465         /* Ironlake workaround, enable clock pointer after FDI enable */
2466         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2467         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2468                    FDI_RX_PHASE_SYNC_POINTER_EN);
2469
2470         reg = FDI_RX_IIR(pipe);
2471         for (tries = 0; tries < 5; tries++) {
2472                 temp = I915_READ(reg);
2473                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2474
2475                 if ((temp & FDI_RX_BIT_LOCK)) {
2476                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2477                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2478                         break;
2479                 }
2480         }
2481         if (tries == 5)
2482                 DRM_ERROR("FDI train 1 fail!\n");
2483
2484         /* Train 2 */
2485         reg = FDI_TX_CTL(pipe);
2486         temp = I915_READ(reg);
2487         temp &= ~FDI_LINK_TRAIN_NONE;
2488         temp |= FDI_LINK_TRAIN_PATTERN_2;
2489         I915_WRITE(reg, temp);
2490
2491         reg = FDI_RX_CTL(pipe);
2492         temp = I915_READ(reg);
2493         temp &= ~FDI_LINK_TRAIN_NONE;
2494         temp |= FDI_LINK_TRAIN_PATTERN_2;
2495         I915_WRITE(reg, temp);
2496
2497         POSTING_READ(reg);
2498         udelay(150);
2499
2500         reg = FDI_RX_IIR(pipe);
2501         for (tries = 0; tries < 5; tries++) {
2502                 temp = I915_READ(reg);
2503                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2504
2505                 if (temp & FDI_RX_SYMBOL_LOCK) {
2506                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2507                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2508                         break;
2509                 }
2510         }
2511         if (tries == 5)
2512                 DRM_ERROR("FDI train 2 fail!\n");
2513
2514         DRM_DEBUG_KMS("FDI train done\n");
2515
2516 }
2517
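/*
 * Voltage swing / pre-emphasis combinations that the SNB and IVB training
 * loops below step through, in order, until the receiver reports lock.
 */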
2518 static const int snb_b_fdi_train_param[] = {
2519         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2520         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2521         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2522         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2523 };
2524
2525 /* The FDI link training functions for SNB/Cougarpoint. */
2526 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2527 {
2528         struct drm_device *dev = crtc->dev;
2529         struct drm_i915_private *dev_priv = dev->dev_private;
2530         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2531         int pipe = intel_crtc->pipe;
2532         u32 reg, temp, i, retry;
2533
2534         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2535            for train result */
2536         reg = FDI_RX_IMR(pipe);
2537         temp = I915_READ(reg);
2538         temp &= ~FDI_RX_SYMBOL_LOCK;
2539         temp &= ~FDI_RX_BIT_LOCK;
2540         I915_WRITE(reg, temp);
2541
2542         POSTING_READ(reg);
2543         udelay(150);
2544
2545         /* enable CPU FDI TX and PCH FDI RX */
2546         reg = FDI_TX_CTL(pipe);
2547         temp = I915_READ(reg);
2548         temp &= ~(7 << 19);
2549         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2550         temp &= ~FDI_LINK_TRAIN_NONE;
2551         temp |= FDI_LINK_TRAIN_PATTERN_1;
2552         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2553         /* SNB-B */
2554         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2555         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2556
2557         I915_WRITE(FDI_RX_MISC(pipe),
2558                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2559
2560         reg = FDI_RX_CTL(pipe);
2561         temp = I915_READ(reg);
2562         if (HAS_PCH_CPT(dev)) {
2563                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2564                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2565         } else {
2566                 temp &= ~FDI_LINK_TRAIN_NONE;
2567                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2568         }
2569         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2570
2571         POSTING_READ(reg);
2572         udelay(150);
2573
2574         for (i = 0; i < 4; i++) {
2575                 reg = FDI_TX_CTL(pipe);
2576                 temp = I915_READ(reg);
2577                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2578                 temp |= snb_b_fdi_train_param[i];
2579                 I915_WRITE(reg, temp);
2580
2581                 POSTING_READ(reg);
2582                 udelay(500);
2583
2584                 for (retry = 0; retry < 5; retry++) {
2585                         reg = FDI_RX_IIR(pipe);
2586                         temp = I915_READ(reg);
2587                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2588                         if (temp & FDI_RX_BIT_LOCK) {
2589                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2590                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2591                                 break;
2592                         }
2593                         udelay(50);
2594                 }
2595                 if (retry < 5)
2596                         break;
2597         }
2598         if (i == 4)
2599                 DRM_ERROR("FDI train 1 fail!\n");
2600
2601         /* Train 2 */
2602         reg = FDI_TX_CTL(pipe);
2603         temp = I915_READ(reg);
2604         temp &= ~FDI_LINK_TRAIN_NONE;
2605         temp |= FDI_LINK_TRAIN_PATTERN_2;
2606         if (IS_GEN6(dev)) {
2607                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2608                 /* SNB-B */
2609                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2610         }
2611         I915_WRITE(reg, temp);
2612
2613         reg = FDI_RX_CTL(pipe);
2614         temp = I915_READ(reg);
2615         if (HAS_PCH_CPT(dev)) {
2616                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2617                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2618         } else {
2619                 temp &= ~FDI_LINK_TRAIN_NONE;
2620                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2621         }
2622         I915_WRITE(reg, temp);
2623
2624         POSTING_READ(reg);
2625         udelay(150);
2626
2627         for (i = 0; i < 4; i++) {
2628                 reg = FDI_TX_CTL(pipe);
2629                 temp = I915_READ(reg);
2630                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2631                 temp |= snb_b_fdi_train_param[i];
2632                 I915_WRITE(reg, temp);
2633
2634                 POSTING_READ(reg);
2635                 udelay(500);
2636
2637                 for (retry = 0; retry < 5; retry++) {
2638                         reg = FDI_RX_IIR(pipe);
2639                         temp = I915_READ(reg);
2640                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2641                         if (temp & FDI_RX_SYMBOL_LOCK) {
2642                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2643                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
2644                                 break;
2645                         }
2646                         udelay(50);
2647                 }
2648                 if (retry < 5)
2649                         break;
2650         }
2651         if (i == 4)
2652                 DRM_ERROR("FDI train 2 fail!\n");
2653
2654         DRM_DEBUG_KMS("FDI train done.\n");
2655 }
2656
2657 /* Manual link training for Ivy Bridge A0 parts */
2658 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2659 {
2660         struct drm_device *dev = crtc->dev;
2661         struct drm_i915_private *dev_priv = dev->dev_private;
2662         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2663         int pipe = intel_crtc->pipe;
2664         u32 reg, temp, i;
2665
2666         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2667            for train result */
2668         reg = FDI_RX_IMR(pipe);
2669         temp = I915_READ(reg);
2670         temp &= ~FDI_RX_SYMBOL_LOCK;
2671         temp &= ~FDI_RX_BIT_LOCK;
2672         I915_WRITE(reg, temp);
2673
2674         POSTING_READ(reg);
2675         udelay(150);
2676
2677         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2678                       I915_READ(FDI_RX_IIR(pipe)));
2679
2680         /* enable CPU FDI TX and PCH FDI RX */
2681         reg = FDI_TX_CTL(pipe);
2682         temp = I915_READ(reg);
2683         temp &= ~(7 << 19);
2684         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2685         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2686         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2687         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2688         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2689         temp |= FDI_COMPOSITE_SYNC;
2690         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2691
2692         I915_WRITE(FDI_RX_MISC(pipe),
2693                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2694
2695         reg = FDI_RX_CTL(pipe);
2696         temp = I915_READ(reg);
2697         temp &= ~FDI_LINK_TRAIN_AUTO;
2698         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2699         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2700         temp |= FDI_COMPOSITE_SYNC;
2701         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2702
2703         POSTING_READ(reg);
2704         udelay(150);
2705
2706         for (i = 0; i < 4; i++) {
2707                 reg = FDI_TX_CTL(pipe);
2708                 temp = I915_READ(reg);
2709                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2710                 temp |= snb_b_fdi_train_param[i];
2711                 I915_WRITE(reg, temp);
2712
2713                 POSTING_READ(reg);
2714                 udelay(500);
2715
2716                 reg = FDI_RX_IIR(pipe);
2717                 temp = I915_READ(reg);
2718                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2719
2720                 if (temp & FDI_RX_BIT_LOCK ||
2721                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2722                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2723                         DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2724                         break;
2725                 }
2726         }
2727         if (i == 4)
2728                 DRM_ERROR("FDI train 1 fail!\n");
2729
2730         /* Train 2 */
2731         reg = FDI_TX_CTL(pipe);
2732         temp = I915_READ(reg);
2733         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2734         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2735         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2736         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2737         I915_WRITE(reg, temp);
2738
2739         reg = FDI_RX_CTL(pipe);
2740         temp = I915_READ(reg);
2741         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2742         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2743         I915_WRITE(reg, temp);
2744
2745         POSTING_READ(reg);
2746         udelay(150);
2747
2748         for (i = 0; i < 4; i++) {
2749                 reg = FDI_TX_CTL(pipe);
2750                 temp = I915_READ(reg);
2751                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2752                 temp |= snb_b_fdi_train_param[i];
2753                 I915_WRITE(reg, temp);
2754
2755                 POSTING_READ(reg);
2756                 udelay(500);
2757
2758                 reg = FDI_RX_IIR(pipe);
2759                 temp = I915_READ(reg);
2760                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2761
2762                 if (temp & FDI_RX_SYMBOL_LOCK) {
2763                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2764                         DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
2765                         break;
2766                 }
2767         }
2768         if (i == 4)
2769                 DRM_ERROR("FDI train 2 fail!\n");
2770
2771         DRM_DEBUG_KMS("FDI train done.\n");
2772 }
2773
2774 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2775 {
2776         struct drm_device *dev = intel_crtc->base.dev;
2777         struct drm_i915_private *dev_priv = dev->dev_private;
2778         int pipe = intel_crtc->pipe;
2779         u32 reg, temp;
2780
2781
2782         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2783         reg = FDI_RX_CTL(pipe);
2784         temp = I915_READ(reg);
2785         temp &= ~((0x7 << 19) | (0x7 << 16));
2786         temp |= (intel_crtc->fdi_lanes - 1) << 19;
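        /* BPC in FDI rx is consistent with that in PIPECONF */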
2787         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2788         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2789
2790         POSTING_READ(reg);
2791         udelay(200);
2792
2793         /* Switch from Rawclk to PCDclk */
2794         temp = I915_READ(reg);
2795         I915_WRITE(reg, temp | FDI_PCDCLK);
2796
2797         POSTING_READ(reg);
2798         udelay(200);
2799
2800         /* Enable CPU FDI TX PLL, always on for Ironlake */
2801         reg = FDI_TX_CTL(pipe);
2802         temp = I915_READ(reg);
2803         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2804                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2805
2806                 POSTING_READ(reg);
2807                 udelay(100);
2808         }
2809 }
2810
2811 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2812 {
2813         struct drm_device *dev = intel_crtc->base.dev;
2814         struct drm_i915_private *dev_priv = dev->dev_private;
2815         int pipe = intel_crtc->pipe;
2816         u32 reg, temp;
2817
2818         /* Switch from PCDclk to Rawclk */
2819         reg = FDI_RX_CTL(pipe);
2820         temp = I915_READ(reg);
2821         I915_WRITE(reg, temp & ~FDI_PCDCLK);
2822
2823         /* Disable CPU FDI TX PLL */
2824         reg = FDI_TX_CTL(pipe);
2825         temp = I915_READ(reg);
2826         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2827
2828         POSTING_READ(reg);
2829         udelay(100);
2830
2831         reg = FDI_RX_CTL(pipe);
2832         temp = I915_READ(reg);
2833         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2834
2835         /* Wait for the clocks to turn off. */
2836         POSTING_READ(reg);
2837         udelay(100);
2838 }
2839
2840 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2841 {
2842         struct drm_device *dev = crtc->dev;
2843         struct drm_i915_private *dev_priv = dev->dev_private;
2844         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2845         int pipe = intel_crtc->pipe;
2846         u32 reg, temp;
2847
2848         /* disable CPU FDI tx and PCH FDI rx */
2849         reg = FDI_TX_CTL(pipe);
2850         temp = I915_READ(reg);
2851         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2852         POSTING_READ(reg);
2853
2854         reg = FDI_RX_CTL(pipe);
2855         temp = I915_READ(reg);
2856         temp &= ~(0x7 << 16);
2857         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2858         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2859
2860         POSTING_READ(reg);
2861         udelay(100);
2862
2863         /* Ironlake workaround, disable clock pointer after disabling FDI */
2864         if (HAS_PCH_IBX(dev)) {
2865                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2866         }
2867
2868         /* still set train pattern 1 */
2869         reg = FDI_TX_CTL(pipe);
2870         temp = I915_READ(reg);
2871         temp &= ~FDI_LINK_TRAIN_NONE;
2872         temp |= FDI_LINK_TRAIN_PATTERN_1;
2873         I915_WRITE(reg, temp);
2874
2875         reg = FDI_RX_CTL(pipe);
2876         temp = I915_READ(reg);
2877         if (HAS_PCH_CPT(dev)) {
2878                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2879                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2880         } else {
2881                 temp &= ~FDI_LINK_TRAIN_NONE;
2882                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2883         }
2884         /* BPC in FDI rx is consistent with that in PIPECONF */
2885         temp &= ~(0x07 << 16);
2886         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2887         I915_WRITE(reg, temp);
2888
2889         POSTING_READ(reg);
2890         udelay(100);
2891 }
2892
2893 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2894 {
2895         struct drm_device *dev = crtc->dev;
2896         struct drm_i915_private *dev_priv = dev->dev_private;
2897         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2898         bool pending;
2899
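             /* If the GPU is being reset, or has been reset since this flip
              * was queued, the flip may never complete normally; report it as
              * not pending so callers don't wait on it forever. */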
2900         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2901             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2902                 return false;
2903
2904         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2905         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2906         lockmgr(&dev->event_lock, LK_RELEASE);
2907
2908         return pending;
2909 }
2910
2911 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2912 {
2913         struct drm_device *dev = crtc->dev;
2914         struct drm_i915_private *dev_priv = dev->dev_private;
2915
2916         if (crtc->fb == NULL)
2917                 return;
2918
2919         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
2920
2921         wait_event(dev_priv->pending_flip_queue,
2922                    !intel_crtc_has_pending_flip(crtc));
2923
2924         mutex_lock(&dev->struct_mutex);
2925         intel_finish_fb(crtc->fb);
2926         mutex_unlock(&dev->struct_mutex);
2927 }
2928
2929 static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
2930 {
2931         struct drm_device *dev = crtc->dev;
2932         struct intel_encoder *intel_encoder;
2933
2934         /*
2935          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2936          * must be driven by its own crtc; no sharing is possible.
2937          */
2938         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2939                 switch (intel_encoder->type) {
2940                 case INTEL_OUTPUT_EDP:
2941                         if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2942                                 return false;
2943                         continue;
2944                 }
2945         }
2946
2947         return true;
2948 }
2949
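     /* On Haswell only the analog/VGA output goes out through the PCH (over
      * the FDI link); the digital outputs are driven directly by the CPU
      * DDIs, so only an analog-equipped crtc needs the PCH resources. */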
2950 static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2951 {
2952         return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2953 }
2954
2955 /* Program iCLKIP clock to the desired frequency */
2956 static void lpt_program_iclkip(struct drm_crtc *crtc)
2957 {
2958         struct drm_device *dev = crtc->dev;
2959         struct drm_i915_private *dev_priv = dev->dev_private;
2960         u32 divsel, phaseinc, auxdiv, phasedir = 0;
2961         u32 temp;
2962
2963         mutex_lock(&dev_priv->dpio_lock);
2964
2965         /* It is necessary to gate the pixel clock while programming
2966          * the divisors, and ungate it again when done.
2967          */
2968         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2969
2970         /* Disable SSCCTL */
2971         intel_sbi_write(dev_priv, SBI_SSCCTL6,
2972                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2973                                 SBI_SSCCTL_DISABLE,
2974                         SBI_ICLK);
2975
2976         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2977         if (crtc->mode.clock == 20000) {
2978                 auxdiv = 1;
2979                 divsel = 0x41;
2980                 phaseinc = 0x20;
2981         } else {
2982                 /* The iCLK virtual clock root frequency is in MHz,
2983                  * but crtc->mode.clock is in kHz. To get the divisors
2984                  * we divide one by the other, so convert the virtual
2985                  * clock root frequency to kHz here as well to keep
2986                  * the precision.
2987                  */
2988                 u32 iclk_virtual_root_freq = 172800 * 1000;
2989                 u32 iclk_pi_range = 64;
2990                 u32 desired_divisor, msb_divisor_value, pi_value;
2991
2992                 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2993                 msb_divisor_value = desired_divisor / iclk_pi_range;
2994                 pi_value = desired_divisor % iclk_pi_range;
2995
2996                 auxdiv = 0;
2997                 divsel = msb_divisor_value - 2;
2998                 phaseinc = pi_value;
2999         }
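             /* Worked example: for, say, a 108000 kHz pixel clock,
              * desired_divisor = 172800000 / 108000 = 1600, so
              * msb_divisor_value = 1600 / 64 = 25 and pi_value = 0,
              * giving divsel = 0x17, phaseinc = 0 and auxdiv = 0. */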
3000
3001         /* This should not happen with any sane values */
3002         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3003                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3004         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3005                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3006
3007         DRM_DEBUG_KMS("iCLKIP clock: found settings for %d kHz pixel clock: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3008                         crtc->mode.clock,
3009                         auxdiv,
3010                         divsel,
3011                         phasedir,
3012                         phaseinc);
3013
3014         /* Program SSCDIVINTPHASE6 */
3015         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3016         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3017         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3018         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3019         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3020         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3021         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3022         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3023
3024         /* Program SSCAUXDIV */
3025         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3026         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3027         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3028         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3029
3030         /* Enable modulator and associated divider */
3031         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3032         temp &= ~SBI_SSCCTL_DISABLE;
3033         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3034
3035         /* Wait for initialization time */
3036         udelay(24);
3037
3038         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3039
3040         mutex_unlock(&dev_priv->dpio_lock);
3041 }
3042
3043 /*
3044  * Enable PCH resources required for PCH ports:
3045  *   - PCH PLLs
3046  *   - FDI training & RX/TX
3047  *   - update transcoder timings
3048  *   - DP transcoding bits
3049  *   - transcoder
3050  */
3051 static void ironlake_pch_enable(struct drm_crtc *crtc)
3052 {
3053         struct drm_device *dev = crtc->dev;
3054         struct drm_i915_private *dev_priv = dev->dev_private;
3055         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3056         int pipe = intel_crtc->pipe;
3057         u32 reg, temp;
3058
3059         assert_transcoder_disabled(dev_priv, pipe);
3060
3061         /* Write the TU size bits before fdi link training, so that error
3062          * detection works. */
3063         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3064                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3065
3066         /* For PCH output, training FDI link */
3067         dev_priv->display.fdi_link_train(crtc);
3068
3069         /* XXX: PCH PLLs can be enabled any time before we enable the PCH
3070          * transcoder, and we actually should do this to not upset any PCH
3071          * transcoder that already uses the clock when we share it.
3072          *
3073          * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3074          * unconditionally resets the pll - we need that to have the right LVDS
3075          * enable sequence. */
3076         ironlake_enable_pch_pll(intel_crtc);
3077
3078         if (HAS_PCH_CPT(dev)) {
3079                 u32 sel;
3080
3081                 temp = I915_READ(PCH_DPLL_SEL);
3082                 switch (pipe) {
3083                 default:
3084                 case 0:
3085                         temp |= TRANSA_DPLL_ENABLE;
3086                         sel = TRANSA_DPLLB_SEL;
3087                         break;
3088                 case 1:
3089                         temp |= TRANSB_DPLL_ENABLE;
3090                         sel = TRANSB_DPLLB_SEL;
3091                         break;
3092                 case 2:
3093                         temp |= TRANSC_DPLL_ENABLE;
3094                         sel = TRANSC_DPLLB_SEL;
3095                         break;
3096                 }
3097                 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3098                         temp |= sel;
3099                 else
3100                         temp &= ~sel;
3101                 I915_WRITE(PCH_DPLL_SEL, temp);
3102         }
3103
3104         /* set transcoder timing, panel must allow it */
3105         assert_panel_unlocked(dev_priv, pipe);
3106         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3107         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3108         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3109
3110         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3111         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3112         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3113         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3114
3115         intel_fdi_normal_train(crtc);
3116
3117         /* For PCH DP, enable TRANS_DP_CTL */
3118         if (HAS_PCH_CPT(dev) &&
3119             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3120              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3121                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3122                 reg = TRANS_DP_CTL(pipe);
3123                 temp = I915_READ(reg);
3124                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3125                           TRANS_DP_SYNC_MASK |
3126                           TRANS_DP_BPC_MASK);
3127                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3128                          TRANS_DP_ENH_FRAMING);
3129                 temp |= bpc << 9; /* same format but at 11:9 */
3130
3131                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3132                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3133                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3134                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3135
3136                 switch (intel_trans_dp_port_sel(crtc)) {
3137                 case PCH_DP_B:
3138                         temp |= TRANS_DP_PORT_SEL_B;
3139                         break;
3140                 case PCH_DP_C:
3141                         temp |= TRANS_DP_PORT_SEL_C;
3142                         break;
3143                 case PCH_DP_D:
3144                         temp |= TRANS_DP_PORT_SEL_D;
3145                         break;
3146                 default:
3147                         BUG();
3148                 }
3149
3150                 I915_WRITE(reg, temp);
3151         }
3152
3153         ironlake_enable_pch_transcoder(dev_priv, pipe);
3154 }
3155
3156 static void lpt_pch_enable(struct drm_crtc *crtc)
3157 {
3158         struct drm_device *dev = crtc->dev;
3159         struct drm_i915_private *dev_priv = dev->dev_private;
3160         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3161         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3162
3163         assert_transcoder_disabled(dev_priv, TRANSCODER_A);
3164
3165         lpt_program_iclkip(crtc);
3166
3167         /* Set transcoder timing. */
3168         I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3169         I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3170         I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
3171
3172         I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3173         I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3174         I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
3175         I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3176
3177         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3178 }
3179
3180 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3181 {
3182         struct intel_pch_pll *pll = intel_crtc->pch_pll;
3183
3184         if (pll == NULL)
3185                 return;
3186
3187         if (pll->refcount == 0) {
3188                 WARN(1, "bad PCH PLL refcount\n");
3189                 return;
3190         }
3191
3192         --pll->refcount;
3193         intel_crtc->pch_pll = NULL;
3194 }
3195
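     /* Find a PCH PLL for this crtc: reuse the PLL already attached to it if
      * any; on IBX use the fixed per-pipe PLL; otherwise try to share a PLL
      * that is already programmed with identical dpll/fp values, and finally
      * fall back to any unused PLL.  Returns NULL if every PLL is taken. */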
3196 static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3197 {
3198         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3199         struct intel_pch_pll *pll;
3200         int i;
3201
3202         pll = intel_crtc->pch_pll;
3203         if (pll) {
3204                 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3205                               intel_crtc->base.base.id, pll->pll_reg);
3206                 goto prepare;
3207         }
3208
3209         if (HAS_PCH_IBX(dev_priv->dev)) {
3210                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3211                 i = intel_crtc->pipe;
3212                 pll = &dev_priv->pch_plls[i];
3213
3214                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3215                               intel_crtc->base.base.id, pll->pll_reg);
3216
3217                 goto found;
3218         }
3219
3220         for (i = 0; i < dev_priv->num_pch_pll; i++) {
3221                 pll = &dev_priv->pch_plls[i];
3222
3223                 /* First pass: only consider PLLs already in use, to see if one can be shared */
3224                 if (pll->refcount == 0)
3225                         continue;
3226
3227                 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3228                     fp == I915_READ(pll->fp0_reg)) {
3229                         DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n",
3230                                       intel_crtc->base.base.id,
3231                                       pll->pll_reg, pll->refcount, pll->active);
3232
3233                         goto found;
3234                 }
3235         }
3236
3237         /* OK, no matching timings - maybe there's a free one? */
3238         for (i = 0; i < dev_priv->num_pch_pll; i++) {
3239                 pll = &dev_priv->pch_plls[i];
3240                 if (pll->refcount == 0) {
3241                         DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3242                                       intel_crtc->base.base.id, pll->pll_reg);
3243                         goto found;
3244                 }
3245         }
3246
3247         return NULL;
3248
3249 found:
3250         intel_crtc->pch_pll = pll;
3251         pll->refcount++;
3252         DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3253 prepare: /* separate function? */
3254         DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3255
3256         /* Wait for the clocks to stabilize before rewriting the regs */
3257         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3258         POSTING_READ(pll->pll_reg);
3259         udelay(150);
3260
3261         I915_WRITE(pll->fp0_reg, fp);
3262         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
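             /* The PLL is left programmed but with its VCO disabled; the
              * caller turns it on later via ironlake_enable_pch_pll(). */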
3263         pll->on = false;
3264         return pll;
3265 }
3266
3267 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3268 {
3269         struct drm_i915_private *dev_priv = dev->dev_private;
3270         int dslreg = PIPEDSL(pipe);
3271         u32 temp;
3272
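             /* PIPEDSL reports the scanline currently being read out; if it
              * does not move within the timeouts below, the pipe never
              * started scanning out after the mode set. */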
3273         temp = I915_READ(dslreg);
3274         udelay(500);
3275         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3276                 if (wait_for(I915_READ(dslreg) != temp, 5))
3277                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3278         }
3279 }
3280
3281 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3282 {
3283         struct drm_device *dev = crtc->dev;
3284         struct drm_i915_private *dev_priv = dev->dev_private;
3285         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3286         struct intel_encoder *encoder;
3287         int pipe = intel_crtc->pipe;
3288         int plane = intel_crtc->plane;
3289         u32 temp;
3290         bool is_pch_port;
3291
3292         WARN_ON(!crtc->enabled);
3293
3294         if (intel_crtc->active)
3295                 return;
3296
3297         intel_crtc->active = true;
3298         intel_update_watermarks(dev);
3299
3300         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3301                 temp = I915_READ(PCH_LVDS);
3302                 if ((temp & LVDS_PORT_EN) == 0)
3303                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3304         }
3305
3306         is_pch_port = ironlake_crtc_driving_pch(crtc);
3307
3308         if (is_pch_port) {
3309                 /* Note: FDI PLL enabling _must_ be done before we enable the
3310                  * cpu pipes, hence this is separate from all the other fdi/pch
3311                  * enabling. */
3312                 ironlake_fdi_pll_enable(intel_crtc);
3313         } else {
3314                 assert_fdi_tx_disabled(dev_priv, pipe);
3315                 assert_fdi_rx_disabled(dev_priv, pipe);
3316         }
3317
3318         for_each_encoder_on_crtc(dev, crtc, encoder)
3319                 if (encoder->pre_enable)
3320                         encoder->pre_enable(encoder);
3321
3322         /* Enable panel fitting for LVDS and eDP */
3323         if (dev_priv->pch_pf_size &&
3324             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3325              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3326                 /* Force use of hard-coded filter coefficients
3327                  * as some pre-programmed values are broken,
3328                  * e.g. x201.
3329                  */
3330                 if (IS_IVYBRIDGE(dev))
3331                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3332                                                  PF_PIPE_SEL_IVB(pipe));
3333                 else
3334                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3335                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3336                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3337         }
3338
3339         /*
3340          * On ILK+ LUT must be loaded before the pipe is running but with
3341          * clocks enabled
3342          */
3343         intel_crtc_load_lut(crtc);
3344
3345         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3346         intel_enable_plane(dev_priv, plane, pipe);
3347
3348         if (is_pch_port)
3349                 ironlake_pch_enable(crtc);
3350
3351         mutex_lock(&dev->struct_mutex);
3352         intel_update_fbc(dev);
3353         mutex_unlock(&dev->struct_mutex);
3354
3355         intel_crtc_update_cursor(crtc, true);
3356
3357         for_each_encoder_on_crtc(dev, crtc, encoder)
3358                 encoder->enable(encoder);
3359
3360         if (HAS_PCH_CPT(dev))
3361                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3362
3363         /*
3364          * There seems to be a race in PCH platform hw (at least on some
3365          * outputs) where an enabled pipe still completes any pageflip right
3366          * away (as if the pipe is off) instead of waiting for vblank. As soon
3367          * as the first vblank has happened, everything works as expected. Hence just
3368          * wait for one vblank before returning to avoid strange things
3369          * happening.
3370          */
3371         intel_wait_for_vblank(dev, intel_crtc->pipe);
3372 }
3373
3374 static void haswell_crtc_enable(struct drm_crtc *crtc)
3375 {
3376         struct drm_device *dev = crtc->dev;
3377         struct drm_i915_private *dev_priv = dev->dev_private;
3378         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3379         struct intel_encoder *encoder;
3380         int pipe = intel_crtc->pipe;
3381         int plane = intel_crtc->plane;
3382         bool is_pch_port;
3383
3384         WARN_ON(!crtc->enabled);
3385
3386         if (intel_crtc->active)
3387                 return;
3388
3389         intel_crtc->active = true;
3390         intel_update_watermarks(dev);
3391
3392         is_pch_port = haswell_crtc_driving_pch(crtc);
3393
3394         if (is_pch_port)
3395                 dev_priv->display.fdi_link_train(crtc);
3396
3397         for_each_encoder_on_crtc(dev, crtc, encoder)
3398                 if (encoder->pre_enable)
3399                         encoder->pre_enable(encoder);
3400
3401         intel_ddi_enable_pipe_clock(intel_crtc);
3402
3403         /* Enable panel fitting for eDP */
3404         if (dev_priv->pch_pf_size &&
3405             intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3406                 /* Force use of hard-coded filter coefficients
3407                  * as some pre-programmed values are broken,
3408                  * e.g. x201.
3409                  */
3410                 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3411                                          PF_PIPE_SEL_IVB(pipe));
3412                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3413                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3414         }
3415
3416         /*
3417          * On ILK+ LUT must be loaded before the pipe is running but with
3418          * clocks enabled
3419          */
3420         intel_crtc_load_lut(crtc);
3421
3422         intel_ddi_set_pipe_settings(crtc);
3423         intel_ddi_enable_pipe_func(crtc);
3424
3425         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3426         intel_enable_plane(dev_priv, plane, pipe);
3427
3428         if (is_pch_port)
3429                 lpt_pch_enable(crtc);
3430
3431         mutex_lock(&dev->struct_mutex);
3432         intel_update_fbc(dev);
3433         mutex_unlock(&dev->struct_mutex);
3434
3435         intel_crtc_update_cursor(crtc, true);
3436
3437         for_each_encoder_on_crtc(dev, crtc, encoder)
3438                 encoder->enable(encoder);
3439
3440         /*
3441          * There seems to be a race in PCH platform hw (at least on some
3442          * outputs) where an enabled pipe still completes any pageflip right
3443          * away (as if the pipe is off) instead of waiting for vblank. As soon
3444          * as the first vblank has happened, everything works as expected. Hence just
3445          * wait for one vblank before returning to avoid strange things
3446          * happening.
3447          */
3448         intel_wait_for_vblank(dev, intel_crtc->pipe);
3449 }
3450
3451 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3452 {
3453         struct drm_device *dev = crtc->dev;
3454         struct drm_i915_private *dev_priv = dev->dev_private;
3455         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3456         struct intel_encoder *encoder;
3457         int pipe = intel_crtc->pipe;
3458         int plane = intel_crtc->plane;
3459         u32 reg, temp;
3460
3461
3462         if (!intel_crtc->active)
3463                 return;
3464
3465         for_each_encoder_on_crtc(dev, crtc, encoder)
3466                 encoder->disable(encoder);
3467
3468         intel_crtc_wait_for_pending_flips(crtc);
3469         drm_vblank_off(dev, pipe);
3470         intel_crtc_update_cursor(crtc, false);
3471
3472         intel_disable_plane(dev_priv, plane, pipe);
3473
3474         if (dev_priv->cfb_plane == plane)
3475                 intel_disable_fbc(dev);
3476
3477         intel_disable_pipe(dev_priv, pipe);
3478
3479         /* Disable PF */
3480         I915_WRITE(PF_CTL(pipe), 0);
3481         I915_WRITE(PF_WIN_SZ(pipe), 0);
3482
3483         for_each_encoder_on_crtc(dev, crtc, encoder)
3484                 if (encoder->post_disable)
3485                         encoder->post_disable(encoder);
3486
3487         ironlake_fdi_disable(crtc);
3488
3489         ironlake_disable_pch_transcoder(dev_priv, pipe);
3490
3491         if (HAS_PCH_CPT(dev)) {
3492                 /* disable TRANS_DP_CTL */
3493                 reg = TRANS_DP_CTL(pipe);
3494                 temp = I915_READ(reg);
3495                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3496                 temp |= TRANS_DP_PORT_SEL_NONE;
3497                 I915_WRITE(reg, temp);
3498
3499                 /* disable DPLL_SEL */
3500                 temp = I915_READ(PCH_DPLL_SEL);
3501                 switch (pipe) {
3502                 case 0:
3503                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3504                         break;
3505                 case 1:
3506                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3507                         break;
3508                 case 2:
3509                         /* C shares PLL A or B */
3510                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3511                         break;
3512                 default:
3513                         BUG(); /* wtf */
3514                 }
3515                 I915_WRITE(PCH_DPLL_SEL, temp);
3516         }
3517
3518         /* disable PCH DPLL */
3519         intel_disable_pch_pll(intel_crtc);
3520
3521         ironlake_fdi_pll_disable(intel_crtc);
3522
3523         intel_crtc->active = false;
3524         intel_update_watermarks(dev);
3525
3526         mutex_lock(&dev->struct_mutex);
3527         intel_update_fbc(dev);
3528         mutex_unlock(&dev->struct_mutex);
3529 }
3530
3531 static void haswell_crtc_disable(struct drm_crtc *crtc)
3532 {
3533         struct drm_device *dev = crtc->dev;
3534         struct drm_i915_private *dev_priv = dev->dev_private;
3535         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3536         struct intel_encoder *encoder;
3537         int pipe = intel_crtc->pipe;
3538         int plane = intel_crtc->plane;
3539         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3540         bool is_pch_port;
3541
3542         if (!intel_crtc->active)
3543                 return;
3544
3545         is_pch_port = haswell_crtc_driving_pch(crtc);
3546
3547         for_each_encoder_on_crtc(dev, crtc, encoder)
3548                 encoder->disable(encoder);
3549
3550         intel_crtc_wait_for_pending_flips(crtc);
3551         drm_vblank_off(dev, pipe);
3552         intel_crtc_update_cursor(crtc, false);
3553
3554         intel_disable_plane(dev_priv, plane, pipe);
3555
3556         if (dev_priv->cfb_plane == plane)
3557                 intel_disable_fbc(dev);
3558
3559         intel_disable_pipe(dev_priv, pipe);
3560
3561         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3562
3563         /* Disable PF */
3564         I915_WRITE(PF_CTL(pipe), 0);
3565         I915_WRITE(PF_WIN_SZ(pipe), 0);
3566
3567         intel_ddi_disable_pipe_clock(intel_crtc);
3568
3569         for_each_encoder_on_crtc(dev, crtc, encoder)
3570                 if (encoder->post_disable)
3571                         encoder->post_disable(encoder);
3572
3573         if (is_pch_port) {
3574                 lpt_disable_pch_transcoder(dev_priv);
3575                 intel_ddi_fdi_disable(crtc);
3576         }
3577
3578         intel_crtc->active = false;
3579         intel_update_watermarks(dev);
3580
3581         mutex_lock(&dev->struct_mutex);
3582         intel_update_fbc(dev);
3583         mutex_unlock(&dev->struct_mutex);
3584 }
3585
3586 static void ironlake_crtc_off(struct drm_crtc *crtc)
3587 {
3588         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3589         intel_put_pch_pll(intel_crtc);
3590 }
3591
3592 static void haswell_crtc_off(struct drm_crtc *crtc)
3593 {
3594         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3595
3596         /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3597          * start using it. */
3598         intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
3599
3600         intel_ddi_put_crtc_pll(crtc);
3601 }
3602
3603 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3604 {
3605         if (!enable && intel_crtc->overlay) {
3606                 struct drm_device *dev = intel_crtc->base.dev;
3607                 struct drm_i915_private *dev_priv = dev->dev_private;
3608
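                     /* Make the GEM waits issued while switching the overlay
                      * off non-interruptible, so the switch-off cannot fail
                      * with -ERESTARTSYS in the middle of a modeset. */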
3609                 mutex_lock(&dev->struct_mutex);
3610                 dev_priv->mm.interruptible = false;
3611                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3612                 dev_priv->mm.interruptible = true;
3613                 mutex_unlock(&dev->struct_mutex);
3614         }
3615
3616         /* Let userspace switch the overlay on again. In most cases userspace
3617          * has to recompute where to put it anyway.
3618          */
3619 }
3620
3621 /**
3622  * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3623  * cursor plane briefly if not already running after enabling the display
3624  * plane.
3625  * This workaround avoids occasional blank screens when self refresh is
3626  * enabled.
3627  */
3628 static void
3629 g4x_fixup_plane(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
3630 {
3631         u32 cntl = I915_READ(CURCNTR(pipe));
3632
3633         if ((cntl & CURSOR_MODE) == 0) {
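                     /* Cursor plane is off: temporarily disable self-refresh,
                      * flash a 64x64 ARGB cursor for one vblank, then restore
                      * both the cursor and the self-refresh registers. */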
3634                 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3635
3636                 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3637                 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3638                 intel_wait_for_vblank(dev_priv->dev, pipe);
3639                 I915_WRITE(CURCNTR(pipe), cntl);
3640                 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3641                 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3642         }
3643 }
3644
3645 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3646 {
3647         struct drm_device *dev = crtc->dev;
3648         struct drm_i915_private *dev_priv = dev->dev_private;
3649         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3650         struct intel_encoder *encoder;
3651         int pipe = intel_crtc->pipe;
3652         int plane = intel_crtc->plane;
3653
3654         WARN_ON(!crtc->enabled);
3655
3656         if (intel_crtc->active)
3657                 return;
3658
3659         intel_crtc->active = true;
3660         intel_update_watermarks(dev);
3661
3662         intel_enable_pll(dev_priv, pipe);
3663
3664         for_each_encoder_on_crtc(dev, crtc, encoder)
3665                 if (encoder->pre_enable)
3666                         encoder->pre_enable(encoder);
3667
3668         intel_enable_pipe(dev_priv, pipe, false);
3669         intel_enable_plane(dev_priv, plane, pipe);
3670         if (IS_G4X(dev))
3671                 g4x_fixup_plane(dev_priv, pipe);
3672
3673         intel_crtc_load_lut(crtc);
3674         intel_update_fbc(dev);
3675
3676         /* Give the overlay scaler a chance to enable if it's on this pipe */
3677         intel_crtc_dpms_overlay(intel_crtc, true);
3678         intel_crtc_update_cursor(crtc, true);
3679
3680         for_each_encoder_on_crtc(dev, crtc, encoder)
3681                 encoder->enable(encoder);
3682 }
3683
3684 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3685 {
3686         struct drm_device *dev = crtc->dev;
3687         struct drm_i915_private *dev_priv = dev->dev_private;
3688         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3689         struct intel_encoder *encoder;
3690         int pipe = intel_crtc->pipe;
3691         int plane = intel_crtc->plane;
3692         u32 pctl;
3693
3694
3695         if (!intel_crtc->active)
3696                 return;
3697
3698         for_each_encoder_on_crtc(dev, crtc, encoder)
3699                 encoder->disable(encoder);
3700
3701         /* Give the overlay scaler a chance to disable if it's on this pipe */
3702         intel_crtc_wait_for_pending_flips(crtc);
3703         drm_vblank_off(dev, pipe);
3704         intel_crtc_dpms_overlay(intel_crtc, false);
3705         intel_crtc_update_cursor(crtc, false);
3706
3707         if (dev_priv->cfb_plane == plane)
3708                 intel_disable_fbc(dev);
3709
3710         intel_disable_plane(dev_priv, plane, pipe);
3711         intel_disable_pipe(dev_priv, pipe);
3712
3713         /* Disable panel fitter if it is on this pipe. */
3714         pctl = I915_READ(PFIT_CONTROL);
3715         if ((pctl & PFIT_ENABLE) &&
3716             ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
3717                 I915_WRITE(PFIT_CONTROL, 0);
3718
3719         intel_disable_pll(dev_priv, pipe);
3720
3721         intel_crtc->active = false;
3722         intel_update_fbc(dev);
3723         intel_update_watermarks(dev);
3724 }
3725
3726 static void i9xx_crtc_off(struct drm_crtc *crtc)
3727 {
3728 }
3729
3730 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3731                                     bool enabled)
3732 {
3733         struct drm_device *dev = crtc->dev;
3734         struct drm_i915_private *dev_priv = dev->dev_private;
3735 #if 0
3736         struct drm_i915_master_private *master_priv;
3737 #endif
3738         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3739         int pipe = intel_crtc->pipe;
3740
3741 #if 0
3742         if (!dev->primary->master)
3743                 return;
3744
3745         master_priv = dev->primary->master->driver_priv;
3746         if (!master_priv->sarea_priv)
3747                 return;
3748 #else
3749         if (!dev_priv->sarea_priv)
3750                 return;
3751 #endif
3752
3753         switch (pipe) {
3754         case 0:
3755 #if 0
3756                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3757                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3758 #else
3759                 dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
3760                 dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
3761 #endif
3762                 break;
3763         case 1:
3764 #if 0
3765                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3766                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3767 #else
3768                 dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
3769                 dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
3770 #endif
3771                 break;
3772         default:
3773                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3774                 break;
3775         }
3776 }
3777
3778 /**
3779  * Sets the power management mode of the pipe and plane.
3780  */
3781 void intel_crtc_update_dpms(struct drm_crtc *crtc)
3782 {
3783         struct drm_device *dev = crtc->dev;
3784         struct drm_i915_private *dev_priv = dev->dev_private;
3785         struct intel_encoder *intel_encoder;
3786         bool enable = false;
3787
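             /* The pipe should be running whenever at least one encoder on it
              * still has an active connector attached. */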
3788         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3789                 enable |= intel_encoder->connectors_active;
3790
3791         if (enable)
3792                 dev_priv->display.crtc_enable(crtc);
3793         else
3794                 dev_priv->display.crtc_disable(crtc);
3795
3796         intel_crtc_update_sarea(crtc, enable);
3797 }
3798
3799 static void intel_crtc_noop(struct drm_crtc *crtc)
3800 {
3801 }
3802
3803 static void intel_crtc_disable(struct drm_crtc *crtc)
3804 {
3805         struct drm_device *dev = crtc->dev;
3806         struct drm_connector *connector;
3807         struct drm_i915_private *dev_priv = dev->dev_private;
3808         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3809
3810         /* crtc should still be enabled when we disable it. */
3811         WARN_ON(!crtc->enabled);
3812
3813         intel_crtc->eld_vld = false;
3814         dev_priv->display.crtc_disable(crtc);
3815         intel_crtc_update_sarea(crtc, false);
3816         dev_priv->display.off(crtc);
3817
3818         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3819         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3820
3821         if (crtc->fb) {
3822                 mutex_lock(&dev->struct_mutex);
3823                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3824                 mutex_unlock(&dev->struct_mutex);
3825                 crtc->fb = NULL;
3826         }
3827
3828         /* Update computed state. */
3829         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3830                 if (!connector->encoder || !connector->encoder->crtc)
3831                         continue;