drm: add connector/encoder name creation
[dragonfly.git] / sys / dev / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/i2c.h>
30 #include <linux/kernel.h>
31 #include <drm/drm_edid.h>
32 #include <drm/drmP.h>
33 #include "intel_drv.h"
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include <drm/drm_dp_helper.h>
38 #include <drm/drm_crtc_helper.h>
39
40 #define DIV_ROUND_CLOSEST_ULL(ll, d)    \
41         ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
42
43 static void intel_increase_pllclock(struct drm_crtc *crtc);
44 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
45
46 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
47                                 struct intel_crtc_config *pipe_config);
48 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
49                                    struct intel_crtc_config *pipe_config);
50
51 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
52                           int x, int y, struct drm_framebuffer *old_fb);
53 static int intel_framebuffer_init(struct drm_device *dev,
54                                   struct intel_framebuffer *ifb,
55                                   struct drm_mode_fb_cmd2 *mode_cmd,
56                                   struct drm_i915_gem_object *obj);
57 static void intel_dp_set_m_n(struct intel_crtc *crtc);
58 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
59 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
60 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
61                                          struct intel_link_m_n *m_n);
62 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
63 static void haswell_set_pipeconf(struct drm_crtc *crtc);
64 static void intel_set_pipe_csc(struct drm_crtc *crtc);
65 static void vlv_prepare_pll(struct intel_crtc *crtc);
66
67 typedef struct {
68         int     min, max;
69 } intel_range_t;
70
71 typedef struct {
72         int     dot_limit;
73         int     p2_slow, p2_fast;
74 } intel_p2_t;
75
76 typedef struct intel_limit intel_limit_t;
77 struct intel_limit {
78         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
79         intel_p2_t          p2;
80 };
81
82 int
83 intel_pch_rawclk(struct drm_device *dev)
84 {
85         struct drm_i915_private *dev_priv = dev->dev_private;
86
87         WARN_ON(!HAS_PCH_SPLIT(dev));
88
89         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
90 }
91
92 static inline u32 /* units of 100MHz */
93 intel_fdi_link_freq(struct drm_device *dev)
94 {
95         if (IS_GEN5(dev)) {
96                 struct drm_i915_private *dev_priv = dev->dev_private;
97                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
98         } else
99                 return 27;
100 }
101
102 static const intel_limit_t intel_limits_i8xx_dac = {
103         .dot = { .min = 25000, .max = 350000 },
104         .vco = { .min = 908000, .max = 1512000 },
105         .n = { .min = 2, .max = 16 },
106         .m = { .min = 96, .max = 140 },
107         .m1 = { .min = 18, .max = 26 },
108         .m2 = { .min = 6, .max = 16 },
109         .p = { .min = 4, .max = 128 },
110         .p1 = { .min = 2, .max = 33 },
111         .p2 = { .dot_limit = 165000,
112                 .p2_slow = 4, .p2_fast = 2 },
113 };
114
115 static const intel_limit_t intel_limits_i8xx_dvo = {
116         .dot = { .min = 25000, .max = 350000 },
117         .vco = { .min = 908000, .max = 1512000 },
118         .n = { .min = 2, .max = 16 },
119         .m = { .min = 96, .max = 140 },
120         .m1 = { .min = 18, .max = 26 },
121         .m2 = { .min = 6, .max = 16 },
122         .p = { .min = 4, .max = 128 },
123         .p1 = { .min = 2, .max = 33 },
124         .p2 = { .dot_limit = 165000,
125                 .p2_slow = 4, .p2_fast = 4 },
126 };
127
128 static const intel_limit_t intel_limits_i8xx_lvds = {
129         .dot = { .min = 25000, .max = 350000 },
130         .vco = { .min = 908000, .max = 1512000 },
131         .n = { .min = 2, .max = 16 },
132         .m = { .min = 96, .max = 140 },
133         .m1 = { .min = 18, .max = 26 },
134         .m2 = { .min = 6, .max = 16 },
135         .p = { .min = 4, .max = 128 },
136         .p1 = { .min = 1, .max = 6 },
137         .p2 = { .dot_limit = 165000,
138                 .p2_slow = 14, .p2_fast = 7 },
139 };
140
141 static const intel_limit_t intel_limits_i9xx_sdvo = {
142         .dot = { .min = 20000, .max = 400000 },
143         .vco = { .min = 1400000, .max = 2800000 },
144         .n = { .min = 1, .max = 6 },
145         .m = { .min = 70, .max = 120 },
146         .m1 = { .min = 8, .max = 18 },
147         .m2 = { .min = 3, .max = 7 },
148         .p = { .min = 5, .max = 80 },
149         .p1 = { .min = 1, .max = 8 },
150         .p2 = { .dot_limit = 200000,
151                 .p2_slow = 10, .p2_fast = 5 },
152 };
153
154 static const intel_limit_t intel_limits_i9xx_lvds = {
155         .dot = { .min = 20000, .max = 400000 },
156         .vco = { .min = 1400000, .max = 2800000 },
157         .n = { .min = 1, .max = 6 },
158         .m = { .min = 70, .max = 120 },
159         .m1 = { .min = 8, .max = 18 },
160         .m2 = { .min = 3, .max = 7 },
161         .p = { .min = 7, .max = 98 },
162         .p1 = { .min = 1, .max = 8 },
163         .p2 = { .dot_limit = 112000,
164                 .p2_slow = 14, .p2_fast = 7 },
165 };
166
167
168 static const intel_limit_t intel_limits_g4x_sdvo = {
169         .dot = { .min = 25000, .max = 270000 },
170         .vco = { .min = 1750000, .max = 3500000},
171         .n = { .min = 1, .max = 4 },
172         .m = { .min = 104, .max = 138 },
173         .m1 = { .min = 17, .max = 23 },
174         .m2 = { .min = 5, .max = 11 },
175         .p = { .min = 10, .max = 30 },
176         .p1 = { .min = 1, .max = 3},
177         .p2 = { .dot_limit = 270000,
178                 .p2_slow = 10,
179                 .p2_fast = 10
180         },
181 };
182
183 static const intel_limit_t intel_limits_g4x_hdmi = {
184         .dot = { .min = 22000, .max = 400000 },
185         .vco = { .min = 1750000, .max = 3500000},
186         .n = { .min = 1, .max = 4 },
187         .m = { .min = 104, .max = 138 },
188         .m1 = { .min = 16, .max = 23 },
189         .m2 = { .min = 5, .max = 11 },
190         .p = { .min = 5, .max = 80 },
191         .p1 = { .min = 1, .max = 8},
192         .p2 = { .dot_limit = 165000,
193                 .p2_slow = 10, .p2_fast = 5 },
194 };
195
196 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
197         .dot = { .min = 20000, .max = 115000 },
198         .vco = { .min = 1750000, .max = 3500000 },
199         .n = { .min = 1, .max = 3 },
200         .m = { .min = 104, .max = 138 },
201         .m1 = { .min = 17, .max = 23 },
202         .m2 = { .min = 5, .max = 11 },
203         .p = { .min = 28, .max = 112 },
204         .p1 = { .min = 2, .max = 8 },
205         .p2 = { .dot_limit = 0,
206                 .p2_slow = 14, .p2_fast = 14
207         },
208 };
209
210 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
211         .dot = { .min = 80000, .max = 224000 },
212         .vco = { .min = 1750000, .max = 3500000 },
213         .n = { .min = 1, .max = 3 },
214         .m = { .min = 104, .max = 138 },
215         .m1 = { .min = 17, .max = 23 },
216         .m2 = { .min = 5, .max = 11 },
217         .p = { .min = 14, .max = 42 },
218         .p1 = { .min = 2, .max = 6 },
219         .p2 = { .dot_limit = 0,
220                 .p2_slow = 7, .p2_fast = 7
221         },
222 };
223
224 static const intel_limit_t intel_limits_pineview_sdvo = {
225         .dot = { .min = 20000, .max = 400000},
226         .vco = { .min = 1700000, .max = 3500000 },
227         /* Pineview's Ncounter is a ring counter */
228         .n = { .min = 3, .max = 6 },
229         .m = { .min = 2, .max = 256 },
230         /* Pineview only has one combined m divider, which we treat as m2. */
231         .m1 = { .min = 0, .max = 0 },
232         .m2 = { .min = 0, .max = 254 },
233         .p = { .min = 5, .max = 80 },
234         .p1 = { .min = 1, .max = 8 },
235         .p2 = { .dot_limit = 200000,
236                 .p2_slow = 10, .p2_fast = 5 },
237 };
238
239 static const intel_limit_t intel_limits_pineview_lvds = {
240         .dot = { .min = 20000, .max = 400000 },
241         .vco = { .min = 1700000, .max = 3500000 },
242         .n = { .min = 3, .max = 6 },
243         .m = { .min = 2, .max = 256 },
244         .m1 = { .min = 0, .max = 0 },
245         .m2 = { .min = 0, .max = 254 },
246         .p = { .min = 7, .max = 112 },
247         .p1 = { .min = 1, .max = 8 },
248         .p2 = { .dot_limit = 112000,
249                 .p2_slow = 14, .p2_fast = 14 },
250 };
251
252 /* Ironlake / Sandybridge
253  *
254  * We calculate clock using (register_value + 2) for N/M1/M2, so here
255  * the range value for them is (actual_value - 2).
256  */
257 static const intel_limit_t intel_limits_ironlake_dac = {
258         .dot = { .min = 25000, .max = 350000 },
259         .vco = { .min = 1760000, .max = 3510000 },
260         .n = { .min = 1, .max = 5 },
261         .m = { .min = 79, .max = 127 },
262         .m1 = { .min = 12, .max = 22 },
263         .m2 = { .min = 5, .max = 9 },
264         .p = { .min = 5, .max = 80 },
265         .p1 = { .min = 1, .max = 8 },
266         .p2 = { .dot_limit = 225000,
267                 .p2_slow = 10, .p2_fast = 5 },
268 };
269
270 static const intel_limit_t intel_limits_ironlake_single_lvds = {
271         .dot = { .min = 25000, .max = 350000 },
272         .vco = { .min = 1760000, .max = 3510000 },
273         .n = { .min = 1, .max = 3 },
274         .m = { .min = 79, .max = 118 },
275         .m1 = { .min = 12, .max = 22 },
276         .m2 = { .min = 5, .max = 9 },
277         .p = { .min = 28, .max = 112 },
278         .p1 = { .min = 2, .max = 8 },
279         .p2 = { .dot_limit = 225000,
280                 .p2_slow = 14, .p2_fast = 14 },
281 };
282
283 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
284         .dot = { .min = 25000, .max = 350000 },
285         .vco = { .min = 1760000, .max = 3510000 },
286         .n = { .min = 1, .max = 3 },
287         .m = { .min = 79, .max = 127 },
288         .m1 = { .min = 12, .max = 22 },
289         .m2 = { .min = 5, .max = 9 },
290         .p = { .min = 14, .max = 56 },
291         .p1 = { .min = 2, .max = 8 },
292         .p2 = { .dot_limit = 225000,
293                 .p2_slow = 7, .p2_fast = 7 },
294 };
295
296 /* LVDS 100mhz refclk limits. */
297 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
298         .dot = { .min = 25000, .max = 350000 },
299         .vco = { .min = 1760000, .max = 3510000 },
300         .n = { .min = 1, .max = 2 },
301         .m = { .min = 79, .max = 126 },
302         .m1 = { .min = 12, .max = 22 },
303         .m2 = { .min = 5, .max = 9 },
304         .p = { .min = 28, .max = 112 },
305         .p1 = { .min = 2, .max = 8 },
306         .p2 = { .dot_limit = 225000,
307                 .p2_slow = 14, .p2_fast = 14 },
308 };
309
310 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
311         .dot = { .min = 25000, .max = 350000 },
312         .vco = { .min = 1760000, .max = 3510000 },
313         .n = { .min = 1, .max = 3 },
314         .m = { .min = 79, .max = 126 },
315         .m1 = { .min = 12, .max = 22 },
316         .m2 = { .min = 5, .max = 9 },
317         .p = { .min = 14, .max = 42 },
318         .p1 = { .min = 2, .max = 6 },
319         .p2 = { .dot_limit = 225000,
320                 .p2_slow = 7, .p2_fast = 7 },
321 };
322
323 static const intel_limit_t intel_limits_vlv = {
324          /*
325           * These are the data rate limits (measured in fast clocks)
326           * since those are the strictest limits we have. The fast
327           * clock and actual rate limits are more relaxed, so checking
328           * them would make no difference.
329           */
330         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
331         .vco = { .min = 4000000, .max = 6000000 },
332         .n = { .min = 1, .max = 7 },
333         .m1 = { .min = 2, .max = 3 },
334         .m2 = { .min = 11, .max = 156 },
335         .p1 = { .min = 2, .max = 3 },
336         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
337 };
338
339 static const intel_limit_t intel_limits_chv = {
340         /*
341          * These are the data rate limits (measured in fast clocks)
342          * since those are the strictest limits we have.  The fast
343          * clock and actual rate limits are more relaxed, so checking
344          * them would make no difference.
345          */
346         .dot = { .min = 25000 * 5, .max = 540000 * 5},
347         .vco = { .min = 4860000, .max = 6700000 },
348         .n = { .min = 1, .max = 1 },
349         .m1 = { .min = 2, .max = 2 },
350         .m2 = { .min = 24 << 22, .max = 175 << 22 },
351         .p1 = { .min = 2, .max = 4 },
352         .p2 = { .p2_slow = 1, .p2_fast = 14 },
353 };
354
355 static void vlv_clock(int refclk, intel_clock_t *clock)
356 {
357         clock->m = clock->m1 * clock->m2;
358         clock->p = clock->p1 * clock->p2;
359         if (WARN_ON(clock->n == 0 || clock->p == 0))
360                 return;
361         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
362         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
363 }
364
365 /**
366  * Returns whether any output on the specified pipe is of the specified type
367  */
368 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
369 {
370         struct drm_device *dev = crtc->dev;
371         struct intel_encoder *encoder;
372
373         for_each_encoder_on_crtc(dev, crtc, encoder)
374                 if (encoder->type == type)
375                         return true;
376
377         return false;
378 }
379
380 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
381                                                 int refclk)
382 {
383         struct drm_device *dev = crtc->dev;
384         const intel_limit_t *limit;
385
386         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
387                 if (intel_is_dual_link_lvds(dev)) {
388                         if (refclk == 100000)
389                                 limit = &intel_limits_ironlake_dual_lvds_100m;
390                         else
391                                 limit = &intel_limits_ironlake_dual_lvds;
392                 } else {
393                         if (refclk == 100000)
394                                 limit = &intel_limits_ironlake_single_lvds_100m;
395                         else
396                                 limit = &intel_limits_ironlake_single_lvds;
397                 }
398         } else
399                 limit = &intel_limits_ironlake_dac;
400
401         return limit;
402 }
403
404 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
405 {
406         struct drm_device *dev = crtc->dev;
407         const intel_limit_t *limit;
408
409         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
410                 if (intel_is_dual_link_lvds(dev))
411                         limit = &intel_limits_g4x_dual_channel_lvds;
412                 else
413                         limit = &intel_limits_g4x_single_channel_lvds;
414         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
415                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
416                 limit = &intel_limits_g4x_hdmi;
417         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
418                 limit = &intel_limits_g4x_sdvo;
419         } else /* The option is for other outputs */
420                 limit = &intel_limits_i9xx_sdvo;
421
422         return limit;
423 }
424
425 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
426 {
427         struct drm_device *dev = crtc->dev;
428         const intel_limit_t *limit;
429
430         if (HAS_PCH_SPLIT(dev))
431                 limit = intel_ironlake_limit(crtc, refclk);
432         else if (IS_G4X(dev)) {
433                 limit = intel_g4x_limit(crtc);
434         } else if (IS_PINEVIEW(dev)) {
435                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
436                         limit = &intel_limits_pineview_lvds;
437                 else
438                         limit = &intel_limits_pineview_sdvo;
439         } else if (IS_CHERRYVIEW(dev)) {
440                 limit = &intel_limits_chv;
441         } else if (IS_VALLEYVIEW(dev)) {
442                 limit = &intel_limits_vlv;
443         } else if (!IS_GEN2(dev)) {
444                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
445                         limit = &intel_limits_i9xx_lvds;
446                 else
447                         limit = &intel_limits_i9xx_sdvo;
448         } else {
449                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
450                         limit = &intel_limits_i8xx_lvds;
451                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
452                         limit = &intel_limits_i8xx_dvo;
453                 else
454                         limit = &intel_limits_i8xx_dac;
455         }
456         return limit;
457 }
458
459 /* m1 is reserved as 0 in Pineview, n is a ring counter */
460 static void pineview_clock(int refclk, intel_clock_t *clock)
461 {
462         clock->m = clock->m2 + 2;
463         clock->p = clock->p1 * clock->p2;
464         if (WARN_ON(clock->n == 0 || clock->p == 0))
465                 return;
466         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
467         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
468 }
469
470 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
471 {
472         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
473 }
474
475 static void i9xx_clock(int refclk, intel_clock_t *clock)
476 {
477         clock->m = i9xx_dpll_compute_m(clock);
478         clock->p = clock->p1 * clock->p2;
479         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
480                 return;
481         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
482         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
483 }
484
485 static void chv_clock(int refclk, intel_clock_t *clock)
486 {
487         clock->m = clock->m1 * clock->m2;
488         clock->p = clock->p1 * clock->p2;
489         if (WARN_ON(clock->n == 0 || clock->p == 0))
490                 return;
491         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
492                         clock->n << 22);
493         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
494 }
495
496 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
497 /**
498  * Returns whether the given set of divisors are valid for a given refclk with
499  * the given connectors.
500  */
501
502 static bool intel_PLL_is_valid(struct drm_device *dev,
503                                const intel_limit_t *limit,
504                                const intel_clock_t *clock)
505 {
506         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
507                 INTELPllInvalid("n out of range\n");
508         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
509                 INTELPllInvalid("p1 out of range\n");
510         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
511                 INTELPllInvalid("m2 out of range\n");
512         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
513                 INTELPllInvalid("m1 out of range\n");
514
515         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
516                 if (clock->m1 <= clock->m2)
517                         INTELPllInvalid("m1 <= m2\n");
518
519         if (!IS_VALLEYVIEW(dev)) {
520                 if (clock->p < limit->p.min || limit->p.max < clock->p)
521                         INTELPllInvalid("p out of range\n");
522                 if (clock->m < limit->m.min || limit->m.max < clock->m)
523                         INTELPllInvalid("m out of range\n");
524         }
525
526         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
527                 INTELPllInvalid("vco out of range\n");
528         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
529          * connector, etc., rather than just a single range.
530          */
531         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
532                 INTELPllInvalid("dot out of range\n");
533
534         return true;
535 }
536
537 static bool
538 i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
539                     int target, int refclk, intel_clock_t *match_clock,
540                     intel_clock_t *best_clock)
541 {
542         struct drm_device *dev = crtc->dev;
543         intel_clock_t clock;
544         int err = target;
545
546         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
547                 /*
548                  * For LVDS just rely on its current settings for dual-channel.
549                  * We haven't figured out how to reliably set up different
550                  * single/dual channel state, if we even can.
551                  */
552                 if (intel_is_dual_link_lvds(dev))
553                         clock.p2 = limit->p2.p2_fast;
554                 else
555                         clock.p2 = limit->p2.p2_slow;
556         } else {
557                 if (target < limit->p2.dot_limit)
558                         clock.p2 = limit->p2.p2_slow;
559                 else
560                         clock.p2 = limit->p2.p2_fast;
561         }
562
563         memset(best_clock, 0, sizeof(*best_clock));
564
565         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
566              clock.m1++) {
567                 for (clock.m2 = limit->m2.min;
568                      clock.m2 <= limit->m2.max; clock.m2++) {
569                         if (clock.m2 >= clock.m1)
570                                 break;
571                         for (clock.n = limit->n.min;
572                              clock.n <= limit->n.max; clock.n++) {
573                                 for (clock.p1 = limit->p1.min;
574                                         clock.p1 <= limit->p1.max; clock.p1++) {
575                                         int this_err;
576
577                                         i9xx_clock(refclk, &clock);
578                                         if (!intel_PLL_is_valid(dev, limit,
579                                                                 &clock))
580                                                 continue;
581                                         if (match_clock &&
582                                             clock.p != match_clock->p)
583                                                 continue;
584
585                                         this_err = abs(clock.dot - target);
586                                         if (this_err < err) {
587                                                 *best_clock = clock;
588                                                 err = this_err;
589                                         }
590                                 }
591                         }
592                 }
593         }
594
595         return (err != target);
596 }
597
598 static bool
599 pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
600                    int target, int refclk, intel_clock_t *match_clock,
601                    intel_clock_t *best_clock)
602 {
603         struct drm_device *dev = crtc->dev;
604         intel_clock_t clock;
605         int err = target;
606
607         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
608                 /*
609                  * For LVDS just rely on its current settings for dual-channel.
610                  * We haven't figured out how to reliably set up different
611                  * single/dual channel state, if we even can.
612                  */
613                 if (intel_is_dual_link_lvds(dev))
614                         clock.p2 = limit->p2.p2_fast;
615                 else
616                         clock.p2 = limit->p2.p2_slow;
617         } else {
618                 if (target < limit->p2.dot_limit)
619                         clock.p2 = limit->p2.p2_slow;
620                 else
621                         clock.p2 = limit->p2.p2_fast;
622         }
623
624         memset(best_clock, 0, sizeof(*best_clock));
625
626         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
627              clock.m1++) {
628                 for (clock.m2 = limit->m2.min;
629                      clock.m2 <= limit->m2.max; clock.m2++) {
630                         for (clock.n = limit->n.min;
631                              clock.n <= limit->n.max; clock.n++) {
632                                 for (clock.p1 = limit->p1.min;
633                                         clock.p1 <= limit->p1.max; clock.p1++) {
634                                         int this_err;
635
636                                         pineview_clock(refclk, &clock);
637                                         if (!intel_PLL_is_valid(dev, limit,
638                                                                 &clock))
639                                                 continue;
640                                         if (match_clock &&
641                                             clock.p != match_clock->p)
642                                                 continue;
643
644                                         this_err = abs(clock.dot - target);
645                                         if (this_err < err) {
646                                                 *best_clock = clock;
647                                                 err = this_err;
648                                         }
649                                 }
650                         }
651                 }
652         }
653
654         return (err != target);
655 }
656
657 static bool
658 g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
659                    int target, int refclk, intel_clock_t *match_clock,
660                    intel_clock_t *best_clock)
661 {
662         struct drm_device *dev = crtc->dev;
663         intel_clock_t clock;
664         int max_n;
665         bool found;
666         /* approximately equals target * 0.00585 */
667         int err_most = (target >> 8) + (target >> 9);
668         found = false;
669
670         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
671                 if (intel_is_dual_link_lvds(dev))
672                         clock.p2 = limit->p2.p2_fast;
673                 else
674                         clock.p2 = limit->p2.p2_slow;
675         } else {
676                 if (target < limit->p2.dot_limit)
677                         clock.p2 = limit->p2.p2_slow;
678                 else
679                         clock.p2 = limit->p2.p2_fast;
680         }
681
682         memset(best_clock, 0, sizeof(*best_clock));
683         max_n = limit->n.max;
684         /* based on hardware requirement, prefer smaller n to precision */
685         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
686                 /* based on hardware requirement, prefere larger m1,m2 */
687                 for (clock.m1 = limit->m1.max;
688                      clock.m1 >= limit->m1.min; clock.m1--) {
689                         for (clock.m2 = limit->m2.max;
690                              clock.m2 >= limit->m2.min; clock.m2--) {
691                                 for (clock.p1 = limit->p1.max;
692                                      clock.p1 >= limit->p1.min; clock.p1--) {
693                                         int this_err;
694
695                                         i9xx_clock(refclk, &clock);
696                                         if (!intel_PLL_is_valid(dev, limit,
697                                                                 &clock))
698                                                 continue;
699
700                                         this_err = abs(clock.dot - target);
701                                         if (this_err < err_most) {
702                                                 *best_clock = clock;
703                                                 err_most = this_err;
704                                                 max_n = clock.n;
705                                                 found = true;
706                                         }
707                                 }
708                         }
709                 }
710         }
711         return found;
712 }
713
714 static bool
715 vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
716                    int target, int refclk, intel_clock_t *match_clock,
717                    intel_clock_t *best_clock)
718 {
719         struct drm_device *dev = crtc->dev;
720         intel_clock_t clock;
721         unsigned int bestppm = 1000000;
722         /* min update 19.2 MHz */
723         int max_n = min(limit->n.max, refclk / 19200);
724         bool found = false;
725
726         target *= 5; /* fast clock */
727
728         memset(best_clock, 0, sizeof(*best_clock));
729
730         /* based on hardware requirement, prefer smaller n to precision */
731         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
732                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
733                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
734                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
735                                 clock.p = clock.p1 * clock.p2;
736                                 /* based on hardware requirement, prefer bigger m1,m2 values */
737                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
738                                         unsigned int ppm, diff;
739
740                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
741                                                                      refclk * clock.m1);
742
743                                         vlv_clock(refclk, &clock);
744
745                                         if (!intel_PLL_is_valid(dev, limit,
746                                                                 &clock))
747                                                 continue;
748
749                                         diff = abs(clock.dot - target);
750                                         ppm = div_u64(1000000ULL * diff, target);
751
752                                         if (ppm < 100 && clock.p > best_clock->p) {
753                                                 bestppm = 0;
754                                                 *best_clock = clock;
755                                                 found = true;
756                                         }
757
758                                         if (bestppm >= 10 && ppm < bestppm - 10) {
759                                                 bestppm = ppm;
760                                                 *best_clock = clock;
761                                                 found = true;
762                                         }
763                                 }
764                         }
765                 }
766         }
767
768         return found;
769 }
770
771 static bool
772 chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
773                    int target, int refclk, intel_clock_t *match_clock,
774                    intel_clock_t *best_clock)
775 {
776         struct drm_device *dev = crtc->dev;
777         intel_clock_t clock;
778         uint64_t m2;
779         int found = false;
780
781         memset(best_clock, 0, sizeof(*best_clock));
782
783         /*
784          * Based on hardware doc, the n always set to 1, and m1 always
785          * set to 2.  If requires to support 200Mhz refclk, we need to
786          * revisit this because n may not 1 anymore.
787          */
788         clock.n = 1, clock.m1 = 2;
789         target *= 5;    /* fast clock */
790
791         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
792                 for (clock.p2 = limit->p2.p2_fast;
793                                 clock.p2 >= limit->p2.p2_slow;
794                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
795
796                         clock.p = clock.p1 * clock.p2;
797
798                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
799                                         clock.n) << 22, refclk * clock.m1);
800
801                         if (m2 > INT_MAX/clock.m1)
802                                 continue;
803
804                         clock.m2 = m2;
805
806                         chv_clock(refclk, &clock);
807
808                         if (!intel_PLL_is_valid(dev, limit, &clock))
809                                 continue;
810
811                         /* based on hardware requirement, prefer bigger p
812                          */
813                         if (clock.p > best_clock->p) {
814                                 *best_clock = clock;
815                                 found = true;
816                         }
817                 }
818         }
819
820         return found;
821 }
822
823 bool intel_crtc_active(struct drm_crtc *crtc)
824 {
825         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
826
827         /* Be paranoid as we can arrive here with only partial
828          * state retrieved from the hardware during setup.
829          *
830          * We can ditch the adjusted_mode.crtc_clock check as soon
831          * as Haswell has gained clock readout/fastboot support.
832          *
833          * We can ditch the crtc->primary->fb check as soon as we can
834          * properly reconstruct framebuffers.
835          */
836         return intel_crtc->active && crtc->primary->fb &&
837                 intel_crtc->config.adjusted_mode.crtc_clock;
838 }
839
840 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
841                                              enum i915_pipe pipe)
842 {
843         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
844         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
845
846         return intel_crtc->config.cpu_transcoder;
847 }
848
849 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
850 {
851         struct drm_i915_private *dev_priv = dev->dev_private;
852         u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
853
854         frame = I915_READ(frame_reg);
855
856         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
857                 WARN(1, "vblank wait timed out\n");
858 }
859
860 /**
861  * intel_wait_for_vblank - wait for vblank on a given pipe
862  * @dev: drm device
863  * @pipe: pipe to wait for
864  *
865  * Wait for vblank to occur on a given pipe.  Needed for various bits of
866  * mode setting code.
867  */
868 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
869 {
870         struct drm_i915_private *dev_priv = dev->dev_private;
871         int pipestat_reg = PIPESTAT(pipe);
872
873         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
874                 g4x_wait_for_vblank(dev, pipe);
875                 return;
876         }
877
878         /* Clear existing vblank status. Note this will clear any other
879          * sticky status fields as well.
880          *
881          * This races with i915_driver_irq_handler() with the result
882          * that either function could miss a vblank event.  Here it is not
883          * fatal, as we will either wait upon the next vblank interrupt or
884          * timeout.  Generally speaking intel_wait_for_vblank() is only
885          * called during modeset at which time the GPU should be idle and
886          * should *not* be performing page flips and thus not waiting on
887          * vblanks...
888          * Currently, the result of us stealing a vblank from the irq
889          * handler is that a single frame will be skipped during swapbuffers.
890          */
891         I915_WRITE(pipestat_reg,
892                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
893
894         /* Wait for vblank interrupt bit to set */
895         if (wait_for(I915_READ(pipestat_reg) &
896                      PIPE_VBLANK_INTERRUPT_STATUS,
897                      50))
898                 DRM_DEBUG_KMS("vblank wait timed out\n");
899 }
900
901 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
902 {
903         struct drm_i915_private *dev_priv = dev->dev_private;
904         u32 reg = PIPEDSL(pipe);
905         u32 line1, line2;
906         u32 line_mask;
907
908         if (IS_GEN2(dev))
909                 line_mask = DSL_LINEMASK_GEN2;
910         else
911                 line_mask = DSL_LINEMASK_GEN3;
912
913         line1 = I915_READ(reg) & line_mask;
914         mdelay(5);
915         line2 = I915_READ(reg) & line_mask;
916
917         return line1 == line2;
918 }
919
920 /*
921  * intel_wait_for_pipe_off - wait for pipe to turn off
922  * @dev: drm device
923  * @pipe: pipe to wait for
924  *
925  * After disabling a pipe, we can't wait for vblank in the usual way,
926  * spinning on the vblank interrupt status bit, since we won't actually
927  * see an interrupt when the pipe is disabled.
928  *
929  * On Gen4 and above:
930  *   wait for the pipe register state bit to turn off
931  *
932  * Otherwise:
933  *   wait for the display line value to settle (it usually
934  *   ends up stopping at the start of the next frame).
935  *
936  */
937 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
938 {
939         struct drm_i915_private *dev_priv = dev->dev_private;
940         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
941                                                                       pipe);
942
943         if (INTEL_INFO(dev)->gen >= 4) {
944                 int reg = PIPECONF(cpu_transcoder);
945
946                 /* Wait for the Pipe State to go off */
947                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
948                              100))
949                         WARN(1, "pipe_off wait timed out\n");
950         } else {
951                 /* Wait for the display line to settle */
952                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
953                         WARN(1, "pipe_off wait timed out\n");
954         }
955 }
956
957 /*
958  * ibx_digital_port_connected - is the specified port connected?
959  * @dev_priv: i915 private structure
960  * @port: the port to test
961  *
962  * Returns true if @port is connected, false otherwise.
963  */
964 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
965                                 struct intel_digital_port *port)
966 {
967         u32 bit;
968
969         if (HAS_PCH_IBX(dev_priv->dev)) {
970                 switch (port->port) {
971                 case PORT_B:
972                         bit = SDE_PORTB_HOTPLUG;
973                         break;
974                 case PORT_C:
975                         bit = SDE_PORTC_HOTPLUG;
976                         break;
977                 case PORT_D:
978                         bit = SDE_PORTD_HOTPLUG;
979                         break;
980                 default:
981                         return true;
982                 }
983         } else {
984                 switch (port->port) {
985                 case PORT_B:
986                         bit = SDE_PORTB_HOTPLUG_CPT;
987                         break;
988                 case PORT_C:
989                         bit = SDE_PORTC_HOTPLUG_CPT;
990                         break;
991                 case PORT_D:
992                         bit = SDE_PORTD_HOTPLUG_CPT;
993                         break;
994                 default:
995                         return true;
996                 }
997         }
998
999         return I915_READ(SDEISR) & bit;
1000 }
1001
1002 static const char *state_string(bool enabled)
1003 {
1004         return enabled ? "on" : "off";
1005 }
1006
1007 /* Only for pre-ILK configs */
1008 void assert_pll(struct drm_i915_private *dev_priv,
1009                 enum i915_pipe pipe, bool state)
1010 {
1011         int reg;
1012         u32 val;
1013         bool cur_state;
1014
1015         reg = DPLL(pipe);
1016         val = I915_READ(reg);
1017         cur_state = !!(val & DPLL_VCO_ENABLE);
1018         WARN(cur_state != state,
1019              "PLL state assertion failure (expected %s, current %s)\n",
1020              state_string(state), state_string(cur_state));
1021 }
1022
1023 /* XXX: the dsi pll is shared between MIPI DSI ports */
1024 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1025 {
1026         u32 val;
1027         bool cur_state;
1028
1029         mutex_lock(&dev_priv->dpio_lock);
1030         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1031         mutex_unlock(&dev_priv->dpio_lock);
1032
1033         cur_state = val & DSI_PLL_VCO_EN;
1034         WARN(cur_state != state,
1035              "DSI PLL state assertion failure (expected %s, current %s)\n",
1036              state_string(state), state_string(cur_state));
1037 }
1038 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1039 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1040
1041 struct intel_shared_dpll *
1042 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1043 {
1044         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1045
1046         if (crtc->config.shared_dpll < 0)
1047                 return NULL;
1048
1049         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1050 }
1051
1052 /* For ILK+ */
1053 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1054                         struct intel_shared_dpll *pll,
1055                         bool state)
1056 {
1057         bool cur_state;
1058         struct intel_dpll_hw_state hw_state;
1059
1060         if (HAS_PCH_LPT(dev_priv->dev)) {
1061                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1062                 return;
1063         }
1064
1065         if (WARN (!pll,
1066                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1067                 return;
1068
1069         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1070         WARN(cur_state != state,
1071              "%s assertion failure (expected %s, current %s)\n",
1072              pll->name, state_string(state), state_string(cur_state));
1073 }
1074
1075 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1076                           enum i915_pipe pipe, bool state)
1077 {
1078         int reg;
1079         u32 val;
1080         bool cur_state;
1081         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1082                                                                       pipe);
1083
1084         if (HAS_DDI(dev_priv->dev)) {
1085                 /* DDI does not have a specific FDI_TX register */
1086                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1087                 val = I915_READ(reg);
1088                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1089         } else {
1090                 reg = FDI_TX_CTL(pipe);
1091                 val = I915_READ(reg);
1092                 cur_state = !!(val & FDI_TX_ENABLE);
1093         }
1094         WARN(cur_state != state,
1095              "FDI TX state assertion failure (expected %s, current %s)\n",
1096              state_string(state), state_string(cur_state));
1097 }
1098 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1099 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1100
1101 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1102                           enum i915_pipe pipe, bool state)
1103 {
1104         int reg;
1105         u32 val;
1106         bool cur_state;
1107
1108         reg = FDI_RX_CTL(pipe);
1109         val = I915_READ(reg);
1110         cur_state = !!(val & FDI_RX_ENABLE);
1111         WARN(cur_state != state,
1112              "FDI RX state assertion failure (expected %s, current %s)\n",
1113              state_string(state), state_string(cur_state));
1114 }
1115 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1116 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1117
1118 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1119                                       enum i915_pipe pipe)
1120 {
1121         int reg;
1122         u32 val;
1123
1124         /* ILK FDI PLL is always enabled */
1125         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1126                 return;
1127
1128         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1129         if (HAS_DDI(dev_priv->dev))
1130                 return;
1131
1132         reg = FDI_TX_CTL(pipe);
1133         val = I915_READ(reg);
1134         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1135 }
1136
1137 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1138                        enum i915_pipe pipe, bool state)
1139 {
1140         int reg;
1141         u32 val;
1142         bool cur_state;
1143
1144         reg = FDI_RX_CTL(pipe);
1145         val = I915_READ(reg);
1146         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1147         WARN(cur_state != state,
1148              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1149              state_string(state), state_string(cur_state));
1150 }
1151
1152 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1153                                   enum i915_pipe pipe)
1154 {
1155         int pp_reg, lvds_reg;
1156         u32 val;
1157         enum i915_pipe panel_pipe = PIPE_A;
1158         bool locked = true;
1159
1160         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1161                 pp_reg = PCH_PP_CONTROL;
1162                 lvds_reg = PCH_LVDS;
1163         } else {
1164                 pp_reg = PP_CONTROL;
1165                 lvds_reg = LVDS;
1166         }
1167
1168         val = I915_READ(pp_reg);
1169         if (!(val & PANEL_POWER_ON) ||
1170             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1171                 locked = false;
1172
1173         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1174                 panel_pipe = PIPE_B;
1175
1176         WARN(panel_pipe == pipe && locked,
1177              "panel assertion failure, pipe %c regs locked\n",
1178              pipe_name(pipe));
1179 }
1180
1181 static void assert_cursor(struct drm_i915_private *dev_priv,
1182                           enum i915_pipe pipe, bool state)
1183 {
1184         struct drm_device *dev = dev_priv->dev;
1185         bool cur_state;
1186
1187         if (IS_845G(dev) || IS_I865G(dev))
1188                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1189         else
1190                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1191
1192         WARN(cur_state != state,
1193              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1194              pipe_name(pipe), state_string(state), state_string(cur_state));
1195 }
1196 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1197 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1198
1199 void assert_pipe(struct drm_i915_private *dev_priv,
1200                  enum i915_pipe pipe, bool state)
1201 {
1202         int reg;
1203         u32 val;
1204         bool cur_state;
1205         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1206                                                                       pipe);
1207
1208         /* if we need the pipe A quirk it must be always on */
1209         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1210                 state = true;
1211
1212         if (!intel_display_power_enabled(dev_priv,
1213                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1214                 cur_state = false;
1215         } else {
1216                 reg = PIPECONF(cpu_transcoder);
1217                 val = I915_READ(reg);
1218                 cur_state = !!(val & PIPECONF_ENABLE);
1219         }
1220
1221         WARN(cur_state != state,
1222              "pipe %c assertion failure (expected %s, current %s)\n",
1223              pipe_name(pipe), state_string(state), state_string(cur_state));
1224 }
1225
1226 static void assert_plane(struct drm_i915_private *dev_priv,
1227                          enum plane plane, bool state)
1228 {
1229         int reg;
1230         u32 val;
1231         bool cur_state;
1232
1233         reg = DSPCNTR(plane);
1234         val = I915_READ(reg);
1235         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1236         WARN(cur_state != state,
1237              "plane %c assertion failure (expected %s, current %s)\n",
1238              plane_name(plane), state_string(state), state_string(cur_state));
1239 }
1240
1241 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1242 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1243
1244 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1245                                    enum i915_pipe pipe)
1246 {
1247         struct drm_device *dev = dev_priv->dev;
1248         int reg, i;
1249         u32 val;
1250         int cur_pipe;
1251
1252         /* Primary planes are fixed to pipes on gen4+ */
1253         if (INTEL_INFO(dev)->gen >= 4) {
1254                 reg = DSPCNTR(pipe);
1255                 val = I915_READ(reg);
1256                 WARN(val & DISPLAY_PLANE_ENABLE,
1257                      "plane %c assertion failure, should be disabled but not\n",
1258                      plane_name(pipe));
1259                 return;
1260         }
1261
1262         /* Need to check both planes against the pipe */
1263         for_each_pipe(i) {
1264                 reg = DSPCNTR(i);
1265                 val = I915_READ(reg);
1266                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1267                         DISPPLANE_SEL_PIPE_SHIFT;
1268                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1269                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1270                      plane_name(i), pipe_name(pipe));
1271         }
1272 }
1273
1274 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1275                                     enum i915_pipe pipe)
1276 {
1277         struct drm_device *dev = dev_priv->dev;
1278         int reg, sprite;
1279         u32 val;
1280
1281         if (IS_VALLEYVIEW(dev)) {
1282                 for_each_sprite(pipe, sprite) {
1283                         reg = SPCNTR(pipe, sprite);
1284                         val = I915_READ(reg);
1285                         WARN(val & SP_ENABLE,
1286                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1287                              sprite_name(pipe, sprite), pipe_name(pipe));
1288                 }
1289         } else if (INTEL_INFO(dev)->gen >= 7) {
1290                 reg = SPRCTL(pipe);
1291                 val = I915_READ(reg);
1292                 WARN(val & SPRITE_ENABLE,
1293                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1294                      plane_name(pipe), pipe_name(pipe));
1295         } else if (INTEL_INFO(dev)->gen >= 5) {
1296                 reg = DVSCNTR(pipe);
1297                 val = I915_READ(reg);
1298                 WARN(val & DVS_ENABLE,
1299                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1300                      plane_name(pipe), pipe_name(pipe));
1301         }
1302 }
1303
1304 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1305 {
1306         u32 val;
1307         bool enabled;
1308
1309         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1310
1311         val = I915_READ(PCH_DREF_CONTROL);
1312         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1313                             DREF_SUPERSPREAD_SOURCE_MASK));
1314         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1315 }
1316
1317 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1318                                            enum i915_pipe pipe)
1319 {
1320         int reg;
1321         u32 val;
1322         bool enabled;
1323
1324         reg = PCH_TRANSCONF(pipe);
1325         val = I915_READ(reg);
1326         enabled = !!(val & TRANS_ENABLE);
1327         WARN(enabled,
1328              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1329              pipe_name(pipe));
1330 }
1331
1332 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1333                             enum i915_pipe pipe, u32 port_sel, u32 val)
1334 {
1335         if ((val & DP_PORT_EN) == 0)
1336                 return false;
1337
1338         if (HAS_PCH_CPT(dev_priv->dev)) {
1339                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1340                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1341                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1342                         return false;
1343         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1344                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1345                         return false;
1346         } else {
1347                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1348                         return false;
1349         }
1350         return true;
1351 }
1352
1353 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1354                               enum i915_pipe pipe, u32 val)
1355 {
1356         if ((val & SDVO_ENABLE) == 0)
1357                 return false;
1358
1359         if (HAS_PCH_CPT(dev_priv->dev)) {
1360                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1361                         return false;
1362         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1363                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1364                         return false;
1365         } else {
1366                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1367                         return false;
1368         }
1369         return true;
1370 }
1371
1372 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1373                               enum i915_pipe pipe, u32 val)
1374 {
1375         if ((val & LVDS_PORT_EN) == 0)
1376                 return false;
1377
1378         if (HAS_PCH_CPT(dev_priv->dev)) {
1379                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1380                         return false;
1381         } else {
1382                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1383                         return false;
1384         }
1385         return true;
1386 }
1387
1388 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1389                               enum i915_pipe pipe, u32 val)
1390 {
1391         if ((val & ADPA_DAC_ENABLE) == 0)
1392                 return false;
1393         if (HAS_PCH_CPT(dev_priv->dev)) {
1394                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1395                         return false;
1396         } else {
1397                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1398                         return false;
1399         }
1400         return true;
1401 }
1402
1403 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1404                                    enum i915_pipe pipe, int reg, u32 port_sel)
1405 {
1406         u32 val = I915_READ(reg);
1407         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1408              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1409              reg, pipe_name(pipe));
1410
1411         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1412              && (val & DP_PIPEB_SELECT),
1413              "IBX PCH dp port still using transcoder B\n");
1414 }
1415
1416 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1417                                      enum i915_pipe pipe, int reg)
1418 {
1419         u32 val = I915_READ(reg);
1420         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1421              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1422              reg, pipe_name(pipe));
1423
1424         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1425              && (val & SDVO_PIPE_B_SELECT),
1426              "IBX PCH hdmi port still using transcoder B\n");
1427 }
1428
1429 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1430                                       enum i915_pipe pipe)
1431 {
1432         int reg;
1433         u32 val;
1434
1435         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1436         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1437         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1438
1439         reg = PCH_ADPA;
1440         val = I915_READ(reg);
1441         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1442              "PCH VGA enabled on transcoder %c, should be disabled\n",
1443              pipe_name(pipe));
1444
1445         reg = PCH_LVDS;
1446         val = I915_READ(reg);
1447         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1448              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1449              pipe_name(pipe));
1450
1451         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1452         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1453         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1454 }
1455
1456 static void intel_init_dpio(struct drm_device *dev)
1457 {
1458         struct drm_i915_private *dev_priv = dev->dev_private;
1459
1460         if (!IS_VALLEYVIEW(dev))
1461                 return;
1462
1463         /*
1464          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1465          * CHV x1 PHY (DP/HDMI D)
1466          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1467          */
1468         if (IS_CHERRYVIEW(dev)) {
1469                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1470                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1471         } else {
1472                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1473         }
1474 }
1475
1476 static void intel_reset_dpio(struct drm_device *dev)
1477 {
1478         struct drm_i915_private *dev_priv = dev->dev_private;
1479
1480         if (!IS_VALLEYVIEW(dev))
1481                 return;
1482
1483         if (IS_CHERRYVIEW(dev)) {
1484                 enum dpio_phy phy;
1485                 u32 val;
1486
1487                 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1488                         /* Poll for phypwrgood signal */
1489                         if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1490                                                 PHY_POWERGOOD(phy), 1))
1491                                 DRM_ERROR("Display PHY %d is not power up\n", phy);
1492
1493                         /*
1494                          * Deassert common lane reset for PHY.
1495                          *
1496                          * This should only be done on init and resume from S3
1497                          * with both PLLs disabled, or we risk losing DPIO and
1498                          * PLL synchronization.
1499                          */
1500                         val = I915_READ(DISPLAY_PHY_CONTROL);
1501                         I915_WRITE(DISPLAY_PHY_CONTROL,
1502                                 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1503                 }
1504
1505         } else {
1506                 /*
1507                  * If DPIO has already been reset, e.g. by BIOS, just skip all
1508                  * this.
1509                  */
1510                 if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
1511                         return;
1512
1513                 /*
1514                  * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1515                  * Need to assert and de-assert PHY SB reset by gating the
1516                  * common lane power, then un-gating it.
1517                  * Simply ungating isn't enough to reset the PHY enough to get
1518                  * ports and lanes running.
1519                  */
1520                 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1521                                      false);
1522                 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1523                                      true);
1524         }
1525 }
1526
1527 static void vlv_enable_pll(struct intel_crtc *crtc)
1528 {
1529         struct drm_device *dev = crtc->base.dev;
1530         struct drm_i915_private *dev_priv = dev->dev_private;
1531         int reg = DPLL(crtc->pipe);
1532         u32 dpll = crtc->config.dpll_hw_state.dpll;
1533
1534         assert_pipe_disabled(dev_priv, crtc->pipe);
1535
1536         /* No really, not for ILK+ */
1537         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1538
1539         /* PLL is protected by panel, make sure we can write it */
1540         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1541                 assert_panel_unlocked(dev_priv, crtc->pipe);
1542
1543         I915_WRITE(reg, dpll);
1544         POSTING_READ(reg);
1545         udelay(150);
1546
1547         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1548                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1549
1550         I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1551         POSTING_READ(DPLL_MD(crtc->pipe));
1552
1553         /* We do this three times for luck */
1554         I915_WRITE(reg, dpll);
1555         POSTING_READ(reg);
1556         udelay(150); /* wait for warmup */
1557         I915_WRITE(reg, dpll);
1558         POSTING_READ(reg);
1559         udelay(150); /* wait for warmup */
1560         I915_WRITE(reg, dpll);
1561         POSTING_READ(reg);
1562         udelay(150); /* wait for warmup */
1563 }
1564
1565 static void chv_enable_pll(struct intel_crtc *crtc)
1566 {
1567         struct drm_device *dev = crtc->base.dev;
1568         struct drm_i915_private *dev_priv = dev->dev_private;
1569         int pipe = crtc->pipe;
1570         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1571         u32 tmp;
1572
1573         assert_pipe_disabled(dev_priv, crtc->pipe);
1574
1575         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1576
1577         mutex_lock(&dev_priv->dpio_lock);
1578
1579         /* Enable back the 10bit clock to display controller */
1580         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1581         tmp |= DPIO_DCLKP_EN;
1582         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1583
1584         /*
1585          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1586          */
1587         udelay(1);
1588
1589         /* Enable PLL */
1590         I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1591
1592         /* Check PLL is locked */
1593         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1594                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1595
1596         /* not sure when this should be written */
1597         I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1598         POSTING_READ(DPLL_MD(pipe));
1599
1600         mutex_unlock(&dev_priv->dpio_lock);
1601 }
1602
1603 static void i9xx_enable_pll(struct intel_crtc *crtc)
1604 {
1605         struct drm_device *dev = crtc->base.dev;
1606         struct drm_i915_private *dev_priv = dev->dev_private;
1607         int reg = DPLL(crtc->pipe);
1608         u32 dpll = crtc->config.dpll_hw_state.dpll;
1609
1610         assert_pipe_disabled(dev_priv, crtc->pipe);
1611
1612         /* No really, not for ILK+ */
1613         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1614
1615         /* PLL is protected by panel, make sure we can write it */
1616         if (IS_MOBILE(dev) && !IS_I830(dev))
1617                 assert_panel_unlocked(dev_priv, crtc->pipe);
1618
1619         I915_WRITE(reg, dpll);
1620
1621         /* Wait for the clocks to stabilize. */
1622         POSTING_READ(reg);
1623         udelay(150);
1624
1625         if (INTEL_INFO(dev)->gen >= 4) {
1626                 I915_WRITE(DPLL_MD(crtc->pipe),
1627                            crtc->config.dpll_hw_state.dpll_md);
1628         } else {
1629                 /* The pixel multiplier can only be updated once the
1630                  * DPLL is enabled and the clocks are stable.
1631                  *
1632                  * So write it again.
1633                  */
1634                 I915_WRITE(reg, dpll);
1635         }
1636
1637         /* We do this three times for luck */
1638         I915_WRITE(reg, dpll);
1639         POSTING_READ(reg);
1640         udelay(150); /* wait for warmup */
1641         I915_WRITE(reg, dpll);
1642         POSTING_READ(reg);
1643         udelay(150); /* wait for warmup */
1644         I915_WRITE(reg, dpll);
1645         POSTING_READ(reg);
1646         udelay(150); /* wait for warmup */
1647 }
1648
1649 /**
1650  * i9xx_disable_pll - disable a PLL
1651  * @dev_priv: i915 private structure
1652  * @pipe: pipe PLL to disable
1653  *
1654  * Disable the PLL for @pipe, making sure the pipe is off first.
1655  *
1656  * Note!  This is for pre-ILK only.
1657  */
1658 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1659 {
1660         /* Don't disable pipe A or pipe A PLLs if needed */
1661         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1662                 return;
1663
1664         /* Make sure the pipe isn't still relying on us */
1665         assert_pipe_disabled(dev_priv, pipe);
1666
1667         I915_WRITE(DPLL(pipe), 0);
1668         POSTING_READ(DPLL(pipe));
1669 }
1670
1671 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1672 {
1673         u32 val = 0;
1674
1675         /* Make sure the pipe isn't still relying on us */
1676         assert_pipe_disabled(dev_priv, pipe);
1677
1678         /*
1679          * Leave integrated clock source and reference clock enabled for pipe B.
1680          * The latter is needed for VGA hotplug / manual detection.
1681          */
1682         if (pipe == PIPE_B)
1683                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1684         I915_WRITE(DPLL(pipe), val);
1685         POSTING_READ(DPLL(pipe));
1686
1687 }
1688
1689 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1690 {
1691         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1692         u32 val;
1693
1694         /* Make sure the pipe isn't still relying on us */
1695         assert_pipe_disabled(dev_priv, pipe);
1696
1697         /* Set PLL en = 0 */
1698         val = DPLL_SSC_REF_CLOCK_CHV;
1699         if (pipe != PIPE_A)
1700                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1701         I915_WRITE(DPLL(pipe), val);
1702         POSTING_READ(DPLL(pipe));
1703
1704         mutex_lock(&dev_priv->dpio_lock);
1705
1706         /* Disable 10bit clock to display controller */
1707         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1708         val &= ~DPIO_DCLKP_EN;
1709         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1710
1711         mutex_unlock(&dev_priv->dpio_lock);
1712 }
1713
1714 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1715                 struct intel_digital_port *dport)
1716 {
1717         u32 port_mask;
1718         int dpll_reg;
1719
1720         switch (dport->port) {
1721         case PORT_B:
1722                 port_mask = DPLL_PORTB_READY_MASK;
1723                 dpll_reg = DPLL(0);
1724                 break;
1725         case PORT_C:
1726                 port_mask = DPLL_PORTC_READY_MASK;
1727                 dpll_reg = DPLL(0);
1728                 break;
1729         case PORT_D:
1730                 port_mask = DPLL_PORTD_READY_MASK;
1731                 dpll_reg = DPIO_PHY_STATUS;
1732                 break;
1733         default:
1734                 BUG();
1735         }
1736
1737         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1738                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1739                      port_name(dport->port), I915_READ(dpll_reg));
1740 }
1741
1742 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1743 {
1744         struct drm_device *dev = crtc->base.dev;
1745         struct drm_i915_private *dev_priv = dev->dev_private;
1746         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1747
1748         WARN_ON(!pll->refcount);
1749         if (pll->active == 0) {
1750                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1751                 WARN_ON(pll->on);
1752                 assert_shared_dpll_disabled(dev_priv, pll);
1753
1754                 pll->mode_set(dev_priv, pll);
1755         }
1756 }
1757
1758 /**
1759  * intel_enable_shared_dpll - enable PCH PLL
1760  * @dev_priv: i915 private structure
1761  * @pipe: pipe PLL to enable
1762  *
1763  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1764  * drives the transcoder clock.
1765  */
1766 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1767 {
1768         struct drm_device *dev = crtc->base.dev;
1769         struct drm_i915_private *dev_priv = dev->dev_private;
1770         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1771
1772         if (WARN_ON(pll == NULL))
1773                 return;
1774
1775         if (WARN_ON(pll->refcount == 0))
1776                 return;
1777
1778         DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1779                       pll->name, pll->active, pll->on,
1780                       crtc->base.base.id);
1781
1782         if (pll->active++) {
1783                 WARN_ON(!pll->on);
1784                 assert_shared_dpll_enabled(dev_priv, pll);
1785                 return;
1786         }
1787         WARN_ON(pll->on);
1788
1789         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1790         pll->enable(dev_priv, pll);
1791         pll->on = true;
1792 }
1793
1794 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1795 {
1796         struct drm_device *dev = crtc->base.dev;
1797         struct drm_i915_private *dev_priv = dev->dev_private;
1798         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1799
1800         /* PCH only available on ILK+ */
1801         BUG_ON(INTEL_INFO(dev)->gen < 5);
1802         if (WARN_ON(pll == NULL))
1803                return;
1804
1805         if (WARN_ON(pll->refcount == 0))
1806                 return;
1807
1808         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1809                       pll->name, pll->active, pll->on,
1810                       crtc->base.base.id);
1811
1812         if (WARN_ON(pll->active == 0)) {
1813                 assert_shared_dpll_disabled(dev_priv, pll);
1814                 return;
1815         }
1816
1817         assert_shared_dpll_enabled(dev_priv, pll);
1818         WARN_ON(!pll->on);
1819         if (--pll->active)
1820                 return;
1821
1822         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1823         pll->disable(dev_priv, pll);
1824         pll->on = false;
1825 }
1826
1827 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1828                                            enum i915_pipe pipe)
1829 {
1830         struct drm_device *dev = dev_priv->dev;
1831         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1832         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1833         uint32_t reg, val, pipeconf_val;
1834
1835         /* PCH only available on ILK+ */
1836         BUG_ON(INTEL_INFO(dev)->gen < 5);
1837
1838         /* Make sure PCH DPLL is enabled */
1839         assert_shared_dpll_enabled(dev_priv,
1840                                    intel_crtc_to_shared_dpll(intel_crtc));
1841
1842         /* FDI must be feeding us bits for PCH ports */
1843         assert_fdi_tx_enabled(dev_priv, pipe);
1844         assert_fdi_rx_enabled(dev_priv, pipe);
1845
1846         if (HAS_PCH_CPT(dev)) {
1847                 /* Workaround: Set the timing override bit before enabling the
1848                  * pch transcoder. */
1849                 reg = TRANS_CHICKEN2(pipe);
1850                 val = I915_READ(reg);
1851                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1852                 I915_WRITE(reg, val);
1853         }
1854
1855         reg = PCH_TRANSCONF(pipe);
1856         val = I915_READ(reg);
1857         pipeconf_val = I915_READ(PIPECONF(pipe));
1858
1859         if (HAS_PCH_IBX(dev_priv->dev)) {
1860                 /*
1861                  * make the BPC in transcoder be consistent with
1862                  * that in pipeconf reg.
1863                  */
1864                 val &= ~PIPECONF_BPC_MASK;
1865                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1866         }
1867
1868         val &= ~TRANS_INTERLACE_MASK;
1869         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1870                 if (HAS_PCH_IBX(dev_priv->dev) &&
1871                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1872                         val |= TRANS_LEGACY_INTERLACED_ILK;
1873                 else
1874                         val |= TRANS_INTERLACED;
1875         else
1876                 val |= TRANS_PROGRESSIVE;
1877
1878         I915_WRITE(reg, val | TRANS_ENABLE);
1879         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1880                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1881 }
1882
1883 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1884                                       enum transcoder cpu_transcoder)
1885 {
1886         u32 val, pipeconf_val;
1887
1888         /* PCH only available on ILK+ */
1889         BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
1890
1891         /* FDI must be feeding us bits for PCH ports */
1892         assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1893         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1894
1895         /* Workaround: set timing override bit. */
1896         val = I915_READ(_TRANSA_CHICKEN2);
1897         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1898         I915_WRITE(_TRANSA_CHICKEN2, val);
1899
1900         val = TRANS_ENABLE;
1901         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1902
1903         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1904             PIPECONF_INTERLACED_ILK)
1905                 val |= TRANS_INTERLACED;
1906         else
1907                 val |= TRANS_PROGRESSIVE;
1908
1909         I915_WRITE(LPT_TRANSCONF, val);
1910         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1911                 DRM_ERROR("Failed to enable PCH transcoder\n");
1912 }
1913
1914 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1915                                             enum i915_pipe pipe)
1916 {
1917         struct drm_device *dev = dev_priv->dev;
1918         uint32_t reg, val;
1919
1920         /* FDI relies on the transcoder */
1921         assert_fdi_tx_disabled(dev_priv, pipe);
1922         assert_fdi_rx_disabled(dev_priv, pipe);
1923
1924         /* Ports must be off as well */
1925         assert_pch_ports_disabled(dev_priv, pipe);
1926
1927         reg = PCH_TRANSCONF(pipe);
1928         val = I915_READ(reg);
1929         val &= ~TRANS_ENABLE;
1930         I915_WRITE(reg, val);
1931         /* wait for PCH transcoder off, transcoder state */
1932         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1933                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1934
1935         if (!HAS_PCH_IBX(dev)) {
1936                 /* Workaround: Clear the timing override chicken bit again. */
1937                 reg = TRANS_CHICKEN2(pipe);
1938                 val = I915_READ(reg);
1939                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1940                 I915_WRITE(reg, val);
1941         }
1942 }
1943
1944 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1945 {
1946         u32 val;
1947
1948         val = I915_READ(LPT_TRANSCONF);
1949         val &= ~TRANS_ENABLE;
1950         I915_WRITE(LPT_TRANSCONF, val);
1951         /* wait for PCH transcoder off, transcoder state */
1952         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1953                 DRM_ERROR("Failed to disable PCH transcoder\n");
1954
1955         /* Workaround: clear timing override bit. */
1956         val = I915_READ(_TRANSA_CHICKEN2);
1957         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1958         I915_WRITE(_TRANSA_CHICKEN2, val);
1959 }
1960
1961 /**
1962  * intel_enable_pipe - enable a pipe, asserting requirements
1963  * @crtc: crtc responsible for the pipe
1964  *
1965  * Enable @crtc's pipe, making sure that various hardware specific requirements
1966  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1967  */
1968 static void intel_enable_pipe(struct intel_crtc *crtc)
1969 {
1970         struct drm_device *dev = crtc->base.dev;
1971         struct drm_i915_private *dev_priv = dev->dev_private;
1972         enum i915_pipe pipe = crtc->pipe;
1973         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1974                                                                       pipe);
1975         enum i915_pipe pch_transcoder;
1976         int reg;
1977         u32 val;
1978
1979         assert_planes_disabled(dev_priv, pipe);
1980         assert_cursor_disabled(dev_priv, pipe);
1981         assert_sprites_disabled(dev_priv, pipe);
1982
1983         if (HAS_PCH_LPT(dev_priv->dev))
1984                 pch_transcoder = TRANSCODER_A;
1985         else
1986                 pch_transcoder = pipe;
1987
1988         /*
1989          * A pipe without a PLL won't actually be able to drive bits from
1990          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1991          * need the check.
1992          */
1993         if (!HAS_PCH_SPLIT(dev_priv->dev))
1994                 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
1995                         assert_dsi_pll_enabled(dev_priv);
1996                 else
1997                         assert_pll_enabled(dev_priv, pipe);
1998         else {
1999                 if (crtc->config.has_pch_encoder) {
2000                         /* if driving the PCH, we need FDI enabled */
2001                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2002                         assert_fdi_tx_pll_enabled(dev_priv,
2003                                                   (enum i915_pipe) cpu_transcoder);
2004                 }
2005                 /* FIXME: assert CPU port conditions for SNB+ */
2006         }
2007
2008         reg = PIPECONF(cpu_transcoder);
2009         val = I915_READ(reg);
2010         if (val & PIPECONF_ENABLE) {
2011                 WARN_ON(!(pipe == PIPE_A &&
2012                           dev_priv->quirks & QUIRK_PIPEA_FORCE));
2013                 return;
2014         }
2015
2016         I915_WRITE(reg, val | PIPECONF_ENABLE);
2017         POSTING_READ(reg);
2018 }
2019
2020 /**
2021  * intel_disable_pipe - disable a pipe, asserting requirements
2022  * @dev_priv: i915 private structure
2023  * @pipe: pipe to disable
2024  *
2025  * Disable @pipe, making sure that various hardware specific requirements
2026  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
2027  *
2028  * @pipe should be %PIPE_A or %PIPE_B.
2029  *
2030  * Will wait until the pipe has shut down before returning.
2031  */
2032 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2033                                enum i915_pipe pipe)
2034 {
2035         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2036                                                                       pipe);
2037         int reg;
2038         u32 val;
2039
2040         /*
2041          * Make sure planes won't keep trying to pump pixels to us,
2042          * or we might hang the display.
2043          */
2044         assert_planes_disabled(dev_priv, pipe);
2045         assert_cursor_disabled(dev_priv, pipe);
2046         assert_sprites_disabled(dev_priv, pipe);
2047
2048         /* Don't disable pipe A or pipe A PLLs if needed */
2049         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2050                 return;
2051
2052         reg = PIPECONF(cpu_transcoder);
2053         val = I915_READ(reg);
2054         if ((val & PIPECONF_ENABLE) == 0)
2055                 return;
2056
2057         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
2058         intel_wait_for_pipe_off(dev_priv->dev, pipe);
2059 }
2060
2061 /*
2062  * Plane regs are double buffered, going from enabled->disabled needs a
2063  * trigger in order to latch.  The display address reg provides this.
2064  */
2065 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2066                                enum plane plane)
2067 {
2068         struct drm_device *dev = dev_priv->dev;
2069         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2070
2071         I915_WRITE(reg, I915_READ(reg));
2072         POSTING_READ(reg);
2073 }
2074
2075 /**
2076  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2077  * @dev_priv: i915 private structure
2078  * @plane: plane to enable
2079  * @pipe: pipe being fed
2080  *
2081  * Enable @plane on @pipe, making sure that @pipe is running first.
2082  */
2083 static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2084                                           enum plane plane, enum i915_pipe pipe)
2085 {
2086         struct drm_device *dev = dev_priv->dev;
2087         struct intel_crtc *intel_crtc =
2088                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2089         int reg;
2090         u32 val;
2091
2092         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2093         assert_pipe_enabled(dev_priv, pipe);
2094
2095         if (intel_crtc->primary_enabled)
2096                 return;
2097
2098         intel_crtc->primary_enabled = true;
2099
2100         reg = DSPCNTR(plane);
2101         val = I915_READ(reg);
2102         WARN_ON(val & DISPLAY_PLANE_ENABLE);
2103
2104         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2105         intel_flush_primary_plane(dev_priv, plane);
2106
2107         /*
2108          * BDW signals flip done immediately if the plane
2109          * is disabled, even if the plane enable is already
2110          * armed to occur at the next vblank :(
2111          */
2112         if (IS_BROADWELL(dev))
2113                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2114 }
2115
2116 /**
2117  * intel_disable_primary_hw_plane - disable the primary hardware plane
2118  * @dev_priv: i915 private structure
2119  * @plane: plane to disable
2120  * @pipe: pipe consuming the data
2121  *
2122  * Disable @plane; should be an independent operation.
2123  */
2124 static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
2125                                            enum plane plane, enum i915_pipe pipe)
2126 {
2127         struct intel_crtc *intel_crtc =
2128                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2129         int reg;
2130         u32 val;
2131
2132         if (!intel_crtc->primary_enabled)
2133                 return;
2134
2135         intel_crtc->primary_enabled = false;
2136
2137         reg = DSPCNTR(plane);
2138         val = I915_READ(reg);
2139         WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2140
2141         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
2142         intel_flush_primary_plane(dev_priv, plane);
2143 }
2144
2145 static bool need_vtd_wa(struct drm_device *dev)
2146 {
2147 #ifdef CONFIG_INTEL_IOMMU
2148         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2149                 return true;
2150 #endif
2151         return false;
2152 }
2153
2154 static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2155 {
2156         int tile_height;
2157
2158         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2159         return ALIGN(height, tile_height);
2160 }
2161
2162 int
2163 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2164                            struct drm_i915_gem_object *obj,
2165                            struct intel_engine_cs *pipelined)
2166 {
2167         struct drm_i915_private *dev_priv = dev->dev_private;
2168         u32 alignment;
2169         int ret;
2170
2171         switch (obj->tiling_mode) {
2172         case I915_TILING_NONE:
2173                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2174                         alignment = 128 * 1024;
2175                 else if (INTEL_INFO(dev)->gen >= 4)
2176                         alignment = 4 * 1024;
2177                 else
2178                         alignment = 64 * 1024;
2179                 break;
2180         case I915_TILING_X:
2181                 /* pin() will align the object as required by fence */
2182                 alignment = 0;
2183                 break;
2184         case I915_TILING_Y:
2185                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2186                 return -EINVAL;
2187         default:
2188                 BUG();
2189         }
2190
2191         /* Note that the w/a also requires 64 PTE of padding following the
2192          * bo. We currently fill all unused PTE with the shadow page and so
2193          * we should always have valid PTE following the scanout preventing
2194          * the VT-d warning.
2195          */
2196         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2197                 alignment = 256 * 1024;
2198
2199         dev_priv->mm.interruptible = false;
2200         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2201         if (ret)
2202                 goto err_interruptible;
2203
2204         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2205          * fence, whereas 965+ only requires a fence if using
2206          * framebuffer compression.  For simplicity, we always install
2207          * a fence as the cost is not that onerous.
2208          */
2209         ret = i915_gem_object_get_fence(obj);
2210         if (ret)
2211                 goto err_unpin;
2212
2213         i915_gem_object_pin_fence(obj);
2214
2215         dev_priv->mm.interruptible = true;
2216         return 0;
2217
2218 err_unpin:
2219         i915_gem_object_unpin_from_display_plane(obj);
2220 err_interruptible:
2221         dev_priv->mm.interruptible = true;
2222         return ret;
2223 }
2224
2225 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2226 {
2227         i915_gem_object_unpin_fence(obj);
2228         i915_gem_object_unpin_from_display_plane(obj);
2229 }
2230
2231 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2232  * is assumed to be a power-of-two. */
2233 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2234                                              unsigned int tiling_mode,
2235                                              unsigned int cpp,
2236                                              unsigned int pitch)
2237 {
2238         if (tiling_mode != I915_TILING_NONE) {
2239                 unsigned int tile_rows, tiles;
2240
2241                 tile_rows = *y / 8;
2242                 *y %= 8;
2243
2244                 tiles = *x / (512/cpp);
2245                 *x %= 512/cpp;
2246
2247                 return tile_rows * pitch * 8 + tiles * 4096;
2248         } else {
2249                 unsigned int offset;
2250
2251                 offset = *y * pitch + *x * cpp;
2252                 *y = 0;
2253                 *x = (offset & 4095) / cpp;
2254                 return offset & -4096;
2255         }
2256 }
2257
2258 int intel_format_to_fourcc(int format)
2259 {
2260         switch (format) {
2261         case DISPPLANE_8BPP:
2262                 return DRM_FORMAT_C8;
2263         case DISPPLANE_BGRX555:
2264                 return DRM_FORMAT_XRGB1555;
2265         case DISPPLANE_BGRX565:
2266                 return DRM_FORMAT_RGB565;
2267         default:
2268         case DISPPLANE_BGRX888:
2269                 return DRM_FORMAT_XRGB8888;
2270         case DISPPLANE_RGBX888:
2271                 return DRM_FORMAT_XBGR8888;
2272         case DISPPLANE_BGRX101010:
2273                 return DRM_FORMAT_XRGB2101010;
2274         case DISPPLANE_RGBX101010:
2275                 return DRM_FORMAT_XBGR2101010;
2276         }
2277 }
2278
2279 static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2280                                   struct intel_plane_config *plane_config)
2281 {
2282         struct drm_device *dev = crtc->base.dev;
2283         struct drm_i915_gem_object *obj = NULL;
2284         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2285         u32 base = plane_config->base;
2286
2287         if (plane_config->size == 0)
2288                 return false;
2289
2290         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2291                                                              plane_config->size);
2292         if (!obj)
2293                 return false;
2294
2295         if (plane_config->tiled) {
2296                 obj->tiling_mode = I915_TILING_X;
2297                 obj->stride = crtc->base.primary->fb->pitches[0];
2298         }
2299
2300         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2301         mode_cmd.width = crtc->base.primary->fb->width;
2302         mode_cmd.height = crtc->base.primary->fb->height;
2303         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2304
2305         mutex_lock(&dev->struct_mutex);
2306
2307         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2308                                    &mode_cmd, obj)) {
2309                 DRM_DEBUG_KMS("intel fb init failed\n");
2310                 goto out_unref_obj;
2311         }
2312
2313         mutex_unlock(&dev->struct_mutex);
2314
2315         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2316         return true;
2317
2318 out_unref_obj:
2319         drm_gem_object_unreference(&obj->base);
2320         mutex_unlock(&dev->struct_mutex);
2321         return false;
2322 }
2323
2324 static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2325                                  struct intel_plane_config *plane_config)
2326 {
2327         struct drm_device *dev = intel_crtc->base.dev;
2328         struct drm_crtc *c;
2329         struct intel_crtc *i;
2330         struct intel_framebuffer *fb;
2331
2332         if (!intel_crtc->base.primary->fb)
2333                 return;
2334
2335         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2336                 return;
2337
2338         kfree(intel_crtc->base.primary->fb);
2339         intel_crtc->base.primary->fb = NULL;
2340
2341         /*
2342          * Failed to alloc the obj, check to see if we should share
2343          * an fb with another CRTC instead
2344          */
2345         for_each_crtc(dev, c) {
2346                 i = to_intel_crtc(c);
2347
2348                 if (c == &intel_crtc->base)
2349                         continue;
2350
2351                 if (!i->active || !c->primary->fb)
2352                         continue;
2353
2354                 fb = to_intel_framebuffer(c->primary->fb);
2355                 if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
2356                         drm_framebuffer_reference(c->primary->fb);
2357                         intel_crtc->base.primary->fb = c->primary->fb;
2358                         break;
2359                 }
2360         }
2361 }
2362
2363 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2364                                       struct drm_framebuffer *fb,
2365                                       int x, int y)
2366 {
2367         struct drm_device *dev = crtc->dev;
2368         struct drm_i915_private *dev_priv = dev->dev_private;
2369         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2370         struct intel_framebuffer *intel_fb;
2371         struct drm_i915_gem_object *obj;
2372         int plane = intel_crtc->plane;
2373         unsigned long linear_offset;
2374         u32 dspcntr;
2375         u32 reg;
2376
2377         intel_fb = to_intel_framebuffer(fb);
2378         obj = intel_fb->obj;
2379
2380         reg = DSPCNTR(plane);
2381         dspcntr = I915_READ(reg);
2382         /* Mask out pixel format bits in case we change it */
2383         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2384         switch (fb->pixel_format) {
2385         case DRM_FORMAT_C8:
2386                 dspcntr |= DISPPLANE_8BPP;
2387                 break;
2388         case DRM_FORMAT_XRGB1555:
2389         case DRM_FORMAT_ARGB1555:
2390                 dspcntr |= DISPPLANE_BGRX555;
2391                 break;
2392         case DRM_FORMAT_RGB565:
2393                 dspcntr |= DISPPLANE_BGRX565;
2394                 break;
2395         case DRM_FORMAT_XRGB8888:
2396         case DRM_FORMAT_ARGB8888:
2397                 dspcntr |= DISPPLANE_BGRX888;
2398                 break;
2399         case DRM_FORMAT_XBGR8888:
2400         case DRM_FORMAT_ABGR8888:
2401                 dspcntr |= DISPPLANE_RGBX888;
2402                 break;
2403         case DRM_FORMAT_XRGB2101010:
2404         case DRM_FORMAT_ARGB2101010:
2405                 dspcntr |= DISPPLANE_BGRX101010;
2406                 break;
2407         case DRM_FORMAT_XBGR2101010:
2408         case DRM_FORMAT_ABGR2101010:
2409                 dspcntr |= DISPPLANE_RGBX101010;
2410                 break;
2411         default:
2412                 BUG();
2413         }
2414
2415         if (INTEL_INFO(dev)->gen >= 4) {
2416                 if (obj->tiling_mode != I915_TILING_NONE)
2417                         dspcntr |= DISPPLANE_TILED;
2418                 else
2419                         dspcntr &= ~DISPPLANE_TILED;
2420         }
2421
2422         if (IS_G4X(dev))
2423                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2424
2425         I915_WRITE(reg, dspcntr);
2426
2427         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2428
2429         if (INTEL_INFO(dev)->gen >= 4) {
2430                 intel_crtc->dspaddr_offset =
2431                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2432                                                        fb->bits_per_pixel / 8,
2433                                                        fb->pitches[0]);
2434                 linear_offset -= intel_crtc->dspaddr_offset;
2435         } else {
2436                 intel_crtc->dspaddr_offset = linear_offset;
2437         }
2438
2439         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2440                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2441                       fb->pitches[0]);
2442         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2443         if (INTEL_INFO(dev)->gen >= 4) {
2444                 I915_WRITE(DSPSURF(plane),
2445                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2446                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2447                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2448         } else
2449                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2450         POSTING_READ(reg);
2451 }
2452
2453 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2454                                           struct drm_framebuffer *fb,
2455                                           int x, int y)
2456 {
2457         struct drm_device *dev = crtc->dev;
2458         struct drm_i915_private *dev_priv = dev->dev_private;
2459         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2460         struct intel_framebuffer *intel_fb;
2461         struct drm_i915_gem_object *obj;
2462         int plane = intel_crtc->plane;
2463         unsigned long linear_offset;
2464         u32 dspcntr;
2465         u32 reg;
2466
2467         intel_fb = to_intel_framebuffer(fb);
2468         obj = intel_fb->obj;
2469
2470         reg = DSPCNTR(plane);
2471         dspcntr = I915_READ(reg);
2472         /* Mask out pixel format bits in case we change it */
2473         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2474         switch (fb->pixel_format) {
2475         case DRM_FORMAT_C8:
2476                 dspcntr |= DISPPLANE_8BPP;
2477                 break;
2478         case DRM_FORMAT_RGB565:
2479                 dspcntr |= DISPPLANE_BGRX565;
2480                 break;
2481         case DRM_FORMAT_XRGB8888:
2482         case DRM_FORMAT_ARGB8888:
2483                 dspcntr |= DISPPLANE_BGRX888;
2484                 break;
2485         case DRM_FORMAT_XBGR8888:
2486         case DRM_FORMAT_ABGR8888:
2487                 dspcntr |= DISPPLANE_RGBX888;
2488                 break;
2489         case DRM_FORMAT_XRGB2101010:
2490         case DRM_FORMAT_ARGB2101010:
2491                 dspcntr |= DISPPLANE_BGRX101010;
2492                 break;
2493         case DRM_FORMAT_XBGR2101010:
2494         case DRM_FORMAT_ABGR2101010:
2495                 dspcntr |= DISPPLANE_RGBX101010;
2496                 break;
2497         default:
2498                 BUG();
2499         }
2500
2501         if (obj->tiling_mode != I915_TILING_NONE)
2502                 dspcntr |= DISPPLANE_TILED;
2503         else
2504                 dspcntr &= ~DISPPLANE_TILED;
2505
2506         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2507                 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2508         else
2509                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2510
2511         I915_WRITE(reg, dspcntr);
2512
2513         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2514         intel_crtc->dspaddr_offset =
2515                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2516                                                fb->bits_per_pixel / 8,
2517                                                fb->pitches[0]);
2518         linear_offset -= intel_crtc->dspaddr_offset;
2519
2520         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2521                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2522                       fb->pitches[0]);
2523         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2524         I915_WRITE(DSPSURF(plane),
2525                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2526         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2527                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2528         } else {
2529                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2530                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2531         }
2532         POSTING_READ(reg);
2533 }
2534
2535 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2536 static int
2537 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2538                            int x, int y, enum mode_set_atomic state)
2539 {
2540         struct drm_device *dev = crtc->dev;
2541         struct drm_i915_private *dev_priv = dev->dev_private;
2542
2543         if (dev_priv->display.disable_fbc)
2544                 dev_priv->display.disable_fbc(dev);
2545         intel_increase_pllclock(crtc);
2546
2547         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2548
2549         return 0;
2550 }
2551
2552 void intel_display_handle_reset(struct drm_device *dev)
2553 {
2554         struct drm_i915_private *dev_priv = dev->dev_private;
2555         struct drm_crtc *crtc;
2556
2557         /*
2558          * Flips in the rings have been nuked by the reset,
2559          * so complete all pending flips so that user space
2560          * will get its events and not get stuck.
2561          *
2562          * Also update the base address of all primary
2563          * planes to the the last fb to make sure we're
2564          * showing the correct fb after a reset.
2565          *
2566          * Need to make two loops over the crtcs so that we
2567          * don't try to grab a crtc mutex before the
2568          * pending_flip_queue really got woken up.
2569          */
2570
2571         for_each_crtc(dev, crtc) {
2572                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2573                 enum plane plane = intel_crtc->plane;
2574
2575                 intel_prepare_page_flip(dev, plane);
2576                 intel_finish_page_flip_plane(dev, plane);
2577         }
2578
2579         for_each_crtc(dev, crtc) {
2580                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2581
2582                 drm_modeset_lock(&crtc->mutex, NULL);
2583                 /*
2584                  * FIXME: Once we have proper support for primary planes (and
2585                  * disabling them without disabling the entire crtc) allow again
2586                  * a NULL crtc->primary->fb.
2587                  */
2588                 if (intel_crtc->active && crtc->primary->fb)
2589                         dev_priv->display.update_primary_plane(crtc,
2590                                                                crtc->primary->fb,
2591                                                                crtc->x,
2592                                                                crtc->y);
2593                 drm_modeset_unlock(&crtc->mutex);
2594         }
2595 }
2596
2597 static int
2598 intel_finish_fb(struct drm_framebuffer *old_fb)
2599 {
2600         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2601         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2602         bool was_interruptible = dev_priv->mm.interruptible;
2603         int ret;
2604
2605         /* Big Hammer, we also need to ensure that any pending
2606          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2607          * current scanout is retired before unpinning the old
2608          * framebuffer.
2609          *
2610          * This should only fail upon a hung GPU, in which case we
2611          * can safely continue.
2612          */
2613         dev_priv->mm.interruptible = false;
2614         ret = i915_gem_object_finish_gpu(obj);
2615         dev_priv->mm.interruptible = was_interruptible;
2616
2617         return ret;
2618 }
2619
2620 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2621 {
2622         struct drm_device *dev = crtc->dev;
2623         struct drm_i915_private *dev_priv = dev->dev_private;
2624         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2625         bool pending;
2626
2627         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2628             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2629                 return false;
2630
2631         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2632         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2633         lockmgr(&dev->event_lock, LK_RELEASE);
2634
2635         return pending;
2636 }
2637
2638 static int
2639 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2640                     struct drm_framebuffer *fb)
2641 {
2642         struct drm_device *dev = crtc->dev;
2643         struct drm_i915_private *dev_priv = dev->dev_private;
2644         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2645         struct drm_framebuffer *old_fb;
2646         int ret;
2647
2648         if (intel_crtc_has_pending_flip(crtc)) {
2649                 DRM_ERROR("pipe is still busy with an old pageflip\n");
2650                 return -EBUSY;
2651         }
2652
2653         /* no fb bound */
2654         if (!fb) {
2655                 DRM_ERROR("No FB bound\n");
2656                 return 0;
2657         }
2658
2659         if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2660                 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2661                           plane_name(intel_crtc->plane),
2662                           INTEL_INFO(dev)->num_pipes);
2663                 return -EINVAL;
2664         }
2665
2666         mutex_lock(&dev->struct_mutex);
2667         ret = intel_pin_and_fence_fb_obj(dev,
2668                                          to_intel_framebuffer(fb)->obj,
2669                                          NULL);
2670         mutex_unlock(&dev->struct_mutex);
2671         if (ret != 0) {
2672                 DRM_ERROR("pin & fence failed\n");
2673                 return ret;
2674         }
2675
2676         /*
2677          * Update pipe size and adjust fitter if needed: the reason for this is
2678          * that in compute_mode_changes we check the native mode (not the pfit
2679          * mode) to see if we can flip rather than do a full mode set. In the
2680          * fastboot case, we'll flip, but if we don't update the pipesrc and
2681          * pfit state, we'll end up with a big fb scanned out into the wrong
2682          * sized surface.
2683          *
2684          * To fix this properly, we need to hoist the checks up into
2685          * compute_mode_changes (or above), check the actual pfit state and
2686          * whether the platform allows pfit disable with pipe active, and only
2687          * then update the pipesrc and pfit state, even on the flip path.
2688          */
2689         if (i915.fastboot) {
2690                 const struct drm_display_mode *adjusted_mode =
2691                         &intel_crtc->config.adjusted_mode;
2692
2693                 I915_WRITE(PIPESRC(intel_crtc->pipe),
2694                            ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2695                            (adjusted_mode->crtc_vdisplay - 1));
2696                 if (!intel_crtc->config.pch_pfit.enabled &&
2697                     (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2698                      intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2699                         I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2700                         I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2701                         I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2702                 }
2703                 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2704                 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2705         }
2706
2707         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2708
2709         old_fb = crtc->primary->fb;
2710         crtc->primary->fb = fb;
2711         crtc->x = x;
2712         crtc->y = y;
2713
2714         if (old_fb) {
2715                 if (intel_crtc->active && old_fb != fb)
2716                         intel_wait_for_vblank(dev, intel_crtc->pipe);
2717                 mutex_lock(&dev->struct_mutex);
2718                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2719                 mutex_unlock(&dev->struct_mutex);
2720         }
2721
2722         mutex_lock(&dev->struct_mutex);
2723         intel_update_fbc(dev);
2724         intel_edp_psr_update(dev);
2725         mutex_unlock(&dev->struct_mutex);
2726
2727         return 0;
2728 }
2729
2730 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2731 {
2732         struct drm_device *dev = crtc->dev;
2733         struct drm_i915_private *dev_priv = dev->dev_private;
2734         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2735         int pipe = intel_crtc->pipe;
2736         u32 reg, temp;
2737
2738         /* enable normal train */
2739         reg = FDI_TX_CTL(pipe);
2740         temp = I915_READ(reg);
2741         if (IS_IVYBRIDGE(dev)) {
2742                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2743                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2744         } else {
2745                 temp &= ~FDI_LINK_TRAIN_NONE;
2746                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2747         }
2748         I915_WRITE(reg, temp);
2749
2750         reg = FDI_RX_CTL(pipe);
2751         temp = I915_READ(reg);
2752         if (HAS_PCH_CPT(dev)) {
2753                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2754                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2755         } else {
2756                 temp &= ~FDI_LINK_TRAIN_NONE;
2757                 temp |= FDI_LINK_TRAIN_NONE;
2758         }
2759         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2760
2761         /* wait one idle pattern time */
2762         POSTING_READ(reg);
2763         udelay(1000);
2764
2765         /* IVB wants error correction enabled */
2766         if (IS_IVYBRIDGE(dev))
2767                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2768                            FDI_FE_ERRC_ENABLE);
2769 }
2770
2771 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2772 {
2773         return crtc->base.enabled && crtc->active &&
2774                 crtc->config.has_pch_encoder;
2775 }
2776
2777 static void ivb_modeset_global_resources(struct drm_device *dev)
2778 {
2779         struct drm_i915_private *dev_priv = dev->dev_private;
2780         struct intel_crtc *pipe_B_crtc =
2781                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2782         struct intel_crtc *pipe_C_crtc =
2783                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2784         uint32_t temp;
2785
2786         /*
2787          * When everything is off disable fdi C so that we could enable fdi B
2788          * with all lanes. Note that we don't care about enabled pipes without
2789          * an enabled pch encoder.
2790          */
2791         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2792             !pipe_has_enabled_pch(pipe_C_crtc)) {
2793                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2794                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2795
2796                 temp = I915_READ(SOUTH_CHICKEN1);
2797                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2798                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2799                 I915_WRITE(SOUTH_CHICKEN1, temp);
2800         }
2801 }
2802
2803 /* The FDI link training functions for ILK/Ibexpeak. */
2804 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2805 {
2806         struct drm_device *dev = crtc->dev;
2807         struct drm_i915_private *dev_priv = dev->dev_private;
2808         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2809         int pipe = intel_crtc->pipe;
2810         u32 reg, temp, tries;
2811
2812         /* FDI needs bits from pipe first */
2813         assert_pipe_enabled(dev_priv, pipe);
2814
2815         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2816            for train result */
2817         reg = FDI_RX_IMR(pipe);
2818         temp = I915_READ(reg);
2819         temp &= ~FDI_RX_SYMBOL_LOCK;
2820         temp &= ~FDI_RX_BIT_LOCK;
2821         I915_WRITE(reg, temp);
2822         I915_READ(reg);
2823         udelay(150);
2824
2825         /* enable CPU FDI TX and PCH FDI RX */
2826         reg = FDI_TX_CTL(pipe);
2827         temp = I915_READ(reg);
2828         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2829         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2830         temp &= ~FDI_LINK_TRAIN_NONE;
2831         temp |= FDI_LINK_TRAIN_PATTERN_1;
2832         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2833
2834         reg = FDI_RX_CTL(pipe);
2835         temp = I915_READ(reg);
2836         temp &= ~FDI_LINK_TRAIN_NONE;
2837         temp |= FDI_LINK_TRAIN_PATTERN_1;
2838         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2839
2840         POSTING_READ(reg);
2841         udelay(150);
2842
2843         /* Ironlake workaround, enable clock pointer after FDI enable*/
2844         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2845         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2846                    FDI_RX_PHASE_SYNC_POINTER_EN);
2847
2848         reg = FDI_RX_IIR(pipe);
2849         for (tries = 0; tries < 5; tries++) {
2850                 temp = I915_READ(reg);
2851                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2852
2853                 if ((temp & FDI_RX_BIT_LOCK)) {
2854                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2855                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2856                         break;
2857                 }
2858         }
2859         if (tries == 5)
2860                 DRM_ERROR("FDI train 1 fail!\n");
2861
2862         /* Train 2 */
2863         reg = FDI_TX_CTL(pipe);
2864         temp = I915_READ(reg);
2865         temp &= ~FDI_LINK_TRAIN_NONE;
2866         temp |= FDI_LINK_TRAIN_PATTERN_2;
2867         I915_WRITE(reg, temp);
2868
2869         reg = FDI_RX_CTL(pipe);
2870         temp = I915_READ(reg);
2871         temp &= ~FDI_LINK_TRAIN_NONE;
2872         temp |= FDI_LINK_TRAIN_PATTERN_2;
2873         I915_WRITE(reg, temp);
2874
2875         POSTING_READ(reg);
2876         udelay(150);
2877
2878         reg = FDI_RX_IIR(pipe);
2879         for (tries = 0; tries < 5; tries++) {
2880                 temp = I915_READ(reg);
2881                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2882
2883                 if (temp & FDI_RX_SYMBOL_LOCK) {
2884                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2885                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2886                         break;
2887                 }
2888         }
2889         if (tries == 5)
2890                 DRM_ERROR("FDI train 2 fail!\n");
2891
2892         DRM_DEBUG_KMS("FDI train done\n");
2893
2894 }
2895
2896 static const int snb_b_fdi_train_param[] = {
2897         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2898         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2899         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2900         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2901 };
2902
2903 /* The FDI link training functions for SNB/Cougarpoint. */
2904 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2905 {
2906         struct drm_device *dev = crtc->dev;
2907         struct drm_i915_private *dev_priv = dev->dev_private;
2908         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2909         int pipe = intel_crtc->pipe;
2910         u32 reg, temp, i, retry;
2911
2912         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2913            for train result */
2914         reg = FDI_RX_IMR(pipe);
2915         temp = I915_READ(reg);
2916         temp &= ~FDI_RX_SYMBOL_LOCK;
2917         temp &= ~FDI_RX_BIT_LOCK;
2918         I915_WRITE(reg, temp);
2919
2920         POSTING_READ(reg);
2921         udelay(150);
2922
2923         /* enable CPU FDI TX and PCH FDI RX */
2924         reg = FDI_TX_CTL(pipe);
2925         temp = I915_READ(reg);
2926         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2927         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2928         temp &= ~FDI_LINK_TRAIN_NONE;
2929         temp |= FDI_LINK_TRAIN_PATTERN_1;
2930         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2931         /* SNB-B */
2932         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2933         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2934
2935         I915_WRITE(FDI_RX_MISC(pipe),
2936                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2937
2938         reg = FDI_RX_CTL(pipe);
2939         temp = I915_READ(reg);
2940         if (HAS_PCH_CPT(dev)) {
2941                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2942                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2943         } else {
2944                 temp &= ~FDI_LINK_TRAIN_NONE;
2945                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2946         }
2947         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2948
2949         POSTING_READ(reg);
2950         udelay(150);
2951
2952         for (i = 0; i < 4; i++) {
2953                 reg = FDI_TX_CTL(pipe);
2954                 temp = I915_READ(reg);
2955                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2956                 temp |= snb_b_fdi_train_param[i];
2957                 I915_WRITE(reg, temp);
2958
2959                 POSTING_READ(reg);
2960                 udelay(500);
2961
2962                 for (retry = 0; retry < 5; retry++) {
2963                         reg = FDI_RX_IIR(pipe);
2964                         temp = I915_READ(reg);
2965                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2966                         if (temp & FDI_RX_BIT_LOCK) {
2967                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2968                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2969                                 break;
2970                         }
2971                         udelay(50);
2972                 }
2973                 if (retry < 5)
2974                         break;
2975         }
2976         if (i == 4)
2977                 DRM_ERROR("FDI train 1 fail!\n");
2978
2979         /* Train 2 */
2980         reg = FDI_TX_CTL(pipe);
2981         temp = I915_READ(reg);
2982         temp &= ~FDI_LINK_TRAIN_NONE;
2983         temp |= FDI_LINK_TRAIN_PATTERN_2;
2984         if (IS_GEN6(dev)) {
2985                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2986                 /* SNB-B */
2987                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2988         }
2989         I915_WRITE(reg, temp);
2990
2991         reg = FDI_RX_CTL(pipe);
2992         temp = I915_READ(reg);
2993         if (HAS_PCH_CPT(dev)) {
2994                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2995                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2996         } else {
2997                 temp &= ~FDI_LINK_TRAIN_NONE;
2998                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2999         }
3000         I915_WRITE(reg, temp);
3001
3002         POSTING_READ(reg);
3003         udelay(150);
3004
3005         for (i = 0; i < 4; i++) {
3006                 reg = FDI_TX_CTL(pipe);
3007                 temp = I915_READ(reg);
3008                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3009                 temp |= snb_b_fdi_train_param[i];
3010                 I915_WRITE(reg, temp);
3011
3012                 POSTING_READ(reg);
3013                 udelay(500);
3014
3015                 for (retry = 0; retry < 5; retry++) {
3016                         reg = FDI_RX_IIR(pipe);
3017                         temp = I915_READ(reg);
3018                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3019                         if (temp & FDI_RX_SYMBOL_LOCK) {
3020                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3021                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3022                                 break;
3023                         }
3024                         udelay(50);
3025                 }
3026                 if (retry < 5)
3027                         break;
3028         }
3029         if (i == 4)
3030                 DRM_ERROR("FDI train 2 fail!\n");
3031
3032         DRM_DEBUG_KMS("FDI train done.\n");
3033 }
3034
3035 /* Manual link training for Ivy Bridge A0 parts */
3036 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3037 {
3038         struct drm_device *dev = crtc->dev;
3039         struct drm_i915_private *dev_priv = dev->dev_private;
3040         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3041         int pipe = intel_crtc->pipe;
3042         u32 reg, temp, i, j;
3043
3044         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3045            for train result */
3046         reg = FDI_RX_IMR(pipe);
3047         temp = I915_READ(reg);
3048         temp &= ~FDI_RX_SYMBOL_LOCK;
3049         temp &= ~FDI_RX_BIT_LOCK;
3050         I915_WRITE(reg, temp);
3051
3052         POSTING_READ(reg);
3053         udelay(150);
3054
3055         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3056                       I915_READ(FDI_RX_IIR(pipe)));
3057
3058         /* Try each vswing and preemphasis setting twice before moving on */
3059         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3060                 /* disable first in case we need to retry */
3061                 reg = FDI_TX_CTL(pipe);
3062                 temp = I915_READ(reg);
3063                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3064                 temp &= ~FDI_TX_ENABLE;
3065                 I915_WRITE(reg, temp);
3066
3067                 reg = FDI_RX_CTL(pipe);
3068                 temp = I915_READ(reg);
3069                 temp &= ~FDI_LINK_TRAIN_AUTO;
3070                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3071                 temp &= ~FDI_RX_ENABLE;
3072                 I915_WRITE(reg, temp);
3073
3074                 /* enable CPU FDI TX and PCH FDI RX */
3075                 reg = FDI_TX_CTL(pipe);
3076                 temp = I915_READ(reg);
3077                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3078                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3079                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3080                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3081                 temp |= snb_b_fdi_train_param[j/2];
3082                 temp |= FDI_COMPOSITE_SYNC;
3083                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3084
3085                 I915_WRITE(FDI_RX_MISC(pipe),
3086                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3087
3088                 reg = FDI_RX_CTL(pipe);
3089                 temp = I915_READ(reg);
3090                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3091                 temp |= FDI_COMPOSITE_SYNC;
3092                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3093
3094                 POSTING_READ(reg);
3095                 udelay(1); /* should be 0.5us */
3096
3097                 for (i = 0; i < 4; i++) {
3098                         reg = FDI_RX_IIR(pipe);
3099                         temp = I915_READ(reg);
3100                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3101
3102                         if (temp & FDI_RX_BIT_LOCK ||
3103                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3104                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3105                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3106                                               i);
3107                                 break;
3108                         }
3109                         udelay(1); /* should be 0.5us */
3110                 }
3111                 if (i == 4) {
3112                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3113                         continue;
3114                 }
3115
3116                 /* Train 2 */
3117                 reg = FDI_TX_CTL(pipe);
3118                 temp = I915_READ(reg);
3119                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3120                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3121                 I915_WRITE(reg, temp);
3122
3123                 reg = FDI_RX_CTL(pipe);
3124                 temp = I915_READ(reg);
3125                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3126                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3127                 I915_WRITE(reg, temp);
3128
3129                 POSTING_READ(reg);
3130                 udelay(2); /* should be 1.5us */
3131
3132                 for (i = 0; i < 4; i++) {
3133                         reg = FDI_RX_IIR(pipe);
3134                         temp = I915_READ(reg);
3135                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3136
3137                         if (temp & FDI_RX_SYMBOL_LOCK ||
3138                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3139                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3140                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3141                                               i);
3142                                 goto train_done;
3143                         }
3144                         udelay(2); /* should be 1.5us */
3145                 }
3146                 if (i == 4)
3147                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3148         }
3149
3150 train_done:
3151         DRM_DEBUG_KMS("FDI train done.\n");
3152 }
3153
3154 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3155 {
3156         struct drm_device *dev = intel_crtc->base.dev;
3157         struct drm_i915_private *dev_priv = dev->dev_private;
3158         int pipe = intel_crtc->pipe;
3159         u32 reg, temp;
3160
3161
3162         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3163         reg = FDI_RX_CTL(pipe);
3164         temp = I915_READ(reg);
3165         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3166         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3167         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3168         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3169
3170         POSTING_READ(reg);
3171         udelay(200);
3172
3173         /* Switch from Rawclk to PCDclk */
3174         temp = I915_READ(reg);
3175         I915_WRITE(reg, temp | FDI_PCDCLK);
3176
3177         POSTING_READ(reg);
3178         udelay(200);
3179
3180         /* Enable CPU FDI TX PLL, always on for Ironlake */
3181         reg = FDI_TX_CTL(pipe);
3182         temp = I915_READ(reg);
3183         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3184                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3185
3186                 POSTING_READ(reg);
3187                 udelay(100);
3188         }
3189 }
3190
3191 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3192 {
3193         struct drm_device *dev = intel_crtc->base.dev;
3194         struct drm_i915_private *dev_priv = dev->dev_private;
3195         int pipe = intel_crtc->pipe;
3196         u32 reg, temp;
3197
3198         /* Switch from PCDclk to Rawclk */
3199         reg = FDI_RX_CTL(pipe);
3200         temp = I915_READ(reg);
3201         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3202
3203         /* Disable CPU FDI TX PLL */
3204         reg = FDI_TX_CTL(pipe);
3205         temp = I915_READ(reg);
3206         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3207
3208         POSTING_READ(reg);
3209         udelay(100);
3210
3211         reg = FDI_RX_CTL(pipe);
3212         temp = I915_READ(reg);
3213         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3214
3215         /* Wait for the clocks to turn off. */
3216         POSTING_READ(reg);
3217         udelay(100);
3218 }
3219
3220 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3221 {
3222         struct drm_device *dev = crtc->dev;
3223         struct drm_i915_private *dev_priv = dev->dev_private;
3224         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3225         int pipe = intel_crtc->pipe;
3226         u32 reg, temp;
3227
3228         /* disable CPU FDI tx and PCH FDI rx */
3229         reg = FDI_TX_CTL(pipe);
3230         temp = I915_READ(reg);
3231         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3232         POSTING_READ(reg);
3233
3234         reg = FDI_RX_CTL(pipe);
3235         temp = I915_READ(reg);
3236         temp &= ~(0x7 << 16);
3237         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3238         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3239
3240         POSTING_READ(reg);
3241         udelay(100);
3242
3243         /* Ironlake workaround, disable clock pointer after downing FDI */
3244         if (HAS_PCH_IBX(dev))
3245                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3246
3247         /* still set train pattern 1 */
3248         reg = FDI_TX_CTL(pipe);
3249         temp = I915_READ(reg);
3250         temp &= ~FDI_LINK_TRAIN_NONE;
3251         temp |= FDI_LINK_TRAIN_PATTERN_1;
3252         I915_WRITE(reg, temp);
3253
3254         reg = FDI_RX_CTL(pipe);
3255         temp = I915_READ(reg);
3256         if (HAS_PCH_CPT(dev)) {
3257                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3258                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3259         } else {
3260                 temp &= ~FDI_LINK_TRAIN_NONE;
3261                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3262         }
3263         /* BPC in FDI rx is consistent with that in PIPECONF */
3264         temp &= ~(0x07 << 16);
3265         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3266         I915_WRITE(reg, temp);
3267
3268         POSTING_READ(reg);
3269         udelay(100);
3270 }
3271
3272 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3273 {
3274         struct intel_crtc *crtc;
3275
3276         /* Note that we don't need to be called with mode_config.lock here
3277          * as our list of CRTC objects is static for the lifetime of the
3278          * device and so cannot disappear as we iterate. Similarly, we can
3279          * happily treat the predicates as racy, atomic checks as userspace
3280          * cannot claim and pin a new fb without at least acquring the
3281          * struct_mutex and so serialising with us.
3282          */
3283         for_each_intel_crtc(dev, crtc) {
3284                 if (atomic_read(&crtc->unpin_work_count) == 0)
3285                         continue;
3286
3287                 if (crtc->unpin_work)
3288                         intel_wait_for_vblank(dev, crtc->pipe);
3289
3290                 return true;
3291         }
3292
3293         return false;
3294 }
3295
3296 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3297 {
3298         struct drm_device *dev = crtc->dev;
3299         struct drm_i915_private *dev_priv = dev->dev_private;
3300
3301         if (crtc->primary->fb == NULL)
3302                 return;
3303
3304         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3305
3306         WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3307                                    !intel_crtc_has_pending_flip(crtc),
3308                                    60*HZ) == 0);
3309
3310         mutex_lock(&dev->struct_mutex);
3311         intel_finish_fb(crtc->primary->fb);
3312         mutex_unlock(&dev->struct_mutex);
3313 }
3314
3315 /* Program iCLKIP clock to the desired frequency */
3316 static void lpt_program_iclkip(struct drm_crtc *crtc)
3317 {
3318         struct drm_device *dev = crtc->dev;
3319         struct drm_i915_private *dev_priv = dev->dev_private;
3320         int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3321         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3322         u32 temp;
3323
3324         mutex_lock(&dev_priv->dpio_lock);
3325
3326         /* It is necessary to ungate the pixclk gate prior to programming
3327          * the divisors, and gate it back when it is done.
3328          */
3329         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3330
3331         /* Disable SSCCTL */
3332         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3333                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3334                                 SBI_SSCCTL_DISABLE,
3335                         SBI_ICLK);
3336
3337         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3338         if (clock == 20000) {
3339                 auxdiv = 1;
3340                 divsel = 0x41;
3341                 phaseinc = 0x20;
3342         } else {
3343                 /* The iCLK virtual clock root frequency is in MHz,
3344                  * but the adjusted_mode->crtc_clock in in KHz. To get the
3345                  * divisors, it is necessary to divide one by another, so we
3346                  * convert the virtual clock precision to KHz here for higher
3347                  * precision.
3348                  */
3349                 u32 iclk_virtual_root_freq = 172800 * 1000;
3350                 u32 iclk_pi_range = 64;
3351                 u32 desired_divisor, msb_divisor_value, pi_value;
3352
3353                 desired_divisor = (iclk_virtual_root_freq / clock);
3354                 msb_divisor_value = desired_divisor / iclk_pi_range;
3355                 pi_value = desired_divisor % iclk_pi_range;
3356
3357                 auxdiv = 0;
3358                 divsel = msb_divisor_value - 2;
3359                 phaseinc = pi_value;
3360         }
3361
3362         /* This should not happen with any sane values */
3363         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3364                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3365         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3366                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3367
3368         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3369                         clock,
3370                         auxdiv,
3371                         divsel,
3372                         phasedir,
3373                         phaseinc);
3374
3375         /* Program SSCDIVINTPHASE6 */
3376         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3377         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3378         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3379         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3380         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3381         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3382         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3383         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3384
3385         /* Program SSCAUXDIV */
3386         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3387         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3388         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3389         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3390
3391         /* Enable modulator and associated divider */
3392         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3393         temp &= ~SBI_SSCCTL_DISABLE;
3394         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3395
3396         /* Wait for initialization time */
3397         udelay(24);
3398
3399         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3400
3401         mutex_unlock(&dev_priv->dpio_lock);
3402 }
3403
3404 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3405                                                 enum i915_pipe pch_transcoder)
3406 {
3407         struct drm_device *dev = crtc->base.dev;
3408         struct drm_i915_private *dev_priv = dev->dev_private;
3409         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3410
3411         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3412                    I915_READ(HTOTAL(cpu_transcoder)));
3413         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3414                    I915_READ(HBLANK(cpu_transcoder)));
3415         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3416                    I915_READ(HSYNC(cpu_transcoder)));
3417
3418         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3419                    I915_READ(VTOTAL(cpu_transcoder)));
3420         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3421                    I915_READ(VBLANK(cpu_transcoder)));
3422         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3423                    I915_READ(VSYNC(cpu_transcoder)));
3424         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3425                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3426 }
3427
3428 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3429 {
3430         struct drm_i915_private *dev_priv = dev->dev_private;
3431         uint32_t temp;
3432
3433         temp = I915_READ(SOUTH_CHICKEN1);
3434         if (temp & FDI_BC_BIFURCATION_SELECT)
3435                 return;
3436
3437         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3438         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3439
3440         temp |= FDI_BC_BIFURCATION_SELECT;
3441         DRM_DEBUG_KMS("enabling fdi C rx\n");
3442         I915_WRITE(SOUTH_CHICKEN1, temp);
3443         POSTING_READ(SOUTH_CHICKEN1);
3444 }
3445
3446 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3447 {
3448         struct drm_device *dev = intel_crtc->base.dev;
3449         struct drm_i915_private *dev_priv = dev->dev_private;
3450
3451         switch (intel_crtc->pipe) {
3452         case PIPE_A:
3453                 break;
3454         case PIPE_B:
3455                 if (intel_crtc->config.fdi_lanes > 2)
3456                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3457                 else
3458                         cpt_enable_fdi_bc_bifurcation(dev);
3459
3460                 break;
3461         case PIPE_C:
3462                 cpt_enable_fdi_bc_bifurcation(dev);
3463
3464                 break;
3465         default:
3466                 BUG();
3467         }
3468 }
3469
3470 /*
3471  * Enable PCH resources required for PCH ports:
3472  *   - PCH PLLs
3473  *   - FDI training & RX/TX
3474  *   - update transcoder timings
3475  *   - DP transcoding bits
3476  *   - transcoder
3477  */
3478 static void ironlake_pch_enable(struct drm_crtc *crtc)
3479 {
3480         struct drm_device *dev = crtc->dev;
3481         struct drm_i915_private *dev_priv = dev->dev_private;
3482         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3483         int pipe = intel_crtc->pipe;
3484         u32 reg, temp;
3485
3486         assert_pch_transcoder_disabled(dev_priv, pipe);
3487
3488         if (IS_IVYBRIDGE(dev))
3489                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3490
3491         /* Write the TU size bits before fdi link training, so that error
3492          * detection works. */
3493         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3494                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3495
3496         /* For PCH output, training FDI link */
3497         dev_priv->display.fdi_link_train(crtc);
3498
3499         /* We need to program the right clock selection before writing the pixel
3500          * mutliplier into the DPLL. */
3501         if (HAS_PCH_CPT(dev)) {
3502                 u32 sel;
3503
3504                 temp = I915_READ(PCH_DPLL_SEL);
3505                 temp |= TRANS_DPLL_ENABLE(pipe);
3506                 sel = TRANS_DPLLB_SEL(pipe);
3507                 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3508                         temp |= sel;
3509                 else
3510                         temp &= ~sel;
3511                 I915_WRITE(PCH_DPLL_SEL, temp);
3512         }
3513
3514         /* XXX: pch pll's can be enabled any time before we enable the PCH
3515          * transcoder, and we actually should do this to not upset any PCH
3516          * transcoder that already use the clock when we share it.
3517          *
3518          * Note that enable_shared_dpll tries to do the right thing, but
3519          * get_shared_dpll unconditionally resets the pll - we need that to have
3520          * the right LVDS enable sequence. */
3521         intel_enable_shared_dpll(intel_crtc);
3522
3523         /* set transcoder timing, panel must allow it */
3524         assert_panel_unlocked(dev_priv, pipe);
3525         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3526
3527         intel_fdi_normal_train(crtc);
3528
3529         /* For PCH DP, enable TRANS_DP_CTL */
3530         if (HAS_PCH_CPT(dev) &&
3531             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3532              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3533                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3534                 reg = TRANS_DP_CTL(pipe);
3535                 temp = I915_READ(reg);
3536                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3537                           TRANS_DP_SYNC_MASK |
3538                           TRANS_DP_BPC_MASK);
3539                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3540                          TRANS_DP_ENH_FRAMING);
3541                 temp |= bpc << 9; /* same format but at 11:9 */
3542
3543                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3544                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3545                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3546                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3547
3548                 switch (intel_trans_dp_port_sel(crtc)) {
3549                 case PCH_DP_B:
3550                         temp |= TRANS_DP_PORT_SEL_B;
3551                         break;
3552                 case PCH_DP_C:
3553                         temp |= TRANS_DP_PORT_SEL_C;
3554                         break;
3555                 case PCH_DP_D:
3556                         temp |= TRANS_DP_PORT_SEL_D;
3557                         break;
3558                 default:
3559                         BUG();
3560                 }
3561
3562                 I915_WRITE(reg, temp);
3563         }
3564
3565         ironlake_enable_pch_transcoder(dev_priv, pipe);
3566 }
3567
3568 static void lpt_pch_enable(struct drm_crtc *crtc)
3569 {
3570         struct drm_device *dev = crtc->dev;
3571         struct drm_i915_private *dev_priv = dev->dev_private;
3572         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3573         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3574
3575         assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3576
3577         lpt_program_iclkip(crtc);
3578
3579         /* Set transcoder timing. */
3580         ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3581
3582         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3583 }
3584
3585 static void intel_put_shared_dpll(struct intel_crtc *crtc)
3586 {
3587         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3588
3589         if (pll == NULL)
3590                 return;
3591
3592         if (pll->refcount == 0) {
3593                 WARN(1, "bad %s refcount\n", pll->name);
3594                 return;
3595         }
3596
3597         if (--pll->refcount == 0) {
3598                 WARN_ON(pll->on);
3599                 WARN_ON(pll->active);
3600         }
3601
3602         crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3603 }
3604
3605 static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3606 {
3607         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3608         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3609         enum intel_dpll_id i;
3610
3611         if (pll) {
3612                 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3613                               crtc->base.base.id, pll->name);
3614                 intel_put_shared_dpll(crtc);
3615         }
3616
3617         if (HAS_PCH_IBX(dev_priv->dev)) {
3618                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3619                 i = (enum intel_dpll_id) crtc->pipe;
3620                 pll = &dev_priv->shared_dplls[i];
3621
3622                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3623                               crtc->base.base.id, pll->name);
3624
3625                 WARN_ON(pll->refcount);
3626
3627                 goto found;
3628         }
3629
3630         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3631                 pll = &dev_priv->shared_dplls[i];
3632
3633                 /* Only want to check enabled timings first */
3634                 if (pll->refcount == 0)
3635                         continue;
3636
3637                 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3638                            sizeof(pll->hw_state)) == 0) {
3639                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3640                                       crtc->base.base.id,
3641                                       pll->name, pll->refcount, pll->active);
3642
3643                         goto found;
3644                 }
3645         }
3646
3647         /* Ok no matching timings, maybe there's a free one? */
3648         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3649                 pll = &dev_priv->shared_dplls[i];
3650                 if (pll->refcount == 0) {
3651                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3652                                       crtc->base.base.id, pll->name);
3653                         goto found;
3654                 }
3655         }
3656
3657         return NULL;
3658
3659 found:
3660         if (pll->refcount == 0)
3661                 pll->hw_state = crtc->config.dpll_hw_state;
3662
3663         crtc->config.shared_dpll = i;
3664         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3665                          pipe_name(crtc->pipe));
3666
3667         pll->refcount++;
3668
3669         return pll;
3670 }
3671
3672 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3673 {
3674         struct drm_i915_private *dev_priv = dev->dev_private;
3675         int dslreg = PIPEDSL(pipe);
3676         u32 temp;
3677
3678         temp = I915_READ(dslreg);
3679         udelay(500);
3680         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3681                 if (wait_for(I915_READ(dslreg) != temp, 5))
3682                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3683         }
3684 }
3685
3686 static void ironlake_pfit_enable(struct intel_crtc *crtc)
3687 {
3688         struct drm_device *dev = crtc->base.dev;
3689         struct drm_i915_private *dev_priv = dev->dev_private;
3690         int pipe = crtc->pipe;
3691
3692         if (crtc->config.pch_pfit.enabled) {
3693                 /* Force use of hard-coded filter coefficients
3694                  * as some pre-programmed values are broken,
3695                  * e.g. x201.
3696                  */
3697                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3698                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3699                                                  PF_PIPE_SEL_IVB(pipe));
3700                 else
3701                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3702                 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3703                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3704         }
3705 }
3706
3707 static void intel_enable_planes(struct drm_crtc *crtc)
3708 {
3709         struct drm_device *dev = crtc->dev;
3710         enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
3711         struct drm_plane *plane;
3712         struct intel_plane *intel_plane;
3713
3714         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3715                 intel_plane = to_intel_plane(plane);
3716                 if (intel_plane->pipe == pipe)
3717                         intel_plane_restore(&intel_plane->base);
3718         }
3719 }
3720
3721 static void intel_disable_planes(struct drm_crtc *crtc)
3722 {
3723         struct drm_device *dev = crtc->dev;
3724         enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
3725         struct drm_plane *plane;
3726         struct intel_plane *intel_plane;
3727
3728         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3729                 intel_plane = to_intel_plane(plane);
3730                 if (intel_plane->pipe == pipe)
3731                         intel_plane_disable(&intel_plane->base);
3732         }
3733 }
3734
3735 void hsw_enable_ips(struct intel_crtc *crtc)
3736 {
3737         struct drm_device *dev = crtc->base.dev;
3738         struct drm_i915_private *dev_priv = dev->dev_private;
3739
3740         if (!crtc->config.ips_enabled)
3741                 return;
3742
3743         /* We can only enable IPS after we enable a plane and wait for a vblank */
3744         intel_wait_for_vblank(dev, crtc->pipe);
3745
3746         assert_plane_enabled(dev_priv, crtc->plane);
3747         if (IS_BROADWELL(dev)) {
3748                 mutex_lock(&dev_priv->rps.hw_lock);
3749                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3750                 mutex_unlock(&dev_priv->rps.hw_lock);
3751                 /* Quoting Art Runyan: "its not safe to expect any particular
3752                  * value in IPS_CTL bit 31 after enabling IPS through the
3753                  * mailbox." Moreover, the mailbox may return a bogus state,
3754                  * so we need to just enable it and continue on.
3755                  */
3756         } else {
3757                 I915_WRITE(IPS_CTL, IPS_ENABLE);
3758                 /* The bit only becomes 1 in the next vblank, so this wait here
3759                  * is essentially intel_wait_for_vblank. If we don't have this
3760                  * and don't wait for vblanks until the end of crtc_enable, then
3761                  * the HW state readout code will complain that the expected
3762                  * IPS_CTL value is not the one we read. */
3763                 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3764                         DRM_ERROR("Timed out waiting for IPS enable\n");
3765         }
3766 }
3767
3768 void hsw_disable_ips(struct intel_crtc *crtc)
3769 {
3770         struct drm_device *dev = crtc->base.dev;
3771         struct drm_i915_private *dev_priv = dev->dev_private;
3772
3773         if (!crtc->config.ips_enabled)
3774                 return;
3775
3776         assert_plane_enabled(dev_priv, crtc->plane);
3777         if (IS_BROADWELL(dev)) {
3778                 mutex_lock(&dev_priv->rps.hw_lock);
3779                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3780                 mutex_unlock(&dev_priv->rps.hw_lock);
3781                 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
3782                 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3783                         DRM_ERROR("Timed out waiting for IPS disable\n");
3784         } else {
3785                 I915_WRITE(IPS_CTL, 0);
3786                 POSTING_READ(IPS_CTL);
3787         }
3788
3789         /* We need to wait for a vblank before we can disable the plane. */
3790         intel_wait_for_vblank(dev, crtc->pipe);
3791 }
3792
3793 /** Loads the palette/gamma unit for the CRTC with the prepared values */
3794 static void intel_crtc_load_lut(struct drm_crtc *crtc)
3795 {
3796         struct drm_device *dev = crtc->dev;
3797         struct drm_i915_private *dev_priv = dev->dev_private;
3798         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3799         enum i915_pipe pipe = intel_crtc->pipe;
3800         int palreg = PALETTE(pipe);
3801         int i;
3802         bool reenable_ips = false;
3803
3804         /* The clocks have to be on to load the palette. */
3805         if (!crtc->enabled || !intel_crtc->active)
3806                 return;
3807
3808         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3809                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3810                         assert_dsi_pll_enabled(dev_priv);
3811                 else
3812                         assert_pll_enabled(dev_priv, pipe);
3813         }
3814
3815         /* use legacy palette for Ironlake */
3816         if (HAS_PCH_SPLIT(dev))
3817                 palreg = LGC_PALETTE(pipe);
3818
3819         /* Workaround : Do not read or write the pipe palette/gamma data while
3820          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3821          */
3822         if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3823             ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3824              GAMMA_MODE_MODE_SPLIT)) {
3825                 hsw_disable_ips(intel_crtc);
3826                 reenable_ips = true;
3827         }
3828
3829         for (i = 0; i < 256; i++) {
3830                 I915_WRITE(palreg + 4 * i,
3831                            (intel_crtc->lut_r[i] << 16) |
3832                            (intel_crtc->lut_g[i] << 8) |
3833                            intel_crtc->lut_b[i]);
3834         }
3835
3836         if (reenable_ips)
3837                 hsw_enable_ips(intel_crtc);
3838 }
3839
3840 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3841 {
3842         if (!enable && intel_crtc->overlay) {
3843                 struct drm_device *dev = intel_crtc->base.dev;
3844                 struct drm_i915_private *dev_priv = dev->dev_private;
3845
3846                 mutex_lock(&dev->struct_mutex);
3847                 dev_priv->mm.interruptible = false;
3848                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3849                 dev_priv->mm.interruptible = true;
3850                 mutex_unlock(&dev->struct_mutex);
3851         }
3852
3853         /* Let userspace switch the overlay on again. In most cases userspace
3854          * has to recompute where to put it anyway.
3855          */
3856 }
3857
3858 /**
3859  * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
3860  * cursor plane briefly if not already running after enabling the display
3861  * plane.
3862  * This workaround avoids occasional blank screens when self refresh is
3863  * enabled.
3864  */
3865 static void
3866 g4x_fixup_plane(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
3867 {
3868         u32 cntl = I915_READ(CURCNTR(pipe));
3869
3870         if ((cntl & CURSOR_MODE) == 0) {
3871                 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3872
3873                 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3874                 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3875                 intel_wait_for_vblank(dev_priv->dev, pipe);
3876                 I915_WRITE(CURCNTR(pipe), cntl);
3877                 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3878                 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3879         }
3880 }
3881
3882 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3883 {
3884         struct drm_device *dev = crtc->dev;
3885         struct drm_i915_private *dev_priv = dev->dev_private;
3886         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3887         int pipe = intel_crtc->pipe;
3888         int plane = intel_crtc->plane;
3889
3890         intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3891         intel_enable_planes(crtc);
3892         /* The fixup needs to happen before cursor is enabled */
3893         if (IS_G4X(dev))
3894                 g4x_fixup_plane(dev_priv, pipe);
3895         intel_crtc_update_cursor(crtc, true);
3896         intel_crtc_dpms_overlay(intel_crtc, true);
3897
3898         hsw_enable_ips(intel_crtc);
3899
3900         mutex_lock(&dev->struct_mutex);
3901         intel_update_fbc(dev);
3902         intel_edp_psr_update(dev);
3903         mutex_unlock(&dev->struct_mutex);
3904 }
3905
3906 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3907 {
3908         struct drm_device *dev = crtc->dev;
3909         struct drm_i915_private *dev_priv = dev->dev_private;
3910         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3911         int pipe = intel_crtc->pipe;
3912         int plane = intel_crtc->plane;
3913
3914         intel_crtc_wait_for_pending_flips(crtc);
3915         drm_crtc_vblank_off(crtc);
3916
3917         if (dev_priv->fbc.plane == plane)
3918                 intel_disable_fbc(dev);
3919
3920         hsw_disable_ips(intel_crtc);
3921
3922         intel_crtc_dpms_overlay(intel_crtc, false);
3923         intel_crtc_update_cursor(crtc, false);
3924         intel_disable_planes(crtc);
3925         intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3926 }
3927
3928 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3929 {
3930         struct drm_device *dev = crtc->dev;
3931         struct drm_i915_private *dev_priv = dev->dev_private;
3932         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3933         struct intel_encoder *encoder;
3934         int pipe = intel_crtc->pipe;
3935         enum plane plane = intel_crtc->plane;
3936
3937         WARN_ON(!crtc->enabled);
3938
3939         if (intel_crtc->active)
3940                 return;
3941
3942         if (intel_crtc->config.has_pch_encoder)
3943                 intel_prepare_shared_dpll(intel_crtc);
3944
3945         if (intel_crtc->config.has_dp_encoder)
3946                 intel_dp_set_m_n(intel_crtc);
3947
3948         intel_set_pipe_timings(intel_crtc);
3949
3950         if (intel_crtc->config.has_pch_encoder) {
3951                 intel_cpu_transcoder_set_m_n(intel_crtc,
3952                                              &intel_crtc->config.fdi_m_n);
3953         }
3954
3955         ironlake_set_pipeconf(crtc);
3956
3957         /* Set up the display plane register */
3958         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
3959         POSTING_READ(DSPCNTR(plane));
3960
3961         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
3962                                                crtc->x, crtc->y);
3963
3964         intel_crtc->active = true;
3965
3966         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3967         intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3968
3969         for_each_encoder_on_crtc(dev, crtc, encoder)
3970                 if (encoder->pre_enable)
3971                         encoder->pre_enable(encoder);
3972
3973         if (intel_crtc->config.has_pch_encoder) {
3974                 /* Note: FDI PLL enabling _must_ be done before we enable the
3975                  * cpu pipes, hence this is separate from all the other fdi/pch
3976                  * enabling. */
3977                 ironlake_fdi_pll_enable(intel_crtc);
3978         } else {
3979                 assert_fdi_tx_disabled(dev_priv, pipe);
3980                 assert_fdi_rx_disabled(dev_priv, pipe);
3981         }
3982
3983         ironlake_pfit_enable(intel_crtc);
3984
3985         /*
3986          * On ILK+ LUT must be loaded before the pipe is running but with
3987          * clocks enabled
3988          */
3989         intel_crtc_load_lut(crtc);
3990
3991         intel_update_watermarks(crtc);
3992         intel_enable_pipe(intel_crtc);
3993
3994         if (intel_crtc->config.has_pch_encoder)
3995                 ironlake_pch_enable(crtc);
3996
3997         for_each_encoder_on_crtc(dev, crtc, encoder)
3998                 encoder->enable(encoder);
3999
4000         if (HAS_PCH_CPT(dev))
4001                 cpt_verify_modeset(dev, intel_crtc->pipe);
4002
4003         intel_crtc_enable_planes(crtc);
4004
4005         drm_crtc_vblank_on(crtc);
4006 }
4007
4008 /* IPS only exists on ULT machines and is tied to pipe A. */
4009 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4010 {
4011         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4012 }
4013
4014 /*
4015  * This implements the workaround described in the "notes" section of the mode
4016  * set sequence documentation. When going from no pipes or single pipe to
4017  * multiple pipes, and planes are enabled after the pipe, we need to wait at
4018  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4019  */
4020 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4021 {
4022         struct drm_device *dev = crtc->base.dev;
4023         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4024
4025         /* We want to get the other_active_crtc only if there's only 1 other
4026          * active crtc. */
4027         for_each_intel_crtc(dev, crtc_it) {
4028                 if (!crtc_it->active || crtc_it == crtc)
4029                         continue;
4030
4031                 if (other_active_crtc)
4032                         return;
4033
4034                 other_active_crtc = crtc_it;
4035         }
4036         if (!other_active_crtc)
4037                 return;
4038
4039         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4040         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4041 }
4042
4043 static void haswell_crtc_enable(struct drm_crtc *crtc)
4044 {
4045         struct drm_device *dev = crtc->dev;
4046         struct drm_i915_private *dev_priv = dev->dev_private;
4047         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4048         struct intel_encoder *encoder;
4049         int pipe = intel_crtc->pipe;
4050         enum plane plane = intel_crtc->plane;
4051
4052         WARN_ON(!crtc->enabled);
4053
4054         if (intel_crtc->active)
4055                 return;
4056
4057         if (intel_crtc->config.has_dp_encoder)
4058                 intel_dp_set_m_n(intel_crtc);
4059
4060         intel_set_pipe_timings(intel_crtc);
4061
4062         if (intel_crtc->config.has_pch_encoder) {
4063                 intel_cpu_transcoder_set_m_n(intel_crtc,
4064                                              &intel_crtc->config.fdi_m_n);
4065         }
4066
4067         haswell_set_pipeconf(crtc);
4068
4069         intel_set_pipe_csc(crtc);
4070
4071         /* Set up the display plane register */
4072         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4073         POSTING_READ(DSPCNTR(plane));
4074
4075         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4076                                                crtc->x, crtc->y);
4077
4078         intel_crtc->active = true;
4079
4080         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4081         if (intel_crtc->config.has_pch_encoder)
4082                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4083
4084         if (intel_crtc->config.has_pch_encoder)
4085                 dev_priv->display.fdi_link_train(crtc);
4086
4087         for_each_encoder_on_crtc(dev, crtc, encoder)
4088                 if (encoder->pre_enable)
4089                         encoder->pre_enable(encoder);
4090
4091         intel_ddi_enable_pipe_clock(intel_crtc);
4092
4093         ironlake_pfit_enable(intel_crtc);
4094
4095         /*
4096          * On ILK+ LUT must be loaded before the pipe is running but with
4097          * clocks enabled
4098          */
4099         intel_crtc_load_lut(crtc);
4100
4101         intel_ddi_set_pipe_settings(crtc);
4102         intel_ddi_enable_transcoder_func(crtc);
4103
4104         intel_update_watermarks(crtc);
4105         intel_enable_pipe(intel_crtc);
4106
4107         if (intel_crtc->config.has_pch_encoder)
4108                 lpt_pch_enable(crtc);
4109
4110         for_each_encoder_on_crtc(dev, crtc, encoder) {
4111                 encoder->enable(encoder);
4112                 intel_opregion_notify_encoder(encoder, true);
4113         }
4114
4115         /* If we change the relative order between pipe/planes enabling, we need
4116          * to change the workaround. */
4117         haswell_mode_set_planes_workaround(intel_crtc);
4118         intel_crtc_enable_planes(crtc);
4119
4120         drm_crtc_vblank_on(crtc);
4121 }
4122
4123 static void ironlake_pfit_disable(struct intel_crtc *crtc)
4124 {
4125         struct drm_device *dev = crtc->base.dev;
4126         struct drm_i915_private *dev_priv = dev->dev_private;
4127         int pipe = crtc->pipe;
4128
4129         /* To avoid upsetting the power well on haswell only disable the pfit if
4130          * it's in use. The hw state code will make sure we get this right. */
4131         if (crtc->config.pch_pfit.enabled) {
4132                 I915_WRITE(PF_CTL(pipe), 0);
4133                 I915_WRITE(PF_WIN_POS(pipe), 0);
4134                 I915_WRITE(PF_WIN_SZ(pipe), 0);
4135         }
4136 }
4137
4138 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4139 {
4140         struct drm_device *dev = crtc->dev;
4141         struct drm_i915_private *dev_priv = dev->dev_private;
4142         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4143         struct intel_encoder *encoder;
4144         int pipe = intel_crtc->pipe;
4145         u32 reg, temp;
4146
4147         if (!intel_crtc->active)
4148                 return;
4149
4150         intel_crtc_disable_planes(crtc);
4151
4152         for_each_encoder_on_crtc(dev, crtc, encoder)
4153                 encoder->disable(encoder);
4154
4155         if (intel_crtc->config.has_pch_encoder)
4156                 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
4157
4158         intel_disable_pipe(dev_priv, pipe);
4159
4160         ironlake_pfit_disable(intel_crtc);
4161
4162         for_each_encoder_on_crtc(dev, crtc, encoder)
4163                 if (encoder->post_disable)
4164                         encoder->post_disable(encoder);
4165
4166         if (intel_crtc->config.has_pch_encoder) {
4167                 ironlake_fdi_disable(crtc);
4168
4169                 ironlake_disable_pch_transcoder(dev_priv, pipe);
4170                 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4171
4172                 if (HAS_PCH_CPT(dev)) {
4173                         /* disable TRANS_DP_CTL */
4174                         reg = TRANS_DP_CTL(pipe);
4175                         temp = I915_READ(reg);
4176                         temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4177                                   TRANS_DP_PORT_SEL_MASK);
4178                         temp |= TRANS_DP_PORT_SEL_NONE;
4179                         I915_WRITE(reg, temp);
4180
4181                         /* disable DPLL_SEL */
4182                         temp = I915_READ(PCH_DPLL_SEL);
4183                         temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4184                         I915_WRITE(PCH_DPLL_SEL, temp);
4185                 }
4186
4187                 /* disable PCH DPLL */
4188                 intel_disable_shared_dpll(intel_crtc);
4189
4190                 ironlake_fdi_pll_disable(intel_crtc);
4191         }
4192
4193         intel_crtc->active = false;
4194         intel_update_watermarks(crtc);
4195
4196         mutex_lock(&dev->struct_mutex);
4197         intel_update_fbc(dev);
4198         intel_edp_psr_update(dev);
4199         mutex_unlock(&dev->struct_mutex);
4200 }
4201
4202 static void haswell_crtc_disable(struct drm_crtc *crtc)
4203 {
4204         struct drm_device *dev = crtc->dev;
4205         struct drm_i915_private *dev_priv = dev->dev_private;
4206         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4207         struct intel_encoder *encoder;
4208         int pipe = intel_crtc->pipe;
4209         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4210
4211         if (!intel_crtc->active)
4212                 return;
4213
4214         intel_crtc_disable_planes(crtc);
4215
4216         for_each_encoder_on_crtc(dev, crtc, encoder) {
4217                 intel_opregion_notify_encoder(encoder, false);
4218                 encoder->disable(encoder);
4219         }
4220
4221         if (intel_crtc->config.has_pch_encoder)
4222                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4223         intel_disable_pipe(dev_priv, pipe);
4224
4225         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4226
4227         ironlake_pfit_disable(intel_crtc);
4228
4229         intel_ddi_disable_pipe_clock(intel_crtc);
4230
4231         for_each_encoder_on_crtc(dev, crtc, encoder)
4232                 if (encoder->post_disable)
4233                         encoder->post_disable(encoder);
4234
4235         if (intel_crtc->config.has_pch_encoder) {
4236                 lpt_disable_pch_transcoder(dev_priv);
4237                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4238                 intel_ddi_fdi_disable(crtc);
4239         }
4240
4241         intel_crtc->active = false;
4242         intel_update_watermarks(crtc);
4243
4244         mutex_lock(&dev->struct_mutex);
4245         intel_update_fbc(dev);
4246         intel_edp_psr_update(dev);
4247         mutex_unlock(&dev->struct_mutex);
4248 }
4249
4250 static void ironlake_crtc_off(struct drm_crtc *crtc)
4251 {
4252         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4253         intel_put_shared_dpll(intel_crtc);
4254 }
4255
4256 static void haswell_crtc_off(struct drm_crtc *crtc)
4257 {
4258         intel_ddi_put_crtc_pll(crtc);
4259 }
4260
4261 static void i9xx_pfit_enable(struct intel_crtc *crtc)
4262 {
4263         struct drm_device *dev = crtc->base.dev;
4264         struct drm_i915_private *dev_priv = dev->dev_private;
4265         struct intel_crtc_config *pipe_config = &crtc->config;
4266
4267         if (!crtc->config.gmch_pfit.control)
4268                 return;
4269
4270         /*
4271          * The panel fitter should only be adjusted whilst the pipe is disabled,
4272          * according to register description and PRM.
4273          */
4274         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4275         assert_pipe_disabled(dev_priv, crtc->pipe);
4276
4277         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4278         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4279
4280         /* Border color in case we don't scale up to the full screen. Black by
4281          * default, change to something else for debugging. */
4282         I915_WRITE(BCLRPAT(crtc->pipe), 0);
4283 }
4284
4285 #define for_each_power_domain(domain, mask)                             \
4286         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
4287                 if ((1 << (domain)) & (mask))
4288
4289 enum intel_display_power_domain
4290 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4291 {
4292         struct drm_device *dev = intel_encoder->base.dev;
4293         struct intel_digital_port *intel_dig_port;
4294
4295         switch (intel_encoder->type) {
4296         case INTEL_OUTPUT_UNKNOWN:
4297                 /* Only DDI platforms should ever use this output type */
4298                 WARN_ON_ONCE(!HAS_DDI(dev));
4299         case INTEL_OUTPUT_DISPLAYPORT:
4300         case INTEL_OUTPUT_HDMI:
4301         case INTEL_OUTPUT_EDP:
4302                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4303                 switch (intel_dig_port->port) {
4304                 case PORT_A:
4305                         return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4306                 case PORT_B:
4307                         return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4308                 case PORT_C:
4309                         return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4310                 case PORT_D:
4311                         return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4312                 default:
4313                         WARN_ON_ONCE(1);
4314                         return POWER_DOMAIN_PORT_OTHER;
4315                 }
4316         case INTEL_OUTPUT_ANALOG:
4317                 return POWER_DOMAIN_PORT_CRT;
4318         case INTEL_OUTPUT_DSI:
4319                 return POWER_DOMAIN_PORT_DSI;
4320         default:
4321                 return POWER_DOMAIN_PORT_OTHER;
4322         }
4323 }
4324
4325 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4326 {
4327         struct drm_device *dev = crtc->dev;
4328         struct intel_encoder *intel_encoder;
4329         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4330         enum i915_pipe pipe = intel_crtc->pipe;
4331         bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
4332         unsigned long mask;
4333         enum transcoder transcoder;
4334
4335         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4336
4337         mask = BIT(POWER_DOMAIN_PIPE(pipe));
4338         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4339         if (pfit_enabled)
4340                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4341
4342         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4343                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4344
4345         return mask;
4346 }
4347
4348 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4349                                   bool enable)
4350 {
4351         if (dev_priv->power_domains.init_power_on == enable)
4352                 return;
4353
4354         if (enable)
4355                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4356         else
4357                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4358
4359         dev_priv->power_domains.init_power_on = enable;
4360 }
4361
4362 static void modeset_update_crtc_power_domains(struct drm_device *dev)
4363 {
4364         struct drm_i915_private *dev_priv = dev->dev_private;
4365         unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4366         struct intel_crtc *crtc;
4367
4368         /*
4369          * First get all needed power domains, then put all unneeded, to avoid
4370          * any unnecessary toggling of the power wells.
4371          */
4372         for_each_intel_crtc(dev, crtc) {
4373                 enum intel_display_power_domain domain;
4374
4375                 if (!crtc->base.enabled)
4376                         continue;
4377
4378                 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4379
4380                 for_each_power_domain(domain, pipe_domains[crtc->pipe])
4381                         intel_display_power_get(dev_priv, domain);
4382         }
4383
4384         for_each_intel_crtc(dev, crtc) {
4385                 enum intel_display_power_domain domain;
4386
4387                 for_each_power_domain(domain, crtc->enabled_power_domains)
4388                         intel_display_power_put(dev_priv, domain);
4389
4390                 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4391         }
4392
4393         intel_display_set_init_power(dev_priv, false);
4394 }
4395
4396 int valleyview_get_vco(struct drm_i915_private *dev_priv)
4397 {
4398         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4399
4400         /* Obtain SKU information */
4401         mutex_lock(&dev_priv->dpio_lock);
4402         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4403                 CCK_FUSE_HPLL_FREQ_MASK;
4404         mutex_unlock(&dev_priv->dpio_lock);
4405
4406         return vco_freq[hpll_freq];
4407 }
4408
4409 /* Adjust CDclk dividers to allow high res or save power if possible */
4410 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4411 {
4412         struct drm_i915_private *dev_priv = dev->dev_private;
4413         u32 val, cmd;
4414
4415         WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
4416         dev_priv->vlv_cdclk_freq = cdclk;
4417
4418         if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
4419                 cmd = 2;
4420         else if (cdclk == 266)
4421                 cmd = 1;
4422         else
4423                 cmd = 0;
4424
4425         mutex_lock(&dev_priv->rps.hw_lock);
4426         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4427         val &= ~DSPFREQGUAR_MASK;
4428         val |= (cmd << DSPFREQGUAR_SHIFT);
4429         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4430         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4431                       DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4432                      50)) {
4433                 DRM_ERROR("timed out waiting for CDclk change\n");
4434         }
4435         mutex_unlock(&dev_priv->rps.hw_lock);
4436
4437         if (cdclk == 400) {
4438                 u32 divider, vco;
4439
4440                 vco = valleyview_get_vco(dev_priv);
4441                 divider = ((vco << 1) / cdclk) - 1;
4442
4443                 mutex_lock(&dev_priv->dpio_lock);
4444                 /* adjust cdclk divider */
4445                 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4446                 val &= ~0xf;
4447                 val |= divider;
4448                 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4449                 mutex_unlock(&dev_priv->dpio_lock);
4450         }
4451
4452         mutex_lock(&dev_priv->dpio_lock);
4453         /* adjust self-refresh exit latency value */
4454         val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4455         val &= ~0x7f;
4456
4457         /*
4458          * For high bandwidth configs, we set a higher latency in the bunit
4459          * so that the core display fetch happens in time to avoid underruns.
4460          */
4461         if (cdclk == 400)
4462                 val |= 4500 / 250; /* 4.5 usec */
4463         else
4464                 val |= 3000 / 250; /* 3.0 usec */
4465         vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4466         mutex_unlock(&dev_priv->dpio_lock);
4467
4468         /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
4469         intel_i2c_reset(dev);
4470 }
4471
4472 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4473 {
4474         int cur_cdclk, vco;
4475         int divider;
4476
4477         vco = valleyview_get_vco(dev_priv);
4478
4479         mutex_lock(&dev_priv->dpio_lock);
4480         divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4481         mutex_unlock(&dev_priv->dpio_lock);
4482
4483         divider &= 0xf;
4484
4485         cur_cdclk = (vco << 1) / (divider + 1);
4486
4487         return cur_cdclk;
4488 }
4489
4490 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4491                                  int max_pixclk)
4492 {
4493         /*
4494          * Really only a few cases to deal with, as only 4 CDclks are supported:
4495          *   200MHz
4496          *   267MHz
4497          *   320MHz
4498          *   400MHz
4499          * So we check to see whether we're above 90% of the lower bin and
4500          * adjust if needed.
4501          */
4502         if (max_pixclk > 288000) {
4503                 return 400;
4504         } else if (max_pixclk > 240000) {
4505                 return 320;
4506         } else
4507                 return 266;
4508         /* Looks like the 200MHz CDclk freq doesn't work on some configs */
4509 }
4510
4511 /* compute the max pixel clock for new configuration */
4512 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4513 {
4514         struct drm_device *dev = dev_priv->dev;
4515         struct intel_crtc *intel_crtc;
4516         int max_pixclk = 0;
4517
4518         for_each_intel_crtc(dev, intel_crtc) {
4519                 if (intel_crtc->new_enabled)
4520                         max_pixclk = max(max_pixclk,
4521                                          intel_crtc->new_config->adjusted_mode.crtc_clock);
4522         }
4523
4524         return max_pixclk;
4525 }
4526
4527 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4528                                             unsigned *prepare_pipes)
4529 {
4530         struct drm_i915_private *dev_priv = dev->dev_private;
4531         struct intel_crtc *intel_crtc;
4532         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4533
4534         if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4535             dev_priv->vlv_cdclk_freq)
4536                 return;
4537
4538         /* disable/enable all currently active pipes while we change cdclk */
4539         for_each_intel_crtc(dev, intel_crtc)
4540                 if (intel_crtc->base.enabled)
4541                         *prepare_pipes |= (1 << intel_crtc->pipe);
4542 }
4543
4544 static void valleyview_modeset_global_resources(struct drm_device *dev)
4545 {
4546         struct drm_i915_private *dev_priv = dev->dev_private;
4547         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4548         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4549
4550         if (req_cdclk != dev_priv->vlv_cdclk_freq)
4551                 valleyview_set_cdclk(dev, req_cdclk);
4552         modeset_update_crtc_power_domains(dev);
4553 }
4554
4555 static void valleyview_crtc_enable(struct drm_crtc *crtc)
4556 {
4557         struct drm_device *dev = crtc->dev;
4558         struct drm_i915_private *dev_priv = dev->dev_private;
4559         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4560         struct intel_encoder *encoder;
4561         int pipe = intel_crtc->pipe;
4562         int plane = intel_crtc->plane;
4563         bool is_dsi;
4564         u32 dspcntr;
4565
4566         WARN_ON(!crtc->enabled);
4567
4568         if (intel_crtc->active)
4569                 return;
4570
4571         is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4572
4573         if (!is_dsi && !IS_CHERRYVIEW(dev))
4574                 vlv_prepare_pll(intel_crtc);
4575
4576         /* Set up the display plane register */
4577         dspcntr = DISPPLANE_GAMMA_ENABLE;
4578
4579         if (intel_crtc->config.has_dp_encoder)
4580                 intel_dp_set_m_n(intel_crtc);
4581
4582         intel_set_pipe_timings(intel_crtc);
4583
4584         /* pipesrc and dspsize control the size that is scaled from,
4585          * which should always be the user's requested size.
4586          */
4587         I915_WRITE(DSPSIZE(plane),
4588                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4589                    (intel_crtc->config.pipe_src_w - 1));
4590         I915_WRITE(DSPPOS(plane), 0);
4591
4592         i9xx_set_pipeconf(intel_crtc);
4593
4594         I915_WRITE(DSPCNTR(plane), dspcntr);
4595         POSTING_READ(DSPCNTR(plane));
4596
4597         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4598                                                crtc->x, crtc->y);
4599
4600         intel_crtc->active = true;
4601
4602         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4603
4604         for_each_encoder_on_crtc(dev, crtc, encoder)
4605                 if (encoder->pre_pll_enable)
4606                         encoder->pre_pll_enable(encoder);
4607
4608         if (!is_dsi) {
4609                 if (IS_CHERRYVIEW(dev))
4610                         chv_enable_pll(intel_crtc);
4611                 else
4612                         vlv_enable_pll(intel_crtc);
4613         }
4614
4615         for_each_encoder_on_crtc(dev, crtc, encoder)
4616                 if (encoder->pre_enable)
4617                         encoder->pre_enable(encoder);
4618
4619         i9xx_pfit_enable(intel_crtc);
4620
4621         intel_crtc_load_lut(crtc);
4622
4623         intel_update_watermarks(crtc);
4624         intel_enable_pipe(intel_crtc);
4625
4626         for_each_encoder_on_crtc(dev, crtc, encoder)
4627                 encoder->enable(encoder);
4628
4629         intel_crtc_enable_planes(crtc);
4630
4631         drm_crtc_vblank_on(crtc);
4632
4633         /* Underruns don't raise interrupts, so check manually. */
4634         i9xx_check_fifo_underruns(dev);
4635 }
4636
4637 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4638 {
4639         struct drm_device *dev = crtc->base.dev;
4640         struct drm_i915_private *dev_priv = dev->dev_private;
4641
4642         I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4643         I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4644 }
4645
4646 static void i9xx_crtc_enable(struct drm_crtc *crtc)
4647 {
4648         struct drm_device *dev = crtc->dev;
4649         struct drm_i915_private *dev_priv = dev->dev_private;
4650         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4651         struct intel_encoder *encoder;
4652         int pipe = intel_crtc->pipe;
4653         int plane = intel_crtc->plane;
4654         u32 dspcntr;
4655
4656         WARN_ON(!crtc->enabled);
4657
4658         if (intel_crtc->active)
4659                 return;
4660
4661         i9xx_set_pll_dividers(intel_crtc);
4662
4663         /* Set up the display plane register */
4664         dspcntr = DISPPLANE_GAMMA_ENABLE;
4665
4666         if (pipe == 0)
4667                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4668         else
4669                 dspcntr |= DISPPLANE_SEL_PIPE_B;
4670
4671         if (intel_crtc->config.has_dp_encoder)
4672                 intel_dp_set_m_n(intel_crtc);
4673
4674         intel_set_pipe_timings(intel_crtc);
4675
4676         /* pipesrc and dspsize control the size that is scaled from,
4677          * which should always be the user's requested size.
4678          */
4679         I915_WRITE(DSPSIZE(plane),
4680                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4681                    (intel_crtc->config.pipe_src_w - 1));
4682         I915_WRITE(DSPPOS(plane), 0);
4683
4684         i9xx_set_pipeconf(intel_crtc);
4685
4686         I915_WRITE(DSPCNTR(plane), dspcntr);
4687         POSTING_READ(DSPCNTR(plane));
4688
4689         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4690                                                crtc->x, crtc->y);
4691
4692         intel_crtc->active = true;
4693
4694         if (!IS_GEN2(dev))
4695                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4696
4697         for_each_encoder_on_crtc(dev, crtc, encoder)
4698                 if (encoder->pre_enable)
4699                         encoder->pre_enable(encoder);
4700
4701         i9xx_enable_pll(intel_crtc);
4702
4703         i9xx_pfit_enable(intel_crtc);
4704
4705         intel_crtc_load_lut(crtc);
4706
4707         intel_update_watermarks(crtc);
4708         intel_enable_pipe(intel_crtc);
4709
4710         for_each_encoder_on_crtc(dev, crtc, encoder)
4711                 encoder->enable(encoder);
4712
4713         intel_crtc_enable_planes(crtc);
4714
4715         /*
4716          * Gen2 reports pipe underruns whenever all planes are disabled.
4717          * So don't enable underrun reporting before at least some planes
4718          * are enabled.
4719          * FIXME: Need to fix the logic to work when we turn off all planes
4720          * but leave the pipe running.
4721          */
4722         if (IS_GEN2(dev))
4723                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4724
4725         drm_crtc_vblank_on(crtc);
4726
4727         /* Underruns don't raise interrupts, so check manually. */
4728         i9xx_check_fifo_underruns(dev);
4729 }
4730
4731 static void i9xx_pfit_disable(struct intel_crtc *crtc)
4732 {
4733         struct drm_device *dev = crtc->base.dev;
4734         struct drm_i915_private *dev_priv = dev->dev_private;
4735
4736         if (!crtc->config.gmch_pfit.control)
4737                 return;
4738
4739         assert_pipe_disabled(dev_priv, crtc->pipe);
4740
4741         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4742                          I915_READ(PFIT_CONTROL));
4743         I915_WRITE(PFIT_CONTROL, 0);
4744 }
4745
4746 static void i9xx_crtc_disable(struct drm_crtc *crtc)
4747 {
4748         struct drm_device *dev = crtc->dev;
4749         struct drm_i915_private *dev_priv = dev->dev_private;
4750         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4751         struct intel_encoder *encoder;
4752         int pipe = intel_crtc->pipe;
4753
4754         if (!intel_crtc->active)
4755                 return;
4756
4757         /*
4758          * Gen2 reports pipe underruns whenever all planes are disabled.
4759          * So diasble underrun reporting before all the planes get disabled.
4760          * FIXME: Need to fix the logic to work when we turn off all planes
4761          * but leave the pipe running.
4762          */
4763         if (IS_GEN2(dev))
4764                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4765
4766         intel_crtc_disable_planes(crtc);
4767
4768         for_each_encoder_on_crtc(dev, crtc, encoder)
4769                 encoder->disable(encoder);
4770
4771         /*
4772          * On gen2 planes are double buffered but the pipe isn't, so we must
4773          * wait for planes to fully turn off before disabling the pipe.
4774          */
4775         if (IS_GEN2(dev))
4776                 intel_wait_for_vblank(dev, pipe);
4777
4778         intel_disable_pipe(dev_priv, pipe);
4779
4780         i9xx_pfit_disable(intel_crtc);
4781
4782         for_each_encoder_on_crtc(dev, crtc, encoder)
4783                 if (encoder->post_disable)
4784                         encoder->post_disable(encoder);
4785
4786         if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4787                 if (IS_CHERRYVIEW(dev))
4788                         chv_disable_pll(dev_priv, pipe);
4789                 else if (IS_VALLEYVIEW(dev))
4790                         vlv_disable_pll(dev_priv, pipe);
4791                 else
4792                         i9xx_disable_pll(dev_priv, pipe);
4793         }
4794
4795         if (!IS_GEN2(dev))
4796                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4797
4798         intel_crtc->active = false;
4799         intel_update_watermarks(crtc);
4800
4801         mutex_lock(&dev->struct_mutex);
4802         intel_update_fbc(dev);
4803         intel_edp_psr_update(dev);
4804         mutex_unlock(&dev->struct_mutex);
4805 }
4806
4807 static void i9xx_crtc_off(struct drm_crtc *crtc)
4808 {
4809 }
4810
4811 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4812                                     bool enabled)
4813 {
4814         struct drm_device *dev = crtc->dev;
4815         struct drm_i915_master_private *master_priv = dev->dev_private;
4816         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4817         int pipe = intel_crtc->pipe;
4818
4819 #if 0
4820         if (!dev->primary->master)
4821                 return;
4822
4823         master_priv = dev->primary->master->driver_priv;
4824 #endif
4825         if (!master_priv->sarea_priv)
4826                 return;
4827
4828         switch (pipe) {
4829         case 0:
4830                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4831                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4832                 break;
4833         case 1:
4834                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4835                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4836                 break;
4837         default:
4838                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4839                 break;
4840         }
4841 }
4842
4843 /**
4844  * Sets the power management mode of the pipe and plane.
4845  */
4846 void intel_crtc_update_dpms(struct drm_crtc *crtc)
4847 {
4848         struct drm_device *dev = crtc->dev;
4849         struct drm_i915_private *dev_priv = dev->dev_private;
4850         struct intel_encoder *intel_encoder;
4851         bool enable = false;
4852
4853         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4854                 enable |= intel_encoder->connectors_active;
4855
4856         if (enable)
4857                 dev_priv->display.crtc_enable(crtc);
4858         else
4859                 dev_priv->display.crtc_disable(crtc);
4860
4861         intel_crtc_update_sarea(crtc, enable);
4862 }
4863
4864 static void intel_crtc_disable(struct drm_crtc *crtc)
4865 {
4866         struct drm_device *dev = crtc->dev;
4867         struct drm_connector *connector;
4868         struct drm_i915_private *dev_priv = dev->dev_private;
4869
4870         /* crtc should still be enabled when we disable it. */
4871         WARN_ON(!crtc->enabled);
4872
4873         dev_priv->display.crtc_disable(crtc);
4874         intel_crtc_update_sarea(crtc, false);
4875         dev_priv->display.off(crtc);
4876
4877         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4878         assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
4879         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4880
4881         if (crtc->primary->fb) {
4882                 mutex_lock(&dev->struct_mutex);
4883                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
4884                 mutex_unlock(&dev->struct_mutex);
4885                 crtc->primary->fb = NULL;
4886         }
4887
4888         /* Update computed state. */
4889         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4890                 if (!connector->encoder || !connector->encoder->crtc)
4891                         continue;
4892
4893                 if (connector->encoder->crtc != crtc)
4894                         continue;
4895
4896                 connector->dpms = DRM_MODE_DPMS_OFF;
4897                 to_intel_encoder(connector->encoder)->connectors_active = false;
4898         }
4899 }
4900
4901 void intel_encoder_destroy(struct drm_encoder *encoder)
4902 {
4903         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4904
4905         drm_encoder_cleanup(encoder);
4906         kfree(intel_encoder);
4907 }
4908
4909 /* Simple dpms helper for encoders with just one connector, no cloning and only
4910  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4911  * state of the entire output pipe. */
4912 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4913 {
4914         if (mode == DRM_MODE_DPMS_ON) {
4915                 encoder->connectors_active = true;
4916
4917                 intel_crtc_update_dpms(encoder->base.crtc);
4918         } else {
4919                 encoder->connectors_active = false;
4920
4921                 intel_crtc_update_dpms(encoder->base.crtc);
4922         }
4923 }
4924
4925 /* Cross check the actual hw state with our own modeset state tracking (and it's
4926  * internal consistency). */
4927 static void intel_connector_check_state(struct intel_connector *connector)
4928 {
4929         if (connector->get_hw_state(connector)) {
4930                 struct intel_encoder *encoder = connector->encoder;
4931                 struct drm_crtc *crtc;
4932                 bool encoder_enabled;
4933                 enum i915_pipe pipe;
4934
4935                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4936                               connector->base.base.id,
4937                               connector->base.name);
4938
4939                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4940                      "wrong connector dpms state\n");
4941                 WARN(connector->base.encoder != &encoder->base,
4942                      "active connector not linked to encoder\n");
4943                 WARN(!encoder->connectors_active,
4944                      "encoder->connectors_active not set\n");
4945
4946                 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4947                 WARN(!encoder_enabled, "encoder not enabled\n");
4948                 if (WARN_ON(!encoder->base.crtc))
4949                         return;
4950
4951                 crtc = encoder->base.crtc;
4952
4953                 WARN(!crtc->enabled, "crtc not enabled\n");
4954                 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4955                 WARN(pipe != to_intel_crtc(crtc)->pipe,
4956                      "encoder active on the wrong pipe\n");
4957         }
4958 }
4959
4960 /* Even simpler default implementation, if there's really no special case to
4961  * consider. */
4962 void intel_connector_dpms(struct drm_connector *connector, int mode)
4963 {
4964         /* All the simple cases only support two dpms states. */
4965         if (mode != DRM_MODE_DPMS_ON)
4966                 mode = DRM_MODE_DPMS_OFF;
4967
4968         if (mode == connector->dpms)
4969                 return;
4970
4971         connector->dpms = mode;
4972
4973         /* Only need to change hw state when actually enabled */
4974         if (connector->encoder)
4975                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4976
4977         intel_modeset_check_state(connector->dev);
4978 }
4979
4980 /* Simple connector->get_hw_state implementation for encoders that support only
4981  * one connector and no cloning and hence the encoder state determines the state
4982  * of the connector. */
4983 bool intel_connector_get_hw_state(struct intel_connector *connector)
4984 {
4985         enum i915_pipe pipe = 0;
4986         struct intel_encoder *encoder = connector->encoder;
4987
4988         return encoder->get_hw_state(encoder, &pipe);
4989 }
4990
4991 static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
4992                                      struct intel_crtc_config *pipe_config)
4993 {
4994         struct drm_i915_private *dev_priv = dev->dev_private;
4995         struct intel_crtc *pipe_B_crtc =
4996                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4997
4998         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4999                       pipe_name(pipe), pipe_config->fdi_lanes);
5000         if (pipe_config->fdi_lanes > 4) {
5001                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5002                               pipe_name(pipe), pipe_config->fdi_lanes);
5003                 return false;
5004         }
5005
5006         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5007                 if (pipe_config->fdi_lanes > 2) {
5008                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5009                                       pipe_config->fdi_lanes);
5010                         return false;
5011                 } else {
5012                         return true;
5013                 }
5014         }
5015
5016         if (INTEL_INFO(dev)->num_pipes == 2)
5017                 return true;
5018
5019         /* Ivybridge 3 pipe is really complicated */
5020         switch (pipe) {
5021         case PIPE_A:
5022                 return true;
5023         case PIPE_B:
5024                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5025                     pipe_config->fdi_lanes > 2) {
5026                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5027                                       pipe_name(pipe), pipe_config->fdi_lanes);
5028                         return false;
5029                 }
5030                 return true;
5031         case PIPE_C:
5032                 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5033                     pipe_B_crtc->config.fdi_lanes <= 2) {
5034                         if (pipe_config->fdi_lanes > 2) {
5035                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5036                                               pipe_name(pipe), pipe_config->fdi_lanes);
5037                                 return false;
5038                         }
5039                 } else {
5040                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5041                         return false;
5042                 }
5043                 return true;
5044         default:
5045                 BUG();
5046         }
5047 }
5048
5049 #define RETRY 1
5050 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5051                                        struct intel_crtc_config *pipe_config)
5052 {
5053         struct drm_device *dev = intel_crtc->base.dev;
5054         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5055         int lane, link_bw, fdi_dotclock;
5056         bool setup_ok, needs_recompute = false;
5057
5058 retry:
5059         /* FDI is a binary signal running at ~2.7GHz, encoding
5060          * each output octet as 10 bits. The actual frequency
5061          * is stored as a divider into a 100MHz clock, and the
5062          * mode pixel clock is stored in units of 1KHz.
5063          * Hence the bw of each lane in terms of the mode signal
5064          * is:
5065          */
5066         link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5067
5068         fdi_dotclock = adjusted_mode->crtc_clock;
5069
5070         lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5071                                            pipe_config->pipe_bpp);
5072
5073         pipe_config->fdi_lanes = lane;
5074
5075         intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5076                                link_bw, &pipe_config->fdi_m_n);
5077
5078         setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5079                                             intel_crtc->pipe, pipe_config);
5080         if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5081                 pipe_config->pipe_bpp -= 2*3;
5082                 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5083                               pipe_config->pipe_bpp);
5084                 needs_recompute = true;
5085                 pipe_config->bw_constrained = true;
5086
5087                 goto retry;
5088         }
5089
5090         if (needs_recompute)
5091                 return RETRY;
5092
5093         return setup_ok ? 0 : -EINVAL;
5094 }
5095
5096 static void hsw_compute_ips_config(struct intel_crtc *crtc,
5097                                    struct intel_crtc_config *pipe_config)
5098 {
5099         pipe_config->ips_enabled = i915.enable_ips &&
5100                                    hsw_crtc_supports_ips(crtc) &&
5101                                    pipe_config->pipe_bpp <= 24;
5102 }
5103
5104 static int intel_crtc_compute_config(struct intel_crtc *crtc,
5105                                      struct intel_crtc_config *pipe_config)
5106 {
5107         struct drm_device *dev = crtc->base.dev;
5108         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5109
5110         /* FIXME should check pixel clock limits on all platforms */
5111         if (INTEL_INFO(dev)->gen < 4) {
5112                 struct drm_i915_private *dev_priv = dev->dev_private;
5113                 int clock_limit =
5114                         dev_priv->display.get_display_clock_speed(dev);
5115
5116                 /*
5117                  * Enable pixel doubling when the dot clock
5118                  * is > 90% of the (display) core speed.
5119                  *
5120                  * GDG double wide on either pipe,
5121                  * otherwise pipe A only.
5122                  */
5123                 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5124                     adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5125                         clock_limit *= 2;
5126                         pipe_config->double_wide = true;
5127                 }
5128
5129                 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5130                         return -EINVAL;
5131         }
5132
5133         /*
5134          * Pipe horizontal size must be even in:
5135          * - DVO ganged mode
5136          * - LVDS dual channel mode
5137          * - Double wide pipe
5138          */
5139         if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5140              intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5141                 pipe_config->pipe_src_w &= ~1;
5142
5143         /* Cantiga+ cannot handle modes with a hsync front porch of 0.
5144          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5145          */
5146         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5147                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5148                 return -EINVAL;
5149
5150         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5151                 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5152         } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5153                 /* only a 8bpc pipe, with 6bpc dither through the panel fitter
5154                  * for lvds. */
5155                 pipe_config->pipe_bpp = 8*3;
5156         }
5157
5158         if (HAS_IPS(dev))
5159                 hsw_compute_ips_config(crtc, pipe_config);
5160
5161         /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
5162          * clock survives for now. */
5163         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5164                 pipe_config->shared_dpll = crtc->config.shared_dpll;
5165
5166         if (pipe_config->has_pch_encoder)
5167                 return ironlake_fdi_compute_config(crtc, pipe_config);
5168
5169         return 0;
5170 }
5171
5172 static int valleyview_get_display_clock_speed(struct drm_device *dev)
5173 {
5174         return 400000; /* FIXME */
5175 }
5176
5177 static int i945_get_display_clock_speed(struct drm_device *dev)
5178 {
5179         return 400000;
5180 }
5181
5182 static int i915_get_display_clock_speed(struct drm_device *dev)
5183 {
5184         return 333000;
5185 }
5186
5187 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5188 {
5189         return 200000;
5190 }
5191
5192 static int pnv_get_display_clock_speed(struct drm_device *dev)
5193 {
5194         u16 gcfgc = 0;
5195
5196         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5197
5198         switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5199         case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5200                 return 267000;
5201         case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5202                 return 333000;
5203         case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5204                 return 444000;
5205         case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5206                 return 200000;
5207         default:
5208                 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5209         case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5210                 return 133000;
5211         case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5212                 return 167000;
5213         }
5214 }
5215
5216 static int i915gm_get_display_clock_speed(struct drm_device *dev)
5217 {
5218         u16 gcfgc = 0;
5219
5220         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5221
5222         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5223                 return 133000;
5224         else {
5225                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5226                 case GC_DISPLAY_CLOCK_333_MHZ:
5227                         return 333000;
5228                 default:
5229                 case GC_DISPLAY_CLOCK_190_200_MHZ:
5230                         return 190000;
5231                 }
5232         }
5233 }
5234
5235 static int i865_get_display_clock_speed(struct drm_device *dev)
5236 {
5237         return 266000;
5238 }
5239
5240 static int i855_get_display_clock_speed(struct drm_device *dev)
5241 {
5242         u16 hpllcc = 0;
5243         /* Assume that the hardware is in the high speed state.  This
5244          * should be the default.
5245          */
5246         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5247         case GC_CLOCK_133_200:
5248         case GC_CLOCK_100_200:
5249                 return 200000;
5250         case GC_CLOCK_166_250:
5251                 return 250000;
5252         case GC_CLOCK_100_133:
5253                 return 133000;
5254         }
5255
5256         /* Shouldn't happen */
5257         return 0;
5258 }
5259
5260 static int i830_get_display_clock_speed(struct drm_device *dev)
5261 {
5262         return 133000;
5263 }
5264
5265 static void
5266 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5267 {
5268         while (*num > DATA_LINK_M_N_MASK ||
5269                *den > DATA_LINK_M_N_MASK) {
5270                 *num >>= 1;
5271                 *den >>= 1;
5272         }
5273 }
5274
5275 static void compute_m_n(unsigned int m, unsigned int n,
5276                         uint32_t *ret_m, uint32_t *ret_n)
5277 {
5278         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5279         *ret_m = div_u64((uint64_t) m * *ret_n, n);
5280         intel_reduce_m_n_ratio(ret_m, ret_n);
5281 }
5282
5283 void
5284 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5285                        int pixel_clock, int link_clock,
5286                        struct intel_link_m_n *m_n)
5287 {
5288         m_n->tu = 64;
5289
5290         compute_m_n(bits_per_pixel * pixel_clock,
5291                     link_clock * nlanes * 8,
5292                     &m_n->gmch_m, &m_n->gmch_n);
5293
5294         compute_m_n(pixel_clock, link_clock,
5295                     &m_n->link_m, &m_n->link_n);
5296 }
5297
5298 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5299 {
5300         if (i915.panel_use_ssc >= 0)
5301                 return i915.panel_use_ssc != 0;
5302         return dev_priv->vbt.lvds_use_ssc
5303                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5304 }
5305
5306 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5307 {
5308         struct drm_device *dev = crtc->dev;
5309         struct drm_i915_private *dev_priv = dev->dev_private;
5310         int refclk;
5311
5312         if (IS_VALLEYVIEW(dev)) {
5313                 refclk = 100000;
5314         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5315             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5316                 refclk = dev_priv->vbt.lvds_ssc_freq;
5317                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5318         } else if (!IS_GEN2(dev)) {
5319                 refclk = 96000;
5320         } else {
5321                 refclk = 48000;
5322         }
5323
5324         return refclk;
5325 }
5326
5327 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5328 {
5329         return (1 << dpll->n) << 16 | dpll->m2;
5330 }
5331
5332 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5333 {
5334         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5335 }
5336
5337 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5338                                      intel_clock_t *reduced_clock)
5339 {
5340         struct drm_device *dev = crtc->base.dev;
5341         u32 fp, fp2 = 0;
5342
5343         if (IS_PINEVIEW(dev)) {
5344                 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
5345                 if (reduced_clock)
5346                         fp2 = pnv_dpll_compute_fp(reduced_clock);
5347         } else {
5348                 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
5349                 if (reduced_clock)
5350                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
5351         }
5352
5353         crtc->config.dpll_hw_state.fp0 = fp;
5354
5355         crtc->lowfreq_avail = false;
5356         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5357             reduced_clock && i915.powersave) {
5358                 crtc->config.dpll_hw_state.fp1 = fp2;
5359                 crtc->lowfreq_avail = true;
5360         } else {
5361                 crtc->config.dpll_hw_state.fp1 = fp;
5362         }
5363 }
5364
5365 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
5366                 pipe)
5367 {
5368         u32 reg_val;
5369
5370         /*
5371          * PLLB opamp always calibrates to max value of 0x3f, force enable it
5372          * and set it to a reasonable value instead.
5373          */
5374         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5375         reg_val &= 0xffffff00;
5376         reg_val |= 0x00000030;
5377         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5378
5379         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5380         reg_val &= 0x8cffffff;
5381         reg_val = 0x8c000000;
5382         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5383
5384         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5385         reg_val &= 0xffffff00;
5386         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5387
5388         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5389         reg_val &= 0x00ffffff;
5390         reg_val |= 0xb0000000;
5391         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5392 }
5393
5394 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5395                                          struct intel_link_m_n *m_n)
5396 {
5397         struct drm_device *dev = crtc->base.dev;
5398         struct drm_i915_private *dev_priv = dev->dev_private;
5399         int pipe = crtc->pipe;
5400
5401         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5402         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5403         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5404         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5405 }
5406
5407 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5408                                          struct intel_link_m_n *m_n)
5409 {
5410         struct drm_device *dev = crtc->base.dev;
5411         struct drm_i915_private *dev_priv = dev->dev_private;
5412         int pipe = crtc->pipe;
5413         enum transcoder transcoder = crtc->config.cpu_transcoder;
5414
5415         if (INTEL_INFO(dev)->gen >= 5) {
5416                 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5417                 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5418                 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5419                 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5420         } else {
5421                 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5422                 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5423                 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5424                 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5425         }
5426 }
5427
5428 static void intel_dp_set_m_n(struct intel_crtc *crtc)
5429 {
5430         if (crtc->config.has_pch_encoder)
5431                 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5432         else
5433                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5434 }
5435
5436 static void vlv_update_pll(struct intel_crtc *crtc)
5437 {
5438         u32 dpll, dpll_md;
5439
5440         /*
5441          * Enable DPIO clock input. We should never disable the reference
5442          * clock for pipe B, since VGA hotplug / manual detection depends
5443          * on it.
5444          */
5445         dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5446                 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5447         /* We should never disable this, set it here for state tracking */
5448         if (crtc->pipe == PIPE_B)
5449                 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5450         dpll |= DPLL_VCO_ENABLE;
5451         crtc->config.dpll_hw_state.dpll = dpll;
5452
5453         dpll_md = (crtc->config.pixel_multiplier - 1)
5454                 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5455         crtc->config.dpll_hw_state.dpll_md = dpll_md;
5456 }
5457
5458 static void vlv_prepare_pll(struct intel_crtc *crtc)
5459 {
5460         struct drm_device *dev = crtc->base.dev;
5461         struct drm_i915_private *dev_priv = dev->dev_private;
5462         int pipe = crtc->pipe;
5463         u32 mdiv;
5464         u32 bestn, bestm1, bestm2, bestp1, bestp2;
5465         u32 coreclk, reg_val;
5466
5467         mutex_lock(&dev_priv->dpio_lock);
5468
5469         bestn = crtc->config.dpll.n;
5470         bestm1 = crtc->config.dpll.m1;
5471         bestm2 = crtc->config.dpll.m2;
5472         bestp1 = crtc->config.dpll.p1;
5473         bestp2 = crtc->config.dpll.p2;
5474
5475         /* See eDP HDMI DPIO driver vbios notes doc */
5476
5477         /* PLL B needs special handling */
5478         if (pipe == PIPE_B)
5479                 vlv_pllb_recal_opamp(dev_priv, pipe);
5480
5481         /* Set up Tx target for periodic Rcomp update */
5482         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5483
5484         /* Disable target IRef on PLL */
5485         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5486         reg_val &= 0x00ffffff;
5487         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5488
5489         /* Disable fast lock */
5490         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5491
5492         /* Set idtafcrecal before PLL is enabled */
5493         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5494         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5495         mdiv |= ((bestn << DPIO_N_SHIFT));
5496         mdiv |= (1 << DPIO_K_SHIFT);
5497
5498         /*
5499          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5500          * but we don't support that).
5501          * Note: don't use the DAC post divider as it seems unstable.
5502          */
5503         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5504         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5505
5506         mdiv |= DPIO_ENABLE_CALIBRATION;
5507         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5508
5509         /* Set HBR and RBR LPF coefficients */
5510         if (crtc->config.port_clock == 162000 ||
5511             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5512             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5513                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5514                                  0x009f0003);
5515         else
5516                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5517                                  0x00d0000f);
5518
5519         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5520             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5521                 /* Use SSC source */
5522                 if (pipe == PIPE_A)
5523                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5524                                          0x0df40000);
5525                 else
5526                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5527                                          0x0df70000);
5528         } else { /* HDMI or VGA */
5529                 /* Use bend source */
5530                 if (pipe == PIPE_A)
5531                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5532                                          0x0df70000);
5533                 else
5534                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5535                                          0x0df40000);
5536         }
5537
5538         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5539         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5540         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5541             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5542                 coreclk |= 0x01000000;
5543         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5544
5545         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5546         mutex_unlock(&dev_priv->dpio_lock);
5547 }
5548
5549 static void chv_update_pll(struct intel_crtc *crtc)
5550 {
5551         struct drm_device *dev = crtc->base.dev;
5552         struct drm_i915_private *dev_priv = dev->dev_private;
5553         int pipe = crtc->pipe;
5554         int dpll_reg = DPLL(crtc->pipe);
5555         enum dpio_channel port = vlv_pipe_to_channel(pipe);
5556         u32 loopfilter, intcoeff;
5557         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5558         int refclk;
5559
5560         crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5561                 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5562                 DPLL_VCO_ENABLE;
5563         if (pipe != PIPE_A)
5564                 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5565
5566         crtc->config.dpll_hw_state.dpll_md =
5567                 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5568
5569         bestn = crtc->config.dpll.n;
5570         bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5571         bestm1 = crtc->config.dpll.m1;
5572         bestm2 = crtc->config.dpll.m2 >> 22;
5573         bestp1 = crtc->config.dpll.p1;
5574         bestp2 = crtc->config.dpll.p2;
5575
5576         /*
5577          * Enable Refclk and SSC
5578          */
5579         I915_WRITE(dpll_reg,
5580                    crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5581
5582         mutex_lock(&dev_priv->dpio_lock);
5583
5584         /* p1 and p2 divider */
5585         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5586                         5 << DPIO_CHV_S1_DIV_SHIFT |
5587                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5588                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5589                         1 << DPIO_CHV_K_DIV_SHIFT);
5590
5591         /* Feedback post-divider - m2 */
5592         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5593
5594         /* Feedback refclk divider - n and m1 */
5595         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5596                         DPIO_CHV_M1_DIV_BY_2 |
5597                         1 << DPIO_CHV_N_DIV_SHIFT);
5598
5599         /* M2 fraction division */
5600         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5601
5602         /* M2 fraction division enable */
5603         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5604                        DPIO_CHV_FRAC_DIV_EN |
5605                        (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5606
5607         /* Loop filter */
5608         refclk = i9xx_get_refclk(&crtc->base, 0);
5609         loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5610                 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5611         if (refclk == 100000)
5612                 intcoeff = 11;
5613         else if (refclk == 38400)
5614                 intcoeff = 10;
5615         else
5616                 intcoeff = 9;
5617         loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5618         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5619
5620         /* AFC Recal */
5621         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5622                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5623                         DPIO_AFC_RECAL);
5624
5625         mutex_unlock(&dev_priv->dpio_lock);
5626 }
5627
5628 static void i9xx_update_pll(struct intel_crtc *crtc,
5629                             intel_clock_t *reduced_clock,
5630                             int num_connectors)
5631 {
5632         struct drm_device *dev = crtc->base.dev;
5633         struct drm_i915_private *dev_priv = dev->dev_private;
5634         u32 dpll;
5635         bool is_sdvo;
5636         struct dpll *clock = &crtc->config.dpll;
5637
5638         i9xx_update_pll_dividers(crtc, reduced_clock);
5639
5640         is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5641                 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5642
5643         dpll = DPLL_VGA_MODE_DIS;
5644
5645         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5646                 dpll |= DPLLB_MODE_LVDS;
5647         else
5648                 dpll |= DPLLB_MODE_DAC_SERIAL;
5649
5650         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5651                 dpll |= (crtc->config.pixel_multiplier - 1)
5652                         << SDVO_MULTIPLIER_SHIFT_HIRES;
5653         }
5654
5655         if (is_sdvo)
5656                 dpll |= DPLL_SDVO_HIGH_SPEED;
5657
5658         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5659                 dpll |= DPLL_SDVO_HIGH_SPEED;
5660
5661         /* compute bitmask from p1 value */
5662         if (IS_PINEVIEW(dev))
5663                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5664         else {
5665                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5666                 if (IS_G4X(dev) && reduced_clock)
5667                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5668         }
5669         switch (clock->p2) {
5670         case 5:
5671                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5672                 break;
5673         case 7:
5674                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5675                 break;
5676         case 10:
5677                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5678                 break;
5679         case 14:
5680                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5681                 break;
5682         }
5683         if (INTEL_INFO(dev)->gen >= 4)
5684                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5685
5686         if (crtc->config.sdvo_tv_clock)
5687                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5688         else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5689                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5690                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5691         else
5692                 dpll |= PLL_REF_INPUT_DREFCLK;
5693
5694         dpll |= DPLL_VCO_ENABLE;
5695         crtc->config.dpll_hw_state.dpll = dpll;
5696
5697         if (INTEL_INFO(dev)->gen >= 4) {
5698                 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5699                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5700                 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5701         }
5702 }
5703
5704 static void i8xx_update_pll(struct intel_crtc *crtc,
5705                             intel_clock_t *reduced_clock,
5706                             int num_connectors)
5707 {
5708         struct drm_device *dev = crtc->base.dev;
5709         struct drm_i915_private *dev_priv = dev->dev_private;
5710         u32 dpll;
5711         struct dpll *clock = &crtc->config.dpll;
5712
5713         i9xx_update_pll_dividers(crtc, reduced_clock);
5714
5715         dpll = DPLL_VGA_MODE_DIS;
5716
5717         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5718                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5719         } else {
5720                 if (clock->p1 == 2)
5721                         dpll |= PLL_P1_DIVIDE_BY_TWO;
5722                 else
5723                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5724                 if (clock->p2 == 4)
5725                         dpll |= PLL_P2_DIVIDE_BY_4;
5726         }
5727
5728         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5729                 dpll |= DPLL_DVO_2X_MODE;
5730
5731         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5732                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5733                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5734         else
5735                 dpll |= PLL_REF_INPUT_DREFCLK;
5736
5737         dpll |= DPLL_VCO_ENABLE;
5738         crtc->config.dpll_hw_state.dpll = dpll;
5739 }
5740
5741 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5742 {
5743         struct drm_device *dev = intel_crtc->base.dev;
5744         struct drm_i915_private *dev_priv = dev->dev_private;
5745         enum i915_pipe pipe = intel_crtc->pipe;
5746         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5747         struct drm_display_mode *adjusted_mode =
5748                 &intel_crtc->config.adjusted_mode;
5749         uint32_t crtc_vtotal, crtc_vblank_end;
5750         int vsyncshift = 0;
5751
5752         /* We need to be careful not to changed the adjusted mode, for otherwise
5753          * the hw state checker will get angry at the mismatch. */
5754         crtc_vtotal = adjusted_mode->crtc_vtotal;
5755         crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5756
5757         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5758                 /* the chip adds 2 halflines automatically */
5759                 crtc_vtotal -= 1;
5760                 crtc_vblank_end -= 1;
5761
5762                 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5763                         vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5764                 else
5765                         vsyncshift = adjusted_mode->crtc_hsync_start -
5766                                 adjusted_mode->crtc_htotal / 2;
5767                 if (vsyncshift < 0)
5768                         vsyncshift += adjusted_mode->crtc_htotal;
5769         }
5770
5771         if (INTEL_INFO(dev)->gen > 3)
5772                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5773
5774         I915_WRITE(HTOTAL(cpu_transcoder),
5775                    (adjusted_mode->crtc_hdisplay - 1) |
5776                    ((adjusted_mode->crtc_htotal - 1) << 16));
5777         I915_WRITE(HBLANK(cpu_transcoder),
5778                    (adjusted_mode->crtc_hblank_start - 1) |
5779                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5780         I915_WRITE(HSYNC(cpu_transcoder),
5781                    (adjusted_mode->crtc_hsync_start - 1) |
5782                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5783
5784         I915_WRITE(VTOTAL(cpu_transcoder),
5785                    (adjusted_mode->crtc_vdisplay - 1) |
5786                    ((crtc_vtotal - 1) << 16));
5787         I915_WRITE(VBLANK(cpu_transcoder),
5788                    (adjusted_mode->crtc_vblank_start - 1) |
5789                    ((crtc_vblank_end - 1) << 16));
5790         I915_WRITE(VSYNC(cpu_transcoder),
5791                    (adjusted_mode->crtc_vsync_start - 1) |
5792                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5793
5794         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5795          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5796          * documented on the DDI_FUNC_CTL register description, EDP Input Select
5797          * bits. */
5798         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5799             (pipe == PIPE_B || pipe == PIPE_C))
5800                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5801
5802         /* pipesrc controls the size that is scaled from, which should
5803          * always be the user's requested size.
5804          */
5805         I915_WRITE(PIPESRC(pipe),
5806                    ((intel_crtc->config.pipe_src_w - 1) << 16) |
5807                    (intel_crtc->config.pipe_src_h - 1));
5808 }
5809
5810 static void intel_get_pipe_timings(struct intel_crtc *crtc,
5811                                    struct intel_crtc_config *pipe_config)
5812 {
5813         struct drm_device *dev = crtc->base.dev;
5814         struct drm_i915_private *dev_priv = dev->dev_private;
5815         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5816         uint32_t tmp;
5817
5818         tmp = I915_READ(HTOTAL(cpu_transcoder));
5819         pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5820         pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5821         tmp = I915_READ(HBLANK(cpu_transcoder));
5822         pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5823         pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5824         tmp = I915_READ(HSYNC(cpu_transcoder));
5825         pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5826         pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5827
5828         tmp = I915_READ(VTOTAL(cpu_transcoder));
5829         pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5830         pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5831         tmp = I915_READ(VBLANK(cpu_transcoder));
5832         pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5833         pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5834         tmp = I915_READ(VSYNC(cpu_transcoder));
5835         pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5836         pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5837
5838         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5839                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5840                 pipe_config->adjusted_mode.crtc_vtotal += 1;
5841                 pipe_config->adjusted_mode.crtc_vblank_end += 1;
5842         }
5843
5844         tmp = I915_READ(PIPESRC(crtc->pipe));
5845         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5846         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5847
5848         pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5849         pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5850 }
5851
5852 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5853                                  struct intel_crtc_config *pipe_config)
5854 {
5855         mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5856         mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5857         mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5858         mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5859
5860         mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5861         mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5862         mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5863         mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5864
5865         mode->flags = pipe_config->adjusted_mode.flags;
5866
5867         mode->clock = pipe_config->adjusted_mode.crtc_clock;
5868         mode->flags |= pipe_config->adjusted_mode.flags;
5869 }
5870
5871 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5872 {
5873         struct drm_device *dev = intel_crtc->base.dev;
5874         struct drm_i915_private *dev_priv = dev->dev_private;
5875         uint32_t pipeconf;
5876
5877         pipeconf = 0;
5878
5879         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5880             I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5881                 pipeconf |= PIPECONF_ENABLE;
5882
5883         if (intel_crtc->config.double_wide)
5884                 pipeconf |= PIPECONF_DOUBLE_WIDE;
5885
5886         /* only g4x and later have fancy bpc/dither controls */
5887         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5888                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5889                 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5890                         pipeconf |= PIPECONF_DITHER_EN |
5891                                     PIPECONF_DITHER_TYPE_SP;
5892
5893                 switch (intel_crtc->config.pipe_bpp) {
5894                 case 18:
5895                         pipeconf |= PIPECONF_6BPC;
5896                         break;
5897                 case 24:
5898                         pipeconf |= PIPECONF_8BPC;
5899                         break;
5900                 case 30:
5901                         pipeconf |= PIPECONF_10BPC;
5902                         break;
5903                 default:
5904                         /* Case prevented by intel_choose_pipe_bpp_dither. */
5905                         BUG();
5906                 }
5907         }
5908
5909         if (HAS_PIPE_CXSR(dev)) {
5910                 if (intel_crtc->lowfreq_avail) {
5911                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5912                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5913                 } else {
5914                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5915                 }
5916         }
5917
5918         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5919                 if (INTEL_INFO(dev)->gen < 4 ||
5920                     intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5921                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5922                 else
5923                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5924         } else
5925                 pipeconf |= PIPECONF_PROGRESSIVE;
5926
5927         if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5928                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5929
5930         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5931         POSTING_READ(PIPECONF(intel_crtc->pipe));
5932 }
5933
5934 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5935                               int x, int y,
5936                               struct drm_framebuffer *fb)
5937 {
5938         struct drm_device *dev = crtc->dev;
5939         struct drm_i915_private *dev_priv = dev->dev_private;
5940         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5941         int refclk, num_connectors = 0;
5942         intel_clock_t clock, reduced_clock;
5943         bool ok, has_reduced_clock = false;
5944         bool is_lvds = false, is_dsi = false;
5945         struct intel_encoder *encoder;
5946         const intel_limit_t *limit;
5947
5948         for_each_encoder_on_crtc(dev, crtc, encoder) {
5949                 switch (encoder->type) {
5950                 case INTEL_OUTPUT_LVDS:
5951                         is_lvds = true;
5952                         break;
5953                 case INTEL_OUTPUT_DSI:
5954                         is_dsi = true;
5955                         break;
5956                 }
5957
5958                 num_connectors++;
5959         }
5960
5961         if (is_dsi)
5962                 return 0;
5963
5964         if (!intel_crtc->config.clock_set) {
5965                 refclk = i9xx_get_refclk(crtc, num_connectors);
5966
5967                 /*
5968                  * Returns a set of divisors for the desired target clock with
5969                  * the given refclk, or FALSE.  The returned values represent
5970                  * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
5971                  * 2) / p1 / p2.
5972                  */
5973                 limit = intel_limit(crtc, refclk);
5974                 ok = dev_priv->display.find_dpll(limit, crtc,
5975                                                  intel_crtc->config.port_clock,
5976                                                  refclk, NULL, &clock);
5977                 if (!ok) {
5978                         DRM_ERROR("Couldn't find PLL settings for mode!\n");
5979                         return -EINVAL;
5980                 }
5981
5982                 if (is_lvds && dev_priv->lvds_downclock_avail) {
5983                         /*
5984                          * Ensure we match the reduced clock's P to the target
5985                          * clock.  If the clocks don't match, we can't switch
5986                          * the display clock by using the FP0/FP1. In such case
5987                          * we will disable the LVDS downclock feature.
5988                          */
5989                         has_reduced_clock =
5990                                 dev_priv->display.find_dpll(limit, crtc,
5991                                                             dev_priv->lvds_downclock,
5992                                                             refclk, &clock,
5993                                                             &reduced_clock);
5994                 }
5995                 /* Compat-code for transition, will disappear. */
5996                 intel_crtc->config.dpll.n = clock.n;
5997                 intel_crtc->config.dpll.m1 = clock.m1;
5998                 intel_crtc->config.dpll.m2 = clock.m2;
5999                 intel_crtc->config.dpll.p1 = clock.p1;
6000                 intel_crtc->config.dpll.p2 = clock.p2;
6001         }
6002
6003         if (IS_GEN2(dev)) {
6004                 i8xx_update_pll(intel_crtc,
6005                                 has_reduced_clock ? &reduced_clock : NULL,
6006                                 num_connectors);
6007         } else if (IS_CHERRYVIEW(dev)) {
6008                 chv_update_pll(intel_crtc);
6009         } else if (IS_VALLEYVIEW(dev)) {
6010                 vlv_update_pll(intel_crtc);
6011         } else {
6012                 i9xx_update_pll(intel_crtc,
6013                                 has_reduced_clock ? &reduced_clock : NULL,
6014                                 num_connectors);
6015         }
6016
6017         return 0;
6018 }
6019
6020 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6021                                  struct intel_crtc_config *pipe_config)
6022 {
6023         struct drm_device *dev = crtc->base.dev;
6024         struct drm_i915_private *dev_priv = dev->dev_private;
6025         uint32_t tmp;
6026
6027         if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6028                 return;
6029
6030         tmp = I915_READ(PFIT_CONTROL);
6031         if (!(tmp & PFIT_ENABLE))
6032                 return;
6033
6034         /* Check whether the pfit is attached to our pipe. */
6035         if (INTEL_INFO(dev)->gen < 4) {
6036                 if (crtc->pipe != PIPE_B)
6037                         return;
6038         } else {
6039                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6040                         return;
6041         }
6042
6043         pipe_config->gmch_pfit.control = tmp;
6044         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6045         if (INTEL_INFO(dev)->gen < 5)
6046                 pipe_config->gmch_pfit.lvds_border_bits =
6047                         I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6048 }
6049
6050 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6051                                struct intel_crtc_config *pipe_config)
6052 {
6053         struct drm_device *dev = crtc->base.dev;
6054         struct drm_i915_private *dev_priv = dev->dev_private;
6055         int pipe = pipe_config->cpu_transcoder;
6056         intel_clock_t clock;
6057         u32 mdiv;
6058         int refclk = 100000;
6059
6060         mutex_lock(&dev_priv->dpio_lock);
6061         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6062         mutex_unlock(&dev_priv->dpio_lock);
6063
6064         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6065         clock.m2 = mdiv & DPIO_M2DIV_MASK;
6066         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6067         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6068         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6069
6070         vlv_clock(refclk, &clock);
6071
6072         /* clock.dot is the fast clock */
6073         pipe_config->port_clock = clock.dot / 5;
6074 }
6075
6076 static void i9xx_get_plane_config(struct intel_crtc *crtc,
6077                                   struct intel_plane_config *plane_config)
6078 {
6079         struct drm_device *dev = crtc->base.dev;
6080         struct drm_i915_private *dev_priv = dev->dev_private;
6081         u32 val, base, offset;
6082         int pipe = crtc->pipe, plane = crtc->plane;
6083         int fourcc, pixel_format;
6084         int aligned_height;
6085
6086         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6087         if (!crtc->base.primary->fb) {
6088                 DRM_DEBUG_KMS("failed to alloc fb\n");
6089                 return;
6090         }
6091
6092         val = I915_READ(DSPCNTR(plane));
6093
6094         if (INTEL_INFO(dev)->gen >= 4)
6095                 if (val & DISPPLANE_TILED)
6096                         plane_config->tiled = true;
6097
6098         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6099         fourcc = intel_format_to_fourcc(pixel_format);
6100         crtc->base.primary->fb->pixel_format = fourcc;
6101         crtc->base.primary->fb->bits_per_pixel =
6102                 drm_format_plane_cpp(fourcc, 0) * 8;
6103
6104         if (INTEL_INFO(dev)->gen >= 4) {
6105                 if (plane_config->tiled)
6106                         offset = I915_READ(DSPTILEOFF(plane));
6107                 else
6108                         offset = I915_READ(DSPLINOFF(plane));
6109                 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6110         } else {
6111                 base = I915_READ(DSPADDR(plane));
6112         }
6113         plane_config->base = base;
6114
6115         val = I915_READ(PIPESRC(pipe));
6116         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6117         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6118
6119         val = I915_READ(DSPSTRIDE(pipe));
6120         crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6121
6122         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6123                                             plane_config->tiled);
6124
6125         plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
6126                                    aligned_height, PAGE_SIZE);
6127
6128         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6129                       pipe, plane, crtc->base.primary->fb->width,
6130                       crtc->base.primary->fb->height,
6131                       crtc->base.primary->fb->bits_per_pixel, base,
6132                       crtc->base.primary->fb->pitches[0],
6133                       plane_config->size);
6134
6135 }
6136
6137 static void chv_crtc_clock_get(struct intel_crtc *crtc,
6138                                struct intel_crtc_config *pipe_config)
6139 {
6140         struct drm_device *dev = crtc->base.dev;
6141         struct drm_i915_private *dev_priv = dev->dev_private;
6142         int pipe = pipe_config->cpu_transcoder;
6143         enum dpio_channel port = vlv_pipe_to_channel(pipe);
6144         intel_clock_t clock;
6145         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6146         int refclk = 100000;
6147
6148         mutex_lock(&dev_priv->dpio_lock);
6149         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6150         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6151         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6152         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6153         mutex_unlock(&dev_priv->dpio_lock);
6154
6155         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6156         clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6157         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6158         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6159         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6160
6161         chv_clock(refclk, &clock);
6162
6163         /* clock.dot is the fast clock */
6164         pipe_config->port_clock = clock.dot / 5;
6165 }
6166
6167 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6168                                  struct intel_crtc_config *pipe_config)
6169 {
6170         struct drm_device *dev = crtc->base.dev;
6171         struct drm_i915_private *dev_priv = dev->dev_private;
6172         uint32_t tmp;
6173
6174         if (!intel_display_power_enabled(dev_priv,
6175                                          POWER_DOMAIN_PIPE(crtc->pipe)))
6176                 return false;
6177
6178         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6179         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6180
6181         tmp = I915_READ(PIPECONF(crtc->pipe));
6182         if (!(tmp & PIPECONF_ENABLE))
6183                 return false;
6184
6185         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6186                 switch (tmp & PIPECONF_BPC_MASK) {
6187                 case PIPECONF_6BPC:
6188                         pipe_config->pipe_bpp = 18;
6189                         break;
6190                 case PIPECONF_8BPC:
6191                         pipe_config->pipe_bpp = 24;
6192                         break;
6193                 case PIPECONF_10BPC:
6194                         pipe_config->pipe_bpp = 30;
6195                         break;
6196                 default:
6197                         break;
6198                 }
6199         }
6200
6201         if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6202                 pipe_config->limited_color_range = true;
6203
6204         if (INTEL_INFO(dev)->gen < 4)
6205                 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6206
6207         intel_get_pipe_timings(crtc, pipe_config);
6208
6209         i9xx_get_pfit_config(crtc, pipe_config);
6210
6211         if (INTEL_INFO(dev)->gen >= 4) {
6212                 tmp = I915_READ(DPLL_MD(crtc->pipe));
6213                 pipe_config->pixel_multiplier =
6214                         ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6215                          >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6216                 pipe_config->dpll_hw_state.dpll_md = tmp;
6217         } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6218                 tmp = I915_READ(DPLL(crtc->pipe));
6219                 pipe_config->pixel_multiplier =
6220                         ((tmp & SDVO_MULTIPLIER_MASK)
6221                          >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6222         } else {
6223                 /* Note that on i915G/GM the pixel multiplier is in the sdvo
6224                  * port and will be fixed up in the encoder->get_config
6225                  * function. */
6226                 pipe_config->pixel_multiplier = 1;
6227         }
6228         pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6229         if (!IS_VALLEYVIEW(dev)) {
6230                 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6231                 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6232         } else {
6233                 /* Mask out read-only status bits. */
6234                 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6235                                                      DPLL_PORTC_READY_MASK |
6236                                                      DPLL_PORTB_READY_MASK);
6237         }
6238
6239         if (IS_CHERRYVIEW(dev))
6240                 chv_crtc_clock_get(crtc, pipe_config);
6241         else if (IS_VALLEYVIEW(dev))
6242                 vlv_crtc_clock_get(crtc, pipe_config);
6243         else
6244                 i9xx_crtc_clock_get(crtc, pipe_config);
6245
6246         return true;
6247 }
6248
6249 static void ironlake_init_pch_refclk(struct drm_device *dev)
6250 {
6251         struct drm_i915_private *dev_priv = dev->dev_private;
6252         struct drm_mode_config *mode_config = &dev->mode_config;
6253         struct intel_encoder *encoder;
6254         u32 val, final;
6255         bool has_lvds = false;
6256         bool has_cpu_edp = false;
6257         bool has_panel = false;
6258         bool has_ck505 = false;
6259         bool can_ssc = false;
6260
6261         /* We need to take the global config into account */
6262         list_for_each_entry(encoder, &mode_config->encoder_list,
6263                             base.head) {
6264                 switch (encoder->type) {
6265                 case INTEL_OUTPUT_LVDS:
6266                         has_panel = true;
6267                         has_lvds = true;
6268                         break;
6269                 case INTEL_OUTPUT_EDP:
6270                         has_panel = true;
6271                         if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6272                                 has_cpu_edp = true;
6273                         break;
6274                 }
6275         }
6276
6277         if (HAS_PCH_IBX(dev)) {
6278                 has_ck505 = dev_priv->vbt.display_clock_mode;
6279                 can_ssc = has_ck505;
6280         } else {
6281                 has_ck505 = false;
6282                 can_ssc = true;
6283         }
6284
6285         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6286                       has_panel, has_lvds, has_ck505);
6287
6288         /* Ironlake: try to setup display ref clock before DPLL
6289          * enabling. This is only under driver's control after
6290          * PCH B stepping, previous chipset stepping should be
6291          * ignoring this setting.
6292          */
6293         val = I915_READ(PCH_DREF_CONTROL);
6294
6295         /* As we must carefully and slowly disable/enable each source in turn,
6296          * compute the final state we want first and check if we need to
6297          * make any changes at all.
6298          */
6299         final = val;
6300         final &= ~DREF_NONSPREAD_SOURCE_MASK;
6301         if (has_ck505)
6302                 final |= DREF_NONSPREAD_CK505_ENABLE;
6303         else
6304                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
6305
6306         final &= ~DREF_SSC_SOURCE_MASK;
6307         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6308         final &= ~DREF_SSC1_ENABLE;
6309
6310         if (has_panel) {
6311                 final |= DREF_SSC_SOURCE_ENABLE;
6312
6313                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6314                         final |= DREF_SSC1_ENABLE;
6315
6316                 if (has_cpu_edp) {
6317                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
6318                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6319                         else
6320                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6321                 } else
6322                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6323         } else {
6324                 final |= DREF_SSC_SOURCE_DISABLE;
6325                 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6326         }
6327
6328         if (final == val)
6329                 return;
6330
6331         /* Always enable nonspread source */
6332         val &= ~DREF_NONSPREAD_SOURCE_MASK;
6333
6334         if (has_ck505)
6335                 val |= DREF_NONSPREAD_CK505_ENABLE;
6336         else
6337                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
6338
6339         if (has_panel) {
6340                 val &= ~DREF_SSC_SOURCE_MASK;
6341                 val |= DREF_SSC_SOURCE_ENABLE;
6342
6343                 /* SSC must be turned on before enabling the CPU output  */
6344                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6345                         DRM_DEBUG_KMS("Using SSC on panel\n");
6346                         val |= DREF_SSC1_ENABLE;
6347                 } else
6348                         val &= ~DREF_SSC1_ENABLE;
6349
6350                 /* Get SSC going before enabling the outputs */
6351                 I915_WRITE(PCH_DREF_CONTROL, val);
6352                 POSTING_READ(PCH_DREF_CONTROL);
6353                 udelay(200);
6354
6355                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6356
6357                 /* Enable CPU source on CPU attached eDP */
6358                 if (has_cpu_edp) {
6359                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6360                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
6361                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6362                         } else
6363                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6364                 } else
6365                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6366
6367                 I915_WRITE(PCH_DREF_CONTROL, val);
6368                 POSTING_READ(PCH_DREF_CONTROL);
6369                 udelay(200);
6370         } else {
6371                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
6372
6373                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6374
6375                 /* Turn off CPU output */
6376                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6377
6378                 I915_WRITE(PCH_DREF_CONTROL, val);
6379                 POSTING_READ(PCH_DREF_CONTROL);
6380                 udelay(200);
6381
6382                 /* Turn off the SSC source */
6383                 val &= ~DREF_SSC_SOURCE_MASK;
6384                 val |= DREF_SSC_SOURCE_DISABLE;
6385
6386                 /* Turn off SSC1 */
6387                 val &= ~DREF_SSC1_ENABLE;
6388
6389                 I915_WRITE(PCH_DREF_CONTROL, val);
6390                 POSTING_READ(PCH_DREF_CONTROL);
6391                 udelay(200);
6392         }
6393
6394         BUG_ON(val != final);
6395 }
6396
6397 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6398 {
6399         uint32_t tmp;
6400
6401         tmp = I915_READ(SOUTH_CHICKEN2);
6402         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6403         I915_WRITE(SOUTH_CHICKEN2, tmp);
6404
6405         if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6406                                FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6407                 DRM_ERROR("FDI mPHY reset assert timeout\n");
6408
6409         tmp = I915_READ(SOUTH_CHICKEN2);
6410         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6411         I915_WRITE(SOUTH_CHICKEN2, tmp);
6412
6413         if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
6414                                 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6415                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
6416 }
6417
6418 /* WaMPhyProgramming:hsw */
6419 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6420 {
6421         uint32_t tmp;
6422
6423         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6424         tmp &= ~(0xFF << 24);
6425         tmp |= (0x12 << 24);
6426         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6427
6428         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6429         tmp |= (1 << 11);
6430         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6431
6432         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6433         tmp |= (1 << 11);
6434         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6435
6436         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6437         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6438         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6439
6440         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6441         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6442         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6443
6444         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6445         tmp &= ~(7 << 13);
6446         tmp |= (5 << 13);
6447         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6448
6449         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6450         tmp &= ~(7 << 13);
6451         tmp |= (5 << 13);
6452         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6453
6454         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6455         tmp &= ~0xFF;
6456         tmp |= 0x1C;
6457         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6458
6459         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6460         tmp &= ~0xFF;
6461         tmp |= 0x1C;
6462         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6463
6464         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6465         tmp &= ~(0xFF << 16);
6466         tmp |= (0x1C << 16);
6467         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6468
6469         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6470         tmp &= ~(0xFF << 16);
6471         tmp |= (0x1C << 16);
6472         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6473
6474         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6475         tmp |= (1 << 27);
6476         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6477
6478         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6479         tmp |= (1 << 27);
6480         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6481
6482         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6483         tmp &= ~(0xF << 28);
6484         tmp |= (4 << 28);
6485         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6486
6487         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6488         tmp &= ~(0xF << 28);
6489         tmp |= (4 << 28);
6490         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
6491 }
6492
6493 /* Implements 3 different sequences from BSpec chapter "Display iCLK
6494  * Programming" based on the parameters passed:
6495  * - Sequence to enable CLKOUT_DP
6496  * - Sequence to enable CLKOUT_DP without spread
6497  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
6498  */
6499 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
6500                                  bool with_fdi)
6501 {
6502         struct drm_i915_private *dev_priv = dev->dev_private;
6503         uint32_t reg, tmp;
6504
6505         if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
6506                 with_spread = true;
6507         if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
6508                  with_fdi, "LP PCH doesn't have FDI\n"))
6509                 with_fdi = false;
6510
6511         mutex_lock(&dev_priv->dpio_lock);
6512
6513         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6514         tmp &= ~SBI_SSCCTL_DISABLE;
6515         tmp |= SBI_SSCCTL_PATHALT;
6516         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6517
6518         udelay(24);
6519
6520         if (with_spread) {
6521                 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6522                 tmp &= ~SBI_SSCCTL_PATHALT;
6523                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6524
6525                 if (with_fdi) {
6526                         lpt_reset_fdi_mphy(dev_priv);
6527                         lpt_program_fdi_mphy(dev_priv);
6528                 }
6529         }
6530
6531         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6532                SBI_GEN0 : SBI_DBUFF0;
6533         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6534         tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6535         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6536
6537         mutex_unlock(&dev_priv->dpio_lock);
6538 }
6539
6540 /* Sequence to disable CLKOUT_DP */
6541 static void lpt_disable_clkout_dp(struct drm_device *dev)
6542 {
6543         struct drm_i915_private *dev_priv = dev->dev_private;
6544         uint32_t reg, tmp;
6545
6546         mutex_lock(&dev_priv->dpio_lock);
6547
6548         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6549                SBI_GEN0 : SBI_DBUFF0;
6550         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6551         tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6552         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6553
6554         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6555         if (!(tmp & SBI_SSCCTL_DISABLE)) {
6556                 if (!(tmp & SBI_SSCCTL_PATHALT)) {
6557                         tmp |= SBI_SSCCTL_PATHALT;
6558                         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6559                         udelay(32);
6560                 }
6561                 tmp |= SBI_SSCCTL_DISABLE;
6562                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6563         }
6564
6565         mutex_unlock(&dev_priv->dpio_lock);
6566 }
6567
6568 static void lpt_init_pch_refclk(struct drm_device *dev)
6569 {
6570         struct drm_mode_config *mode_config = &dev->mode_config;
6571         struct intel_encoder *encoder;
6572         bool has_vga = false;
6573
6574         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
6575                 switch (encoder->type) {
6576                 case INTEL_OUTPUT_ANALOG:
6577                         has_vga = true;
6578                         break;
6579                 }
6580         }
6581
6582         if (has_vga)
6583                 lpt_enable_clkout_dp(dev, true, true);
6584         else
6585                 lpt_disable_clkout_dp(dev);
6586 }
6587
6588 /*
6589  * Initialize reference clocks when the driver loads
6590  */
6591 void intel_init_pch_refclk(struct drm_device *dev)
6592 {
6593         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
6594                 ironlake_init_pch_refclk(dev);
6595         else if (HAS_PCH_LPT(dev))
6596                 lpt_init_pch_refclk(dev);
6597 }
6598
6599 static int ironlake_get_refclk(struct drm_crtc *crtc)
6600 {
6601         struct drm_device *dev = crtc->dev;
6602         struct drm_i915_private *dev_priv = dev->dev_private;
6603         struct intel_encoder *encoder;
6604         int num_connectors = 0;
6605         bool is_lvds = false;
6606
6607         for_each_encoder_on_crtc(dev, crtc, encoder) {
6608                 switch (encoder->type) {
6609                 case INTEL_OUTPUT_LVDS:
6610                         is_lvds = true;
6611                         break;
6612                 }
6613                 num_connectors++;
6614         }
6615
6616         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
6617                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
6618                               dev_priv->vbt.lvds_ssc_freq);
6619                 return dev_priv->vbt.lvds_ssc_freq;
6620         }
6621
6622         return 120000;
6623 }
6624
6625 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
6626 {
6627         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
6628         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6629         int pipe = intel_crtc->pipe;
6630         uint32_t val;
6631
6632         val = 0;
6633
6634         switch (intel_crtc->config.pipe_bpp) {
6635         case 18:
6636                 val |= PIPECONF_6BPC;
6637                 break;
6638         case 24:
6639                 val |= PIPECONF_8BPC;
6640                 break;
6641         case 30:
6642                 val |= PIPECONF_10BPC;
6643                 break;
6644         case 36:
6645                 val |= PIPECONF_12BPC;
6646                 break;
6647         default:
6648                 /* Case prevented by intel_choose_pipe_bpp_dither. */
6649                 BUG();
6650         }
6651
6652         if (intel_crtc->config.dither)
6653                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6654
6655         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6656                 val |= PIPECONF_INTERLACED_ILK;
6657         else
6658                 val |= PIPECONF_PROGRESSIVE;
6659
6660         if (intel_crtc->config.limited_color_range)
6661                 val |= PIPECONF_COLOR_RANGE_SELECT;
6662
6663         I915_WRITE(PIPECONF(pipe), val);
6664         POSTING_READ(PIPECONF(pipe));
6665 }
6666
6667 /*
6668  * Set up the pipe CSC unit.
6669  *
6670  * Currently only full range RGB to limited range RGB conversion
6671  * is supported, but eventually this should handle various
6672  * RGB<->YCbCr scenarios as well.
6673  */
6674 static void intel_set_pipe_csc(struct drm_crtc *crtc)
6675 {
6676         struct drm_device *dev = crtc->dev;
6677         struct drm_i915_private *dev_priv = dev->dev_private;
6678         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6679         int pipe = intel_crtc->pipe;
6680         uint16_t coeff = 0x7800; /* 1.0 */
6681
6682         /*
6683          * TODO: Check what kind of values actually come out of the pipe
6684          * with these coeff/postoff values and adjust to get the best
6685          * accuracy. Perhaps we even need to take the bpc value into
6686          * consideration.
6687          */
6688
6689         if (intel_crtc->config.limited_color_range)
6690                 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
6691
6692         /*
6693          * GY/GU and RY/RU should be the other way around according
6694          * to BSpec, but reality doesn't agree. Just set them up in
6695          * a way that results in the correct picture.
6696          */
6697         I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6698         I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6699
6700         I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6701         I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6702
6703         I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6704         I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6705
6706         I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6707         I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6708         I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6709
6710         if (INTEL_INFO(dev)->gen > 6) {
6711                 uint16_t postoff = 0;
6712
6713                 if (intel_crtc->config.limited_color_range)
6714                         postoff = (16 * (1 << 12) / 255) & 0x1fff;
6715
6716                 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6717                 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6718                 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6719
6720                 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6721         } else {
6722                 uint32_t mode = CSC_MODE_YUV_TO_RGB;
6723
6724                 if (intel_crtc->config.limited_color_range)
6725                         mode |= CSC_BLACK_SCREEN_OFFSET;
6726
6727                 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6728         }
6729 }
6730
6731 static void haswell_set_pipeconf(struct drm_crtc *crtc)
6732 {
6733         struct drm_device *dev = crtc->dev;
6734         struct drm_i915_private *dev_priv = dev->dev_private;
6735         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6736         enum i915_pipe pipe = intel_crtc->pipe;
6737         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6738         uint32_t val;
6739
6740         val = 0;
6741
6742         if (IS_HASWELL(dev) && intel_crtc->config.dither)
6743                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6744
6745         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6746                 val |= PIPECONF_INTERLACED_ILK;
6747         else
6748                 val |= PIPECONF_PROGRESSIVE;
6749
6750         I915_WRITE(PIPECONF(cpu_transcoder), val);
6751         POSTING_READ(PIPECONF(cpu_transcoder));
6752
6753         I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6754         POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6755
6756         if (IS_BROADWELL(dev)) {
6757                 val = 0;
6758
6759                 switch (intel_crtc->config.pipe_bpp) {
6760                 case 18:
6761                         val |= PIPEMISC_DITHER_6_BPC;
6762                         break;
6763                 case 24:
6764                         val |= PIPEMISC_DITHER_8_BPC;
6765                         break;
6766                 case 30:
6767                         val |= PIPEMISC_DITHER_10_BPC;
6768                         break;
6769                 case 36:
6770                         val |= PIPEMISC_DITHER_12_BPC;
6771                         break;
6772                 default:
6773                         /* Case prevented by pipe_config_set_bpp. */
6774                         BUG();
6775                 }
6776
6777                 if (intel_crtc->config.dither)
6778                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6779
6780                 I915_WRITE(PIPEMISC(pipe), val);
6781         }
6782 }
6783
6784 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6785                                     intel_clock_t *clock,
6786                                     bool *has_reduced_clock,
6787                                     intel_clock_t *reduced_clock)
6788 {
6789         struct drm_device *dev = crtc->dev;
6790         struct drm_i915_private *dev_priv = dev->dev_private;
6791         struct intel_encoder *intel_encoder;
6792         int refclk;
6793         const intel_limit_t *limit;
6794         bool ret, is_lvds = false;
6795
6796         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6797                 switch (intel_encoder->type) {
6798                 case INTEL_OUTPUT_LVDS:
6799                         is_lvds = true;
6800                         break;
6801                 }
6802         }
6803
6804         refclk = ironlake_get_refclk(crtc);
6805
6806         /*
6807          * Returns a set of divisors for the desired target clock with the given
6808          * refclk, or FALSE.  The returned values represent the clock equation:
6809          * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6810          */
6811         limit = intel_limit(crtc, refclk);
6812         ret = dev_priv->display.find_dpll(limit, crtc,
6813                                           to_intel_crtc(crtc)->config.port_clock,
6814                                           refclk, NULL, clock);
6815         if (!ret)
6816                 return false;
6817
6818         if (is_lvds && dev_priv->lvds_downclock_avail) {
6819                 /*
6820                  * Ensure we match the reduced clock's P to the target clock.
6821                  * If the clocks don't match, we can't switch the display clock
6822                  * by using the FP0/FP1. In such case we will disable the LVDS
6823                  * downclock feature.
6824                 */
6825                 *has_reduced_clock =
6826                         dev_priv->display.find_dpll(limit, crtc,
6827                                                     dev_priv->lvds_downclock,
6828                                                     refclk, clock,
6829                                                     reduced_clock);
6830         }
6831
6832         return true;
6833 }
6834
6835 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6836 {
6837         /*
6838          * Account for spread spectrum to avoid
6839          * oversubscribing the link. Max center spread
6840          * is 2.5%; use 5% for safety's sake.
6841          */
6842         u32 bps = target_clock * bpp * 21 / 20;
6843         return DIV_ROUND_UP(bps, link_bw * 8);
6844 }
6845
6846 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6847 {
6848         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
6849 }
6850
6851 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6852                                       u32 *fp,
6853                                       intel_clock_t *reduced_clock, u32 *fp2)
6854 {
6855         struct drm_crtc *crtc = &intel_crtc->base;
6856         struct drm_device *dev = crtc->dev;
6857         struct drm_i915_private *dev_priv = dev->dev_private;
6858         struct intel_encoder *intel_encoder;
6859         uint32_t dpll;
6860         int factor, num_connectors = 0;
6861         bool is_lvds = false, is_sdvo = false;
6862
6863         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6864                 switch (intel_encoder->type) {
6865                 case INTEL_OUTPUT_LVDS:
6866                         is_lvds = true;
6867                         break;
6868                 case INTEL_OUTPUT_SDVO:
6869                 case INTEL_OUTPUT_HDMI:
6870                         is_sdvo = true;
6871                         break;
6872                 }
6873
6874                 num_connectors++;
6875         }
6876
6877         /* Enable autotuning of the PLL clock (if permissible) */
6878         factor = 21;
6879         if (is_lvds) {
6880                 if ((intel_panel_use_ssc(dev_priv) &&
6881                      dev_priv->vbt.lvds_ssc_freq == 100000) ||
6882                     (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6883                         factor = 25;
6884         } else if (intel_crtc->config.sdvo_tv_clock)
6885                 factor = 20;
6886
6887         if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
6888                 *fp |= FP_CB_TUNE;
6889
6890         if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
6891                 *fp2 |= FP_CB_TUNE;
6892
6893         dpll = 0;
6894
6895         if (is_lvds)
6896                 dpll |= DPLLB_MODE_LVDS;
6897         else
6898                 dpll |= DPLLB_MODE_DAC_SERIAL;
6899
6900         dpll |= (intel_crtc->config.pixel_multiplier - 1)
6901                 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
6902
6903         if (is_sdvo)
6904                 dpll |= DPLL_SDVO_HIGH_SPEED;
6905         if (intel_crtc->config.has_dp_encoder)
6906                 dpll |= DPLL_SDVO_HIGH_SPEED;
6907
6908         /* compute bitmask from p1 value */
6909         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6910         /* also FPA1 */
6911         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6912
6913         switch (intel_crtc->config.dpll.p2) {
6914         case 5:
6915                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6916                 break;
6917         case 7:
6918                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6919                 break;
6920         case 10:
6921                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6922                 break;
6923         case 14:
6924                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6925                 break;
6926         }
6927
6928         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6929                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6930         else
6931                 dpll |= PLL_REF_INPUT_DREFCLK;
6932
6933         return dpll | DPLL_VCO_ENABLE;
6934 }
6935
6936 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6937                                   int x, int y,
6938                                   struct drm_framebuffer *fb)
6939 {
6940         struct drm_device *dev = crtc->dev;
6941         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6942         int num_connectors = 0;
6943         intel_clock_t clock, reduced_clock;
6944         u32 dpll = 0, fp = 0, fp2 = 0;
6945         bool ok, has_reduced_clock = false;
6946         bool is_lvds = false;
6947         struct intel_encoder *encoder;
6948         struct intel_shared_dpll *pll;
6949
6950         for_each_encoder_on_crtc(dev, crtc, encoder) {
6951                 switch (encoder->type) {
6952                 case INTEL_OUTPUT_LVDS:
6953                         is_lvds = true;
6954                         break;
6955                 }
6956
6957                 num_connectors++;
6958         }
6959
6960         WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
6961              "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
6962
6963         ok = ironlake_compute_clocks(crtc, &clock,
6964                                      &has_reduced_clock, &reduced_clock);
6965         if (!ok && !intel_crtc->config.clock_set) {
6966                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
6967                 return -EINVAL;
6968         }
6969         /* Compat-code for transition, will disappear. */
6970         if (!intel_crtc->config.clock_set) {
6971                 intel_crtc->config.dpll.n = clock.n;
6972                 intel_crtc->config.dpll.m1 = clock.m1;
6973                 intel_crtc->config.dpll.m2 = clock.m2;
6974                 intel_crtc->config.dpll.p1 = clock.p1;
6975                 intel_crtc->config.dpll.p2 = clock.p2;
6976         }
6977
6978         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
6979         if (intel_crtc->config.has_pch_encoder) {
6980                 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
6981                 if (has_reduced_clock)
6982                         fp2 = i9xx_dpll_compute_fp(&reduced_clock);
6983
6984                 dpll = ironlake_compute_dpll(intel_crtc,
6985                                              &fp, &reduced_clock,
6986                                              has_reduced_clock ? &fp2 : NULL);
6987
6988                 intel_crtc->config.dpll_hw_state.dpll = dpll;
6989                 intel_crtc->config.dpll_hw_state.fp0 = fp;
6990                 if (has_reduced_clock)
6991                         intel_crtc->config.dpll_hw_state.fp1 = fp2;
6992                 else
6993                         intel_crtc->config.dpll_hw_state.fp1 = fp;
6994
6995                 pll = intel_get_shared_dpll(intel_crtc);
6996                 if (pll == NULL) {
6997                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
6998                                          pipe_name(intel_crtc->pipe));
6999                         return -EINVAL;
7000                 }
7001         } else
7002                 intel_put_shared_dpll(intel_crtc);
7003
7004         if (is_lvds && has_reduced_clock && i915.powersave)
7005                 intel_crtc->lowfreq_avail = true;
7006         else
7007                 intel_crtc->lowfreq_avail = false;
7008
7009         return 0;
7010 }
7011
7012 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7013                                          struct intel_link_m_n *m_n)
7014 {
7015         struct drm_device *dev = crtc->base.dev;
7016         struct drm_i915_private *dev_priv = dev->dev_private;
7017         enum i915_pipe pipe = crtc->pipe;
7018
7019         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7020         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7021         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7022                 & ~TU_SIZE_MASK;
7023         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7024         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7025                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7026 }
7027
7028 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7029                                          enum transcoder transcoder,
7030                                          struct intel_link_m_n *m_n)
7031 {
7032         struct drm_device *dev = crtc->base.dev;
7033         struct drm_i915_private *dev_priv = dev->dev_private;
7034         enum i915_pipe pipe = crtc->pipe;
7035
7036         if (INTEL_INFO(dev)->gen >= 5) {
7037                 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7038                 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7039                 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
7040                         & ~TU_SIZE_MASK;
7041                 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7042                 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7043                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7044         } else {
7045                 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7046                 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7047                 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7048                         & ~TU_SIZE_MASK;
7049                 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7050                 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7051                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7052         }
7053 }
7054
7055 void intel_dp_get_m_n(struct intel_crtc *crtc,
7056                       struct intel_crtc_config *pipe_config)
7057 {
7058         if (crtc->config.has_pch_encoder)
7059                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7060         else
7061                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7062                                              &pipe_config->dp_m_n);
7063 }
7064
7065 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7066                                         struct intel_crtc_config *pipe_config)
7067 {
7068         intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7069                                      &pipe_config->fdi_m_n);
7070 }
7071
7072 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7073                                      struct intel_crtc_config *pipe_config)
7074 {
7075         struct drm_device *dev = crtc->base.dev;
7076         struct drm_i915_private *dev_priv = dev->dev_private;
7077         uint32_t tmp;
7078
7079         tmp = I915_READ(PF_CTL(crtc->pipe));
7080
7081         if (tmp & PF_ENABLE) {
7082                 pipe_config->pch_pfit.enabled = true;
7083                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7084                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7085
7086                 /* We currently do not free assignements of panel fitters on
7087                  * ivb/hsw (since we don't use the higher upscaling modes which
7088                  * differentiates them) so just WARN about this case for now. */
7089                 if (IS_GEN7(dev)) {
7090                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7091                                 PF_PIPE_SEL_IVB(crtc->pipe));
7092                 }
7093         }
7094 }
7095
7096 static void ironlake_get_plane_config(struct intel_crtc *crtc,
7097                                       struct intel_plane_config *plane_config)
7098 {
7099         struct drm_device *dev = crtc->base.dev;
7100         struct drm_i915_private *dev_priv = dev->dev_private;
7101         u32 val, base, offset;
7102         int pipe = crtc->pipe, plane = crtc->plane;
7103         int fourcc, pixel_format;
7104         int aligned_height;
7105
7106         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7107         if (!crtc->base.primary->fb) {
7108                 DRM_DEBUG_KMS("failed to alloc fb\n");
7109                 return;
7110         }
7111
7112         val = I915_READ(DSPCNTR(plane));
7113
7114         if (INTEL_INFO(dev)->gen >= 4)
7115                 if (val & DISPPLANE_TILED)
7116                         plane_config->tiled = true;
7117
7118         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7119         fourcc = intel_format_to_fourcc(pixel_format);
7120         crtc->base.primary->fb->pixel_format = fourcc;
7121         crtc->base.primary->fb->bits_per_pixel =
7122                 drm_format_plane_cpp(fourcc, 0) * 8;
7123
7124         base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7125         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7126                 offset = I915_READ(DSPOFFSET(plane));
7127         } else {
7128                 if (plane_config->tiled)
7129                         offset = I915_READ(DSPTILEOFF(plane));
7130                 else
7131                         offset = I915_READ(DSPLINOFF(plane));
7132         }
7133         plane_config->base = base;
7134
7135         val = I915_READ(PIPESRC(pipe));
7136         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7137         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7138
7139         val = I915_READ(DSPSTRIDE(pipe));
7140         crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
7141
7142         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7143                                             plane_config->tiled);
7144
7145         plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
7146                                    aligned_height, PAGE_SIZE);
7147
7148         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7149                       pipe, plane, crtc->base.primary->fb->width,
7150                       crtc->base.primary->fb->height,
7151                       crtc->base.primary->fb->bits_per_pixel, base,
7152                       crtc->base.primary->fb->pitches[0],
7153                       plane_config->size);
7154 }
7155
7156 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7157                                      struct intel_crtc_config *pipe_config)
7158 {
7159         struct drm_device *dev = crtc->base.dev;
7160         struct drm_i915_private *dev_priv = dev->dev_private;
7161         uint32_t tmp;
7162
7163         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7164         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7165
7166         tmp = I915_READ(PIPECONF(crtc->pipe));
7167         if (!(tmp & PIPECONF_ENABLE))
7168                 return false;
7169
7170         switch (tmp & PIPECONF_BPC_MASK) {
7171         case PIPECONF_6BPC:
7172                 pipe_config->pipe_bpp = 18;
7173                 break;
7174         case PIPECONF_8BPC:
7175                 pipe_config->pipe_bpp = 24;
7176                 break;
7177         case PIPECONF_10BPC:
7178                 pipe_config->pipe_bpp = 30;
7179                 break;
7180         case PIPECONF_12BPC:
7181                 pipe_config->pipe_bpp = 36;
7182                 break;
7183         default:
7184                 break;
7185         }
7186
7187         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7188                 pipe_config->limited_color_range = true;
7189
7190         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7191                 struct intel_shared_dpll *pll;
7192
7193                 pipe_config->has_pch_encoder = true;
7194
7195                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7196                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7197                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7198
7199                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7200
7201                 if (HAS_PCH_IBX(dev_priv->dev)) {
7202                         pipe_config->shared_dpll =
7203                                 (enum intel_dpll_id) crtc->pipe;
7204                 } else {
7205                         tmp = I915_READ(PCH_DPLL_SEL);
7206                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7207                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7208                         else
7209                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7210                 }
7211
7212                 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7213
7214                 WARN_ON(!pll->get_hw_state(dev_priv, pll,
7215                                            &pipe_config->dpll_hw_state));
7216
7217                 tmp = pipe_config->dpll_hw_state.dpll;
7218                 pipe_config->pixel_multiplier =
7219                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7220                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7221
7222                 ironlake_pch_clock_get(crtc, pipe_config);
7223         } else {
7224                 pipe_config->pixel_multiplier = 1;
7225         }
7226
7227         intel_get_pipe_timings(crtc, pipe_config);
7228
7229         ironlake_get_pfit_config(crtc, pipe_config);
7230
7231         return true;
7232 }
7233
7234 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7235 {
7236         struct drm_device *dev = dev_priv->dev;
7237         struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
7238         struct intel_crtc *crtc;
7239
7240         for_each_intel_crtc(dev, crtc)
7241                 WARN(crtc->active, "CRTC for pipe %c enabled\n",
7242                      pipe_name(crtc->pipe));
7243
7244         WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
7245         WARN(plls->spll_refcount, "SPLL enabled\n");
7246         WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
7247         WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
7248         WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7249         WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7250              "CPU PWM1 enabled\n");
7251         WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7252              "CPU PWM2 enabled\n");
7253         WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7254              "PCH PWM1 enabled\n");
7255         WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7256              "Utility pin enabled\n");
7257         WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7258
7259         /*
7260          * In theory we can still leave IRQs enabled, as long as only the HPD
7261          * interrupts remain enabled. We used to check for that, but since it's
7262          * gen-specific and since we only disable LCPLL after we fully disable
7263          * the interrupts, the check below should be enough.
7264          */
7265         WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
7266 }
7267
7268 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7269 {
7270         struct drm_device *dev = dev_priv->dev;
7271
7272         if (IS_HASWELL(dev)) {
7273                 mutex_lock(&dev_priv->rps.hw_lock);
7274                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7275                                             val))
7276                         DRM_ERROR("Failed to disable D_COMP\n");
7277                 mutex_unlock(&dev_priv->rps.hw_lock);
7278         } else {
7279                 I915_WRITE(D_COMP, val);
7280         }
7281         POSTING_READ(D_COMP);
7282 }
7283
7284 /*
7285  * This function implements pieces of two sequences from BSpec:
7286  * - Sequence for display software to disable LCPLL
7287  * - Sequence for display software to allow package C8+
7288  * The steps implemented here are just the steps that actually touch the LCPLL
7289  * register. Callers should take care of disabling all the display engine
7290  * functions, doing the mode unset, fixing interrupts, etc.
7291  */
7292 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7293                               bool switch_to_fclk, bool allow_power_down)
7294 {
7295         uint32_t val;
7296
7297         assert_can_disable_lcpll(dev_priv);
7298
7299         val = I915_READ(LCPLL_CTL);
7300
7301         if (switch_to_fclk) {
7302                 val |= LCPLL_CD_SOURCE_FCLK;
7303                 I915_WRITE(LCPLL_CTL, val);
7304
7305                 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7306                                        LCPLL_CD_SOURCE_FCLK_DONE, 1))
7307                         DRM_ERROR("Switching to FCLK failed\n");
7308
7309                 val = I915_READ(LCPLL_CTL);
7310         }
7311
7312         val |= LCPLL_PLL_DISABLE;
7313         I915_WRITE(LCPLL_CTL, val);
7314         POSTING_READ(LCPLL_CTL);
7315
7316         if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7317                 DRM_ERROR("LCPLL still locked\n");
7318
7319         val = I915_READ(D_COMP);
7320         val |= D_COMP_COMP_DISABLE;
7321         hsw_write_dcomp(dev_priv, val);
7322         ndelay(100);
7323
7324         if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
7325                 DRM_ERROR("D_COMP RCOMP still in progress\n");
7326
7327         if (allow_power_down) {
7328                 val = I915_READ(LCPLL_CTL);
7329                 val |= LCPLL_POWER_DOWN_ALLOW;
7330                 I915_WRITE(LCPLL_CTL, val);
7331                 POSTING_READ(LCPLL_CTL);
7332         }
7333 }
7334
7335 /*
7336  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7337  * source.
7338  */
7339 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7340 {
7341         uint32_t val;
7342
7343         val = I915_READ(LCPLL_CTL);
7344
7345         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7346                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7347                 return;
7348
7349         /*
7350          * Make sure we're not on PC8 state before disabling PC8, otherwise
7351          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7352          *
7353          * The other problem is that hsw_restore_lcpll() is called as part of
7354          * the runtime PM resume sequence, so we can't just call
7355          * gen6_gt_force_wake_get() because that function calls
7356          * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7357          * while we are on the resume sequence. So to solve this problem we have
7358          * to call special forcewake code that doesn't touch runtime PM and
7359          * doesn't enable the forcewake delayed work.
7360          */
7361         lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
7362         if (dev_priv->uncore.forcewake_count++ == 0)
7363                 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7364         lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
7365
7366         if (val & LCPLL_POWER_DOWN_ALLOW) {
7367                 val &= ~LCPLL_POWER_DOWN_ALLOW;
7368                 I915_WRITE(LCPLL_CTL, val);
7369                 POSTING_READ(LCPLL_CTL);
7370         }
7371
7372         val = I915_READ(D_COMP);
7373         val |= D_COMP_COMP_FORCE;
7374         val &= ~D_COMP_COMP_DISABLE;
7375         hsw_write_dcomp(dev_priv, val);
7376
7377         val = I915_READ(LCPLL_CTL);
7378         val &= ~LCPLL_PLL_DISABLE;
7379         I915_WRITE(LCPLL_CTL, val);
7380
7381         if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7382                 DRM_ERROR("LCPLL not locked yet\n");
7383
7384         if (val & LCPLL_CD_SOURCE_FCLK) {
7385                 val = I915_READ(LCPLL_CTL);
7386                 val &= ~LCPLL_CD_SOURCE_FCLK;
7387                 I915_WRITE(LCPLL_CTL, val);
7388
7389                 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7390                                         LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7391                         DRM_ERROR("Switching back to LCPLL failed\n");
7392         }
7393
7394         /* See the big comment above. */
7395         lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
7396         if (--dev_priv->uncore.forcewake_count == 0)
7397                 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7398         lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
7399 }
7400
7401 /*
7402  * Package states C8 and deeper are really deep PC states that can only be
7403  * reached when all the devices on the system allow it, so even if the graphics
7404  * device allows PC8+, it doesn't mean the system will actually get to these
7405  * states. Our driver only allows PC8+ when going into runtime PM.
7406  *
7407  * The requirements for PC8+ are that all the outputs are disabled, the power
7408  * well is disabled and most interrupts are disabled, and these are also
7409  * requirements for runtime PM. When these conditions are met, we manually do
7410  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7411  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
7412  * hang the machine.
7413  *
7414  * When we really reach PC8 or deeper states (not just when we allow it) we lose
7415  * the state of some registers, so when we come back from PC8+ we need to
7416  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7417  * need to take care of the registers kept by RC6. Notice that this happens even
7418  * if we don't put the device in PCI D3 state (which is what currently happens
7419  * because of the runtime PM support).
7420  *
7421  * For more, read "Display Sequences for Package C8" on the hardware
7422  * documentation.
7423  */
7424 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7425 {
7426         struct drm_device *dev = dev_priv->dev;
7427         uint32_t val;
7428
7429         DRM_DEBUG_KMS("Enabling package C8+\n");
7430
7431         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7432                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
7433                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7434                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7435         }
7436
7437         lpt_disable_clkout_dp(dev);
7438         hsw_disable_lcpll(dev_priv, true, true);
7439 }
7440
7441 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7442 {
7443         struct drm_device *dev = dev_priv->dev;
7444         uint32_t val;
7445
7446         DRM_DEBUG_KMS("Disabling package C8+\n");
7447
7448         hsw_restore_lcpll(dev_priv);
7449         lpt_init_pch_refclk(dev);
7450
7451         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7452                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
7453                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
7454                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7455         }
7456
7457         intel_prepare_ddi(dev);
7458 }
7459
7460 static void snb_modeset_global_resources(struct drm_device *dev)
7461 {
7462         modeset_update_crtc_power_domains(dev);
7463 }
7464
7465 static void haswell_modeset_global_resources(struct drm_device *dev)
7466 {
7467         modeset_update_crtc_power_domains(dev);
7468 }
7469
7470 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7471                                  int x, int y,
7472                                  struct drm_framebuffer *fb)
7473 {
7474         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7475
7476         if (!intel_ddi_pll_select(intel_crtc))
7477                 return -EINVAL;
7478         intel_ddi_pll_enable(intel_crtc);
7479
7480         intel_crtc->lowfreq_avail = false;
7481
7482         return 0;
7483 }
7484
7485 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7486                                     struct intel_crtc_config *pipe_config)
7487 {
7488         struct drm_device *dev = crtc->base.dev;
7489         struct drm_i915_private *dev_priv = dev->dev_private;
7490         enum intel_display_power_domain pfit_domain;
7491         uint32_t tmp;
7492
7493         if (!intel_display_power_enabled(dev_priv,
7494                                          POWER_DOMAIN_PIPE(crtc->pipe)))
7495                 return false;
7496
7497         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7498         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7499
7500         tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
7501         if (tmp & TRANS_DDI_FUNC_ENABLE) {
7502                 enum i915_pipe trans_edp_pipe;
7503                 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7504                 default:
7505                         WARN(1, "unknown pipe linked to edp transcoder\n");
7506                 case TRANS_DDI_EDP_INPUT_A_ONOFF:
7507                 case TRANS_DDI_EDP_INPUT_A_ON:
7508                         trans_edp_pipe = PIPE_A;
7509                         break;
7510                 case TRANS_DDI_EDP_INPUT_B_ONOFF:
7511                         trans_edp_pipe = PIPE_B;
7512                         break;
7513                 case TRANS_DDI_EDP_INPUT_C_ONOFF:
7514                         trans_edp_pipe = PIPE_C;
7515                         break;
7516                 }
7517
7518                 if (trans_edp_pipe == crtc->pipe)
7519                         pipe_config->cpu_transcoder = TRANSCODER_EDP;
7520         }
7521
7522         if (!intel_display_power_enabled(dev_priv,
7523                         POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7524                 return false;
7525
7526         tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
7527         if (!(tmp & PIPECONF_ENABLE))
7528                 return false;
7529
7530         /*
7531          * Haswell has only FDI/PCH transcoder A. It is which is connected to
7532          * DDI E. So just check whether this pipe is wired to DDI E and whether
7533          * the PCH transcoder is on.
7534          */
7535         tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7536         if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
7537             I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7538                 pipe_config->has_pch_encoder = true;
7539
7540                 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7541                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7542                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7543
7544                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7545         }
7546
7547         intel_get_pipe_timings(crtc, pipe_config);
7548
7549         pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7550         if (intel_display_power_enabled(dev_priv, pfit_domain))
7551                 ironlake_get_pfit_config(crtc, pipe_config);
7552
7553         if (IS_HASWELL(dev))
7554                 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7555                         (I915_READ(IPS_CTL) & IPS_ENABLE);
7556
7557         pipe_config->pixel_multiplier = 1;
7558
7559         return true;
7560 }
7561
7562 static struct {
7563         int clock;
7564         u32 config;
7565 } hdmi_audio_clock[] = {
7566         { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
7567         { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
7568         { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
7569         { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
7570         { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
7571         { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
7572         { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
7573         { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
7574         { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
7575         { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
7576 };
7577
7578 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7579 static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7580 {
7581         int i;
7582
7583         for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7584                 if (mode->clock == hdmi_audio_clock[i].clock)
7585                         break;
7586         }
7587
7588         if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7589                 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7590                 i = 1;
7591         }
7592
7593         DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7594                       hdmi_audio_clock[i].clock,
7595                       hdmi_audio_clock[i].config);
7596
7597         return hdmi_audio_clock[i].config;
7598 }
7599
7600 static bool intel_eld_uptodate(struct drm_connector *connector,
7601                                int reg_eldv, uint32_t bits_eldv,
7602                                int reg_elda, uint32_t bits_elda,
7603                                int reg_edid)
7604 {
7605         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7606         uint8_t *eld = connector->eld;
7607         uint32_t i;
7608
7609         i = I915_READ(reg_eldv);
7610         i &= bits_eldv;
7611
7612         if (!eld[0])
7613                 return !i;
7614
7615         if (!i)
7616                 return false;
7617
7618         i = I915_READ(reg_elda);
7619         i &= ~bits_elda;
7620         I915_WRITE(reg_elda, i);
7621
7622         for (i = 0; i < eld[2]; i++)
7623                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7624                         return false;
7625
7626         return true;
7627 }
7628
7629 static void g4x_write_eld(struct drm_connector *connector,
7630                           struct drm_crtc *crtc,
7631                           struct drm_display_mode *mode)
7632 {
7633         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7634         uint8_t *eld = connector->eld;
7635         uint32_t eldv;
7636         uint32_t len;
7637         uint32_t i;
7638
7639         i = I915_READ(G4X_AUD_VID_DID);
7640
7641         if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7642                 eldv = G4X_ELDV_DEVCL_DEVBLC;
7643         else
7644                 eldv = G4X_ELDV_DEVCTG;
7645
7646         if (intel_eld_uptodate(connector,
7647                                G4X_AUD_CNTL_ST, eldv,
7648                                G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7649                                G4X_HDMIW_HDMIEDID))
7650                 return;
7651
7652         i = I915_READ(G4X_AUD_CNTL_ST);
7653         i &= ~(eldv | G4X_ELD_ADDR);
7654         len = (i >> 9) & 0x1f;          /* ELD buffer size */
7655         I915_WRITE(G4X_AUD_CNTL_ST, i);
7656
7657         if (!eld[0])
7658                 return;
7659
7660         len = min_t(uint8_t, eld[2], len);
7661         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7662         for (i = 0; i < len; i++)
7663                 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
7664
7665         i = I915_READ(G4X_AUD_CNTL_ST);
7666         i |= eldv;
7667         I915_WRITE(G4X_AUD_CNTL_ST, i);
7668 }
7669
7670 static void haswell_write_eld(struct drm_connector *connector,
7671                               struct drm_crtc *crtc,
7672                               struct drm_display_mode *mode)
7673 {
7674         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7675         uint8_t *eld = connector->eld;
7676         uint32_t eldv;
7677         uint32_t i;
7678         int len;
7679         int pipe = to_intel_crtc(crtc)->pipe;
7680         int tmp;
7681
7682         int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
7683         int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
7684         int aud_config = HSW_AUD_CFG(pipe);
7685         int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7686
7687         /* Audio output enable */
7688         DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7689         tmp = I915_READ(aud_cntrl_st2);
7690         tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7691         I915_WRITE(aud_cntrl_st2, tmp);
7692         POSTING_READ(aud_cntrl_st2);
7693
7694         assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
7695
7696         /* Set ELD valid state */
7697         tmp = I915_READ(aud_cntrl_st2);
7698         DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
7699         tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
7700         I915_WRITE(aud_cntrl_st2, tmp);
7701         tmp = I915_READ(aud_cntrl_st2);
7702         DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
7703
7704         /* Enable HDMI mode */
7705         tmp = I915_READ(aud_config);
7706         DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
7707         /* clear N_programing_enable and N_value_index */
7708         tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
7709         I915_WRITE(aud_config, tmp);
7710
7711         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7712
7713         eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7714
7715         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7716                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7717                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7718                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7719         } else {
7720                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7721         }
7722
7723         if (intel_eld_uptodate(connector,
7724                                aud_cntrl_st2, eldv,
7725                                aud_cntl_st, IBX_ELD_ADDRESS,
7726                                hdmiw_hdmiedid))
7727                 return;
7728
7729         i = I915_READ(aud_cntrl_st2);
7730         i &= ~eldv;
7731         I915_WRITE(aud_cntrl_st2, i);
7732
7733         if (!eld[0])
7734                 return;
7735
7736         i = I915_READ(aud_cntl_st);
7737         i &= ~IBX_ELD_ADDRESS;
7738         I915_WRITE(aud_cntl_st, i);
7739         i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
7740         DRM_DEBUG_DRIVER("port num:%d\n", i);
7741
7742         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7743         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7744         for (i = 0; i < len; i++)
7745                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7746
7747         i = I915_READ(aud_cntrl_st2);
7748         i |= eldv;
7749         I915_WRITE(aud_cntrl_st2, i);
7750
7751 }
7752
7753 static void ironlake_write_eld(struct drm_connector *connector,
7754                                struct drm_crtc *crtc,
7755                                struct drm_display_mode *mode)
7756 {
7757         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7758         uint8_t *eld = connector->eld;
7759         uint32_t eldv;
7760         uint32_t i;
7761         int len;
7762         int hdmiw_hdmiedid;
7763         int aud_config;
7764         int aud_cntl_st;
7765         int aud_cntrl_st2;
7766         int pipe = to_intel_crtc(crtc)->pipe;
7767
7768         if (HAS_PCH_IBX(connector->dev)) {
7769                 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
7770                 aud_config = IBX_AUD_CFG(pipe);
7771                 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
7772                 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7773         } else if (IS_VALLEYVIEW(connector->dev)) {
7774                 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7775                 aud_config = VLV_AUD_CFG(pipe);
7776                 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7777                 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
7778         } else {
7779                 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7780                 aud_config = CPT_AUD_CFG(pipe);
7781                 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
7782                 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
7783         }
7784
7785         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7786
7787         if (IS_VALLEYVIEW(connector->dev))  {
7788                 struct intel_encoder *intel_encoder;
7789                 struct intel_digital_port *intel_dig_port;
7790
7791                 intel_encoder = intel_attached_encoder(connector);
7792                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7793                 i = intel_dig_port->port;
7794         } else {
7795                 i = I915_READ(aud_cntl_st);
7796                 i = (i >> 29) & DIP_PORT_SEL_MASK;
7797                 /* DIP_Port_Select, 0x1 = PortB */
7798         }
7799
7800         if (!i) {
7801                 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7802                 /* operate blindly on all ports */
7803                 eldv = IBX_ELD_VALIDB;
7804                 eldv |= IBX_ELD_VALIDB << 4;
7805                 eldv |= IBX_ELD_VALIDB << 8;
7806         } else {
7807                 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
7808                 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
7809         }
7810
7811         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7812                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7813                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7814                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7815         } else {
7816                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7817         }
7818
7819         if (intel_eld_uptodate(connector,
7820                                aud_cntrl_st2, eldv,
7821                                aud_cntl_st, IBX_ELD_ADDRESS,
7822                                hdmiw_hdmiedid))
7823                 return;
7824
7825         i = I915_READ(aud_cntrl_st2);
7826         i &= ~eldv;
7827         I915_WRITE(aud_cntrl_st2, i);
7828
7829         if (!eld[0])
7830                 return;
7831
7832         i = I915_READ(aud_cntl_st);
7833         i &= ~IBX_ELD_ADDRESS;
7834         I915_WRITE(aud_cntl_st, i);
7835
7836         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7837         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7838         for (i = 0; i < len; i++)
7839                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7840
7841         i = I915_READ(aud_cntrl_st2);
7842         i |= eldv;
7843         I915_WRITE(aud_cntrl_st2, i);
7844 }
7845
7846 void intel_write_eld(struct drm_encoder *encoder,
7847                      struct drm_display_mode *mode)
7848 {
7849         struct drm_crtc *crtc = encoder->crtc;
7850         struct drm_connector *connector;
7851         struct drm_device *dev = encoder->dev;
7852         struct drm_i915_private *dev_priv = dev->dev_private;
7853
7854         connector = drm_select_eld(encoder, mode);
7855         if (!connector)
7856                 return;
7857
7858         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7859                          connector->base.id,
7860                          connector->name,
7861                          connector->encoder->base.id,
7862                          connector->encoder->name);
7863
7864         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7865
7866         if (dev_priv->display.write_eld)
7867                 dev_priv->display.write_eld(connector, crtc, mode);
7868 }
7869
7870 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
7871 {
7872         struct drm_device *dev = crtc->dev;
7873         struct drm_i915_private *dev_priv = dev->dev_private;
7874         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7875         uint32_t cntl;
7876
7877         if (base != intel_crtc->cursor_base) {
7878                 /* On these chipsets we can only modify the base whilst
7879                  * the cursor is disabled.
7880                  */
7881                 if (intel_crtc->cursor_cntl) {
7882                         I915_WRITE(_CURACNTR, 0);
7883                         POSTING_READ(_CURACNTR);
7884                         intel_crtc->cursor_cntl = 0;
7885                 }
7886
7887                 I915_WRITE(_CURABASE, base);
7888                 POSTING_READ(_CURABASE);
7889         }
7890
7891         /* XXX width must be 64, stride 256 => 0x00 << 28 */
7892         cntl = 0;
7893         if (base)
7894                 cntl = (CURSOR_ENABLE |
7895                         CURSOR_GAMMA_ENABLE |
7896                         CURSOR_FORMAT_ARGB);
7897         if (intel_crtc->cursor_cntl != cntl) {
7898                 I915_WRITE(_CURACNTR, cntl);
7899                 POSTING_READ(_CURACNTR);
7900                 intel_crtc->cursor_cntl = cntl;
7901         }
7902 }
7903
7904 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7905 {
7906         struct drm_device *dev = crtc->dev;
7907         struct drm_i915_private *dev_priv = dev->dev_private;
7908         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7909         int pipe = intel_crtc->pipe;
7910         uint32_t cntl;
7911
7912         cntl = 0;
7913         if (base) {
7914                 cntl = MCURSOR_GAMMA_ENABLE;
7915                 switch (intel_crtc->cursor_width) {
7916                         case 64:
7917                                 cntl |= CURSOR_MODE_64_ARGB_AX;
7918                                 break;
7919                         case 128:
7920                                 cntl |= CURSOR_MODE_128_ARGB_AX;
7921                                 break;
7922                         case 256:
7923                                 cntl |= CURSOR_MODE_256_ARGB_AX;
7924                                 break;
7925                         default:
7926                                 WARN_ON(1);
7927                                 return;
7928                 }
7929                 cntl |= pipe << 28; /* Connect to correct pipe */
7930         }
7931         if (intel_crtc->cursor_cntl != cntl) {
7932                 I915_WRITE(CURCNTR(pipe), cntl);
7933                 POSTING_READ(CURCNTR(pipe));
7934                 intel_crtc->cursor_cntl = cntl;
7935         }
7936
7937         /* and commit changes on next vblank */
7938         I915_WRITE(CURBASE(pipe), base);
7939         POSTING_READ(CURBASE(pipe));
7940 }
7941
7942 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7943 {
7944         struct drm_device *dev = crtc->dev;
7945         struct drm_i915_private *dev_priv = dev->dev_private;
7946         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7947         int pipe = intel_crtc->pipe;
7948         uint32_t cntl;
7949
7950         cntl = 0;
7951         if (base) {
7952                 cntl = MCURSOR_GAMMA_ENABLE;
7953                 switch (intel_crtc->cursor_width) {
7954                         case 64:
7955                                 cntl |= CURSOR_MODE_64_ARGB_AX;
7956                                 break;
7957                         case 128:
7958                                 cntl |= CURSOR_MODE_128_ARGB_AX;
7959                                 break;
7960                         case 256:
7961                                 cntl |= CURSOR_MODE_256_ARGB_AX;
7962                                 break;
7963                         default:
7964                                 WARN_ON(1);
7965                                 return;
7966                 }
7967         }
7968         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
7969                 cntl |= CURSOR_PIPE_CSC_ENABLE;
7970
7971         if (intel_crtc->cursor_cntl != cntl) {
7972                 I915_WRITE(CURCNTR(pipe), cntl);
7973                 POSTING_READ(CURCNTR(pipe));
7974                 intel_crtc->cursor_cntl = cntl;
7975         }
7976
7977         /* and commit changes on next vblank */
7978         I915_WRITE(CURBASE(pipe), base);
7979         POSTING_READ(CURBASE(pipe));
7980 }
7981
7982 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
7983 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7984                                      bool on)
7985 {
7986         struct drm_device *dev = crtc->dev;
7987         struct drm_i915_private *dev_priv = dev->dev_private;
7988         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7989         int pipe = intel_crtc->pipe;
7990         int x = intel_crtc->cursor_x;
7991         int y = intel_crtc->cursor_y;
7992         u32 base = 0, pos = 0;
7993
7994         if (on)
7995                 base = intel_crtc->cursor_addr;
7996
7997         if (x >= intel_crtc->config.pipe_src_w)
7998                 base = 0;
7999
8000         if (y >= intel_crtc->config.pipe_src_h)
8001                 base = 0;
8002
8003         if (x < 0) {
8004                 if (x + intel_crtc->cursor_width <= 0)
8005                         base = 0;
8006
8007                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8008                 x = -x;
8009         }
8010         pos |= x << CURSOR_X_SHIFT;
8011
8012         if (y < 0) {
8013                 if (y + intel_crtc->cursor_height <= 0)
8014                         base = 0;
8015
8016                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8017                 y = -y;
8018         }
8019         pos |= y << CURSOR_Y_SHIFT;
8020
8021         if (base == 0 && intel_crtc->cursor_base == 0)
8022                 return;
8023
8024         I915_WRITE(CURPOS(pipe), pos);
8025
8026         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
8027                 ivb_update_cursor(crtc, base);
8028         else if (IS_845G(dev) || IS_I865G(dev))
8029                 i845_update_cursor(crtc, base);
8030         else
8031                 i9xx_update_cursor(crtc, base);
8032         intel_crtc->cursor_base = base;
8033 }
8034
8035 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
8036                                  struct drm_file *file,
8037                                  uint32_t handle,
8038                                  uint32_t width, uint32_t height)
8039 {
8040         struct drm_device *dev = crtc->dev;
8041         struct drm_i915_private *dev_priv = dev->dev_private;
8042         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8043         struct drm_i915_gem_object *obj;
8044         unsigned old_width;
8045         uint32_t addr;
8046         int ret;
8047
8048         /* if we want to turn off the cursor ignore width and height */
8049         if (!handle) {
8050                 DRM_DEBUG_KMS("cursor off\n");
8051                 addr = 0;
8052                 obj = NULL;
8053                 mutex_lock(&dev->struct_mutex);
8054                 goto finish;
8055         }
8056
8057         /* Check for which cursor types we support */
8058         if (!((width == 64 && height == 64) ||
8059                         (width == 128 && height == 128 && !IS_GEN2(dev)) ||
8060                         (width == 256 && height == 256 && !IS_GEN2(dev)))) {
8061                 DRM_DEBUG("Cursor dimension not supported\n");
8062                 return -EINVAL;
8063         }
8064
8065         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
8066         if (&obj->base == NULL)
8067                 return -ENOENT;
8068
8069         if (obj->base.size < width * height * 4) {
8070                 DRM_DEBUG_KMS("buffer is to small\n");
8071                 ret = -ENOMEM;
8072                 goto fail;
8073         }
8074
8075         /* we only need to pin inside GTT if cursor is non-phy */
8076         mutex_lock(&dev->struct_mutex);
8077         if (!INTEL_INFO(dev)->cursor_needs_physical) {
8078                 unsigned alignment;
8079
8080                 if (obj->tiling_mode) {
8081                         DRM_DEBUG_KMS("cursor cannot be tiled\n");
8082                         ret = -EINVAL;
8083                         goto fail_locked;
8084                 }
8085
8086                 /* Note that the w/a also requires 2 PTE of padding following
8087                  * the bo. We currently fill all unused PTE with the shadow
8088                  * page and so we should always have valid PTE following the
8089                  * cursor preventing the VT-d warning.
8090                  */
8091                 alignment = 0;
8092                 if (need_vtd_wa(dev))
8093                         alignment = 64*1024;
8094
8095                 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
8096                 if (ret) {
8097                         DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
8098                         goto fail_locked;
8099                 }
8100
8101                 ret = i915_gem_object_put_fence(obj);
8102                 if (ret) {
8103                         DRM_DEBUG_KMS("failed to release fence for cursor");
8104                         goto fail_unpin;
8105                 }
8106
8107                 addr = i915_gem_obj_ggtt_offset(obj);
8108         } else {
8109                 int align = IS_I830(dev) ? 16 * 1024 : 256;
8110                 ret = i915_gem_object_attach_phys(obj, align);
8111                 if (ret) {
8112                         DRM_DEBUG_KMS("failed to attach phys object\n");
8113                         goto fail_locked;
8114                 }
8115                 addr = obj->phys_handle->busaddr;
8116         }
8117
8118         if (IS_GEN2(dev))
8119                 I915_WRITE(CURSIZE, (height << 12) | width);
8120
8121  finish:
8122         if (intel_crtc->cursor_bo) {
8123                 if (!INTEL_INFO(dev)->cursor_needs_physical)
8124                         i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
8125                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
8126         }
8127
8128         mutex_unlock(&dev->struct_mutex);
8129
8130         old_width = intel_crtc->cursor_width;
8131
8132         intel_crtc->cursor_addr = addr;
8133         intel_crtc->cursor_bo = obj;
8134         intel_crtc->cursor_width = width;
8135         intel_crtc->cursor_height = height;
8136
8137         if (intel_crtc->active) {
8138                 if (old_width != width)
8139                         intel_update_watermarks(crtc);
8140                 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8141         }
8142
8143         return 0;
8144 fail_unpin:
8145         i915_gem_object_unpin_from_display_plane(obj);
8146 fail_locked:
8147         mutex_unlock(&dev->struct_mutex);
8148 fail:
8149         drm_gem_object_unreference_unlocked(&obj->base);
8150         return ret;
8151 }
8152
8153 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
8154 {
8155         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8156
8157         intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
8158         intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
8159
8160         if (intel_crtc->active)
8161                 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8162
8163         return 0;
8164 }
8165
8166 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8167                                  u16 *blue, uint32_t start, uint32_t size)
8168 {
8169         int end = (start + size > 256) ? 256 : start + size, i;
8170         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8171
8172         for (i = start; i < end; i++) {
8173                 intel_crtc->lut_r[i] = red[i] >> 8;
8174                 intel_crtc->lut_g[i] = green[i] >> 8;
8175                 intel_crtc->lut_b[i] = blue[i] >> 8;
8176         }
8177
8178         intel_crtc_load_lut(crtc);
8179 }
8180
8181 /* VESA 640x480x72Hz mode to set on the pipe */
8182 static struct drm_display_mode load_detect_mode = {
8183         DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8184                  704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8185 };
8186
8187 struct drm_framebuffer *
8188 __intel_framebuffer_create(struct drm_device *dev,
8189                            struct drm_mode_fb_cmd2 *mode_cmd,
8190                            struct drm_i915_gem_object *obj)
8191 {
8192         struct intel_framebuffer *intel_fb;
8193         int ret;
8194
8195         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8196         if (!intel_fb) {
8197                 drm_gem_object_unreference_unlocked(&obj->base);
8198                 return ERR_PTR(-ENOMEM);
8199         }
8200
8201         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8202         if (ret)
8203                 goto err;
8204
8205         return &intel_fb->base;
8206 err:
8207         drm_gem_object_unreference_unlocked(&obj->base);
8208         kfree(intel_fb);
8209
8210         return ERR_PTR(ret);
8211 }
8212
8213 static struct drm_framebuffer *
8214 intel_framebuffer_create(struct drm_device *dev,
8215                          struct drm_mode_fb_cmd2 *mode_cmd,
8216                          struct drm_i915_gem_object *obj)
8217 {
8218         struct drm_framebuffer *fb;
8219         int ret;
8220
8221         ret = i915_mutex_lock_interruptible(dev);
8222         if (ret)
8223                 return ERR_PTR(ret);
8224         fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8225         mutex_unlock(&dev->struct_mutex);
8226
8227         return fb;
8228 }
8229
8230 static u32
8231 intel_framebuffer_pitch_for_width(int width, int bpp)
8232 {
8233         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8234         return ALIGN(pitch, 64);
8235 }
8236
8237 static u32
8238 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8239 {
8240         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8241         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
8242 }
8243
8244 static struct drm_framebuffer *
8245 intel_framebuffer_create_for_mode(struct drm_device *dev,
8246                                   struct drm_display_mode *mode,
8247                                   int depth, int bpp)
8248 {
8249         struct drm_i915_gem_object *obj;
8250         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
8251
8252         obj = i915_gem_alloc_object(dev,
8253                                     intel_framebuffer_size_for_mode(mode, bpp));
8254         if (obj == NULL)
8255                 return ERR_PTR(-ENOMEM);
8256
8257         mode_cmd.width = mode->hdisplay;
8258         mode_cmd.height = mode->vdisplay;
8259         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8260                                                                 bpp);
8261         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8262
8263         return intel_framebuffer_create(dev, &mode_cmd, obj);
8264 }
8265
8266 static struct drm_framebuffer *
8267 mode_fits_in_fbdev(struct drm_device *dev,
8268                    struct drm_display_mode *mode)
8269 {
8270 #ifdef CONFIG_DRM_I915_FBDEV
8271         struct drm_i915_private *dev_priv = dev->dev_private;
8272         struct drm_i915_gem_object *obj;
8273         struct drm_framebuffer *fb;
8274
8275         if (!dev_priv->fbdev)
8276                 return NULL;
8277
8278         if (!dev_priv->fbdev->fb)
8279                 return NULL;
8280
8281         obj = dev_priv->fbdev->fb->obj;
8282         BUG_ON(!obj);
8283
8284         fb = &dev_priv->fbdev->fb->base;
8285         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8286                                                                fb->bits_per_pixel))
8287                 return NULL;
8288
8289         if (obj->base.size < mode->vdisplay * fb->pitches[0])
8290                 return NULL;
8291
8292         return fb;
8293 #else
8294         return NULL;
8295 #endif
8296 }
8297
8298 bool intel_get_load_detect_pipe(struct drm_connector *connector,
8299                                 struct drm_display_mode *mode,
8300                                 struct intel_load_detect_pipe *old,
8301                                 struct drm_modeset_acquire_ctx *ctx)
8302 {
8303         struct intel_crtc *intel_crtc;
8304         struct intel_encoder *intel_encoder =
8305                 intel_attached_encoder(connector);
8306         struct drm_crtc *possible_crtc;
8307         struct drm_encoder *encoder = &intel_encoder->base;
8308         struct drm_crtc *crtc = NULL;
8309         struct drm_device *dev = encoder->dev;
8310         struct drm_framebuffer *fb;
8311         struct drm_mode_config *config = &dev->mode_config;
8312         int ret, i = -1;
8313
8314         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8315                       connector->base.id, connector->name,
8316                       encoder->base.id, encoder->name);
8317
8318         drm_modeset_acquire_init(ctx, 0);
8319
8320 retry:
8321         ret = drm_modeset_lock(&config->connection_mutex, ctx);
8322         if (ret)
8323                 goto fail_unlock;
8324
8325         /*
8326          * Algorithm gets a little messy:
8327          *
8328          *   - if the connector already has an assigned crtc, use it (but make
8329          *     sure it's on first)
8330          *
8331          *   - try to find the first unused crtc that can drive this connector,
8332          *     and use that if we find one
8333          */
8334
8335         /* See if we already have a CRTC for this connector */
8336         if (encoder->crtc) {
8337                 crtc = encoder->crtc;
8338
8339                 ret = drm_modeset_lock(&crtc->mutex, ctx);
8340                 if (ret)
8341                         goto fail_unlock;
8342
8343                 old->dpms_mode = connector->dpms;
8344                 old->load_detect_temp = false;
8345
8346                 /* Make sure the crtc and connector are running */
8347                 if (connector->dpms != DRM_MODE_DPMS_ON)
8348                         connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8349
8350                 return true;
8351         }
8352
8353         /* Find an unused one (if possible) */
8354         for_each_crtc(dev, possible_crtc) {
8355                 i++;
8356                 if (!(encoder->possible_crtcs & (1 << i)))
8357                         continue;
8358                 if (!possible_crtc->enabled) {
8359                         crtc = possible_crtc;
8360                         break;
8361                 }
8362         }
8363
8364         /*
8365          * If we didn't find an unused CRTC, don't use any.
8366          */
8367         if (!crtc) {
8368                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
8369                 goto fail_unlock;
8370         }
8371
8372         ret = drm_modeset_lock(&crtc->mutex, ctx);
8373         if (ret)
8374                 goto fail_unlock;
8375         intel_encoder->new_crtc = to_intel_crtc(crtc);
8376         to_intel_connector(connector)->new_encoder = intel_encoder;
8377
8378         intel_crtc = to_intel_crtc(crtc);
8379         intel_crtc->new_enabled = true;
8380         intel_crtc->new_config = &intel_crtc->config;
8381         old->dpms_mode = connector->dpms;
8382         old->load_detect_temp = true;
8383         old->release_fb = NULL;
8384
8385         if (!mode)
8386                 mode = &load_detect_mode;
8387
8388         /* We need a framebuffer large enough to accommodate all accesses
8389          * that the plane may generate whilst we perform load detection.
8390          * We can not rely on the fbcon either being present (we get called
8391          * during its initialisation to detect all boot displays, or it may
8392          * not even exist) or that it is large enough to satisfy the
8393          * requested mode.
8394          */
8395         fb = mode_fits_in_fbdev(dev, mode);
8396         if (fb == NULL) {
8397                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
8398                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8399                 old->release_fb = fb;
8400         } else
8401                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
8402         if (IS_ERR(fb)) {
8403                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
8404                 goto fail;
8405         }
8406
8407         if (intel_set_mode(crtc, mode, 0, 0, fb)) {
8408                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8409                 if (old->release_fb)
8410                         old->release_fb->funcs->destroy(old->release_fb);
8411                 goto fail;
8412         }
8413
8414         /* let the connector get through one full cycle before testing */
8415         intel_wait_for_vblank(dev, intel_crtc->pipe);
8416         return true;
8417
8418  fail:
8419         intel_crtc->new_enabled = crtc->enabled;
8420         if (intel_crtc->new_enabled)
8421                 intel_crtc->new_config = &intel_crtc->config;
8422         else
8423                 intel_crtc->new_config = NULL;
8424 fail_unlock:
8425         if (ret == -EDEADLK) {
8426                 drm_modeset_backoff(ctx);
8427                 goto retry;
8428         }
8429
8430         drm_modeset_drop_locks(ctx);
8431         drm_modeset_acquire_fini(ctx);
8432
8433         return false;
8434 }
8435
8436 void intel_release_load_detect_pipe(struct drm_connector *connector,
8437                                     struct intel_load_detect_pipe *old,
8438                                     struct drm_modeset_acquire_ctx *ctx)
8439 {
8440         struct intel_encoder *intel_encoder =
8441                 intel_attached_encoder(connector);
8442         struct drm_encoder *encoder = &intel_encoder->base;
8443         struct drm_crtc *crtc = encoder->crtc;
8444         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8445
8446         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8447                       connector->base.id, connector->name,
8448                       encoder->base.id, encoder->name);
8449
8450         if (old->load_detect_temp) {
8451                 to_intel_connector(connector)->new_encoder = NULL;
8452                 intel_encoder->new_crtc = NULL;
8453                 intel_crtc->new_enabled = false;
8454                 intel_crtc->new_config = NULL;
8455                 intel_set_mode(crtc, NULL, 0, 0, NULL);
8456
8457                 if (old->release_fb) {
8458                         drm_framebuffer_unregister_private(old->release_fb);
8459                         drm_framebuffer_unreference(old->release_fb);
8460                 }
8461
8462                 goto unlock;
8463                 return;
8464         }
8465
8466         /* Switch crtc and encoder back off if necessary */
8467         if (old->dpms_mode != DRM_MODE_DPMS_ON)
8468                 connector->funcs->dpms(connector, old->dpms_mode);
8469
8470 unlock:
8471         drm_modeset_drop_locks(ctx);
8472         drm_modeset_acquire_fini(ctx);
8473 }
8474
8475 static int i9xx_pll_refclk(struct drm_device *dev,
8476                            const struct intel_crtc_config *pipe_config)
8477 {
8478         struct drm_i915_private *dev_priv = dev->dev_private;
8479         u32 dpll = pipe_config->dpll_hw_state.dpll;
8480
8481         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8482                 return dev_priv->vbt.lvds_ssc_freq;
8483         else if (HAS_PCH_SPLIT(dev))
8484                 return 120000;
8485         else if (!IS_GEN2(dev))
8486                 return 96000;
8487         else
8488                 return 48000;
8489 }
8490
8491 /* Returns the clock of the currently programmed mode of the given pipe. */
8492 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8493                                 struct intel_crtc_config *pipe_config)
8494 {
8495         struct drm_device *dev = crtc->base.dev;
8496         struct drm_i915_private *dev_priv = dev->dev_private;
8497         int pipe = pipe_config->cpu_transcoder;
8498         u32 dpll = pipe_config->dpll_hw_state.dpll;
8499         u32 fp;
8500         intel_clock_t clock;
8501         int refclk = i9xx_pll_refclk(dev, pipe_config);
8502
8503         if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8504                 fp = pipe_config->dpll_hw_state.fp0;
8505         else
8506                 fp = pipe_config->dpll_hw_state.fp1;
8507
8508         clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8509         if (IS_PINEVIEW(dev)) {
8510                 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8511                 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8512         } else {
8513                 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8514                 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8515         }
8516
8517         if (!IS_GEN2(dev)) {
8518                 if (IS_PINEVIEW(dev))
8519                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8520                                 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8521                 else
8522                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8523                                DPLL_FPA01_P1_POST_DIV_SHIFT);
8524
8525                 switch (dpll & DPLL_MODE_MASK) {
8526                 case DPLLB_MODE_DAC_SERIAL:
8527                         clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8528                                 5 : 10;
8529                         break;
8530                 case DPLLB_MODE_LVDS:
8531                         clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8532                                 7 : 14;
8533                         break;
8534                 default:
8535                         DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8536                                   "mode\n", (int)(dpll & DPLL_MODE_MASK));
8537                         return;
8538                 }
8539
8540                 if (IS_PINEVIEW(dev))
8541                         pineview_clock(refclk, &clock);
8542                 else
8543                         i9xx_clock(refclk, &clock);
8544         } else {
8545                 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8546                 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8547
8548                 if (is_lvds) {
8549                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8550                                        DPLL_FPA01_P1_POST_DIV_SHIFT);
8551
8552                         if (lvds & LVDS_CLKB_POWER_UP)
8553                                 clock.p2 = 7;
8554                         else
8555                                 clock.p2 = 14;
8556                 } else {
8557                         if (dpll & PLL_P1_DIVIDE_BY_TWO)
8558                                 clock.p1 = 2;
8559                         else {
8560                                 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8561                                             DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8562                         }
8563                         if (dpll & PLL_P2_DIVIDE_BY_4)
8564                                 clock.p2 = 4;
8565                         else
8566                                 clock.p2 = 2;
8567                 }
8568
8569                 i9xx_clock(refclk, &clock);
8570         }
8571
8572         /*
8573          * This value includes pixel_multiplier. We will use
8574          * port_clock to compute adjusted_mode.crtc_clock in the
8575          * encoder's get_config() function.
8576          */
8577         pipe_config->port_clock = clock.dot;
8578 }
8579
8580 int intel_dotclock_calculate(int link_freq,
8581                              const struct intel_link_m_n *m_n)
8582 {
8583         /*
8584          * The calculation for the data clock is:
8585          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8586          * But we want to avoid losing precison if possible, so:
8587          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8588          *
8589          * and the link clock is simpler:
8590          * link_clock = (m * link_clock) / n
8591          */
8592
8593         if (!m_n->link_n)
8594                 return 0;
8595
8596         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8597 }
8598
8599 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8600                                    struct intel_crtc_config *pipe_config)
8601 {
8602         struct drm_device *dev = crtc->base.dev;
8603
8604         /* read out port_clock from the DPLL */
8605         i9xx_crtc_clock_get(crtc, pipe_config);
8606
8607         /*
8608          * This value does not include pixel_multiplier.
8609          * We will check that port_clock and adjusted_mode.crtc_clock
8610          * agree once we know their relationship in the encoder's
8611          * get_config() function.
8612          */
8613         pipe_config->adjusted_mode.crtc_clock =
8614                 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8615                                          &pipe_config->fdi_m_n);
8616 }
8617
8618 /** Returns the currently programmed mode of the given pipe. */
8619 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8620                                              struct drm_crtc *crtc)
8621 {
8622         struct drm_i915_private *dev_priv = dev->dev_private;
8623         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8624         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8625         struct drm_display_mode *mode;
8626         struct intel_crtc_config pipe_config;
8627         int htot = I915_READ(HTOTAL(cpu_transcoder));
8628         int hsync = I915_READ(HSYNC(cpu_transcoder));
8629         int vtot = I915_READ(VTOTAL(cpu_transcoder));
8630         int vsync = I915_READ(VSYNC(cpu_transcoder));
8631         enum i915_pipe pipe = intel_crtc->pipe;
8632
8633         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8634         if (!mode)
8635                 return NULL;
8636
8637         /*
8638          * Construct a pipe_config sufficient for getting the clock info
8639          * back out of crtc_clock_get.
8640          *
8641          * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8642          * to use a real value here instead.
8643          */
8644         pipe_config.cpu_transcoder = (enum transcoder) pipe;
8645         pipe_config.pixel_multiplier = 1;
8646         pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8647         pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8648         pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
8649         i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8650
8651         mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
8652         mode->hdisplay = (htot & 0xffff) + 1;
8653         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8654         mode->hsync_start = (hsync & 0xffff) + 1;
8655         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8656         mode->vdisplay = (vtot & 0xffff) + 1;
8657         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8658         mode->vsync_start = (vsync & 0xffff) + 1;
8659         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8660
8661         drm_mode_set_name(mode);
8662
8663         return mode;
8664 }
8665
8666 static void intel_increase_pllclock(struct drm_crtc *crtc)
8667 {
8668         struct drm_device *dev = crtc->dev;
8669         struct drm_i915_private *dev_priv = dev->dev_private;
8670         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8671         int pipe = intel_crtc->pipe;
8672         int dpll_reg = DPLL(pipe);
8673         int dpll;
8674
8675         if (HAS_PCH_SPLIT(dev))
8676                 return;
8677
8678         if (!dev_priv->lvds_downclock_avail)
8679                 return;
8680
8681         dpll = I915_READ(dpll_reg);
8682         if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
8683                 DRM_DEBUG_DRIVER("upclocking LVDS\n");
8684
8685                 assert_panel_unlocked(dev_priv, pipe);
8686
8687                 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
8688                 I915_WRITE(dpll_reg, dpll);
8689                 intel_wait_for_vblank(dev, pipe);
8690
8691                 dpll = I915_READ(dpll_reg);
8692                 if (dpll & DISPLAY_RATE_SELECT_FPA1)
8693                         DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
8694         }
8695 }
8696
8697 static void intel_decrease_pllclock(struct drm_crtc *crtc)
8698 {
8699         struct drm_device *dev = crtc->dev;
8700         struct drm_i915_private *dev_priv = dev->dev_private;
8701         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8702
8703         if (HAS_PCH_SPLIT(dev))
8704                 return;
8705
8706         if (!dev_priv->lvds_downclock_avail)
8707                 return;
8708
8709         /*
8710          * Since this is called by a timer, we should never get here in
8711          * the manual case.
8712          */
8713         if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8714                 int pipe = intel_crtc->pipe;
8715                 int dpll_reg = DPLL(pipe);
8716                 int dpll;
8717
8718                 DRM_DEBUG_DRIVER("downclocking LVDS\n");
8719
8720                 assert_panel_unlocked(dev_priv, pipe);
8721
8722                 dpll = I915_READ(dpll_reg);
8723                 dpll |= DISPLAY_RATE_SELECT_FPA1;
8724                 I915_WRITE(dpll_reg, dpll);
8725                 intel_wait_for_vblank(dev, pipe);
8726                 dpll = I915_READ(dpll_reg);
8727                 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8728                         DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8729         }
8730
8731 }
8732
8733 void intel_mark_busy(struct drm_device *dev)
8734 {
8735         struct drm_i915_private *dev_priv = dev->dev_private;
8736
8737         if (dev_priv->mm.busy)
8738                 return;
8739
8740         intel_runtime_pm_get(dev_priv);
8741         i915_update_gfx_val(dev_priv);
8742         dev_priv->mm.busy = true;
8743 }
8744
8745 void intel_mark_idle(struct drm_device *dev)
8746 {
8747         struct drm_i915_private *dev_priv = dev->dev_private;
8748         struct drm_crtc *crtc;
8749
8750         if (!dev_priv->mm.busy)
8751                 return;
8752
8753         dev_priv->mm.busy = false;
8754
8755         if (!i915.powersave)
8756                 goto out;
8757
8758         for_each_crtc(dev, crtc) {
8759                 if (!crtc->primary->fb)
8760                         continue;
8761
8762                 intel_decrease_pllclock(crtc);
8763         }
8764
8765         if (INTEL_INFO(dev)->gen >= 6)
8766                 gen6_rps_idle(dev->dev_private);
8767
8768 out:
8769         intel_runtime_pm_put(dev_priv);
8770 }
8771
8772 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8773                         struct intel_engine_cs *ring)
8774 {
8775         struct drm_device *dev = obj->base.dev;
8776         struct drm_crtc *crtc;
8777
8778         if (!i915.powersave)
8779                 return;
8780
8781         for_each_crtc(dev, crtc) {
8782                 if (!crtc->primary->fb)
8783                         continue;
8784
8785                 if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
8786                         continue;
8787
8788                 intel_increase_pllclock(crtc);
8789                 if (ring && intel_fbc_enabled(dev))
8790                         ring->fbc_dirty = true;
8791         }
8792 }
8793
8794 static void intel_crtc_destroy(struct drm_crtc *crtc)
8795 {
8796         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8797         struct drm_device *dev = crtc->dev;
8798         struct intel_unpin_work *work;
8799
8800         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
8801         work = intel_crtc->unpin_work;
8802         intel_crtc->unpin_work = NULL;
8803         lockmgr(&dev->event_lock, LK_RELEASE);
8804
8805         if (work) {
8806                 cancel_work_sync(&work->work);
8807                 kfree(work);
8808         }
8809
8810         intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
8811
8812         drm_crtc_cleanup(crtc);
8813
8814         kfree(intel_crtc);
8815 }
8816
8817 static void intel_unpin_work_fn(struct work_struct *__work)
8818 {
8819         struct intel_unpin_work *work =
8820                 container_of(__work, struct intel_unpin_work, work);
8821         struct drm_device *dev = work->crtc->dev;
8822
8823         mutex_lock(&dev->struct_mutex);
8824         intel_unpin_fb_obj(work->old_fb_obj);
8825         drm_gem_object_unreference(&work->pending_flip_obj->base);
8826         drm_gem_object_unreference(&work->old_fb_obj->base);
8827
8828         intel_update_fbc(dev);
8829         mutex_unlock(&dev->struct_mutex);
8830
8831         BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
8832         atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
8833
8834         kfree(work);
8835 }
8836
8837 static void do_intel_finish_page_flip(struct drm_device *dev,
8838                                       struct drm_crtc *crtc)
8839 {
8840         struct drm_i915_private *dev_priv = dev->dev_private;
8841         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8842         struct intel_unpin_work *work;
8843
8844         /* Ignore early vblank irqs */
8845         if (intel_crtc == NULL)
8846                 return;
8847
8848         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
8849         work = intel_crtc->unpin_work;
8850
8851         /* Ensure we don't miss a work->pending update ... */
8852         smp_rmb();
8853
8854         if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
8855                 lockmgr(&dev->event_lock, LK_RELEASE);
8856                 return;
8857         }
8858
8859         /* and that the unpin work is consistent wrt ->pending. */
8860         smp_rmb();
8861
8862         intel_crtc->unpin_work = NULL;
8863
8864         if (work->event)
8865                 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
8866
8867         drm_crtc_vblank_put(crtc);
8868
8869         lockmgr(&dev->event_lock, LK_RELEASE);
8870
8871         wake_up_all(&dev_priv->pending_flip_queue);
8872
8873         queue_work(dev_priv->wq, &work->work);
8874
8875         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
8876 }
8877
8878 void intel_finish_page_flip(struct drm_device *dev, int pipe)
8879 {
8880         struct drm_i915_private *dev_priv = dev->dev_private;
8881         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8882
8883         do_intel_finish_page_flip(dev, crtc);
8884 }
8885
8886 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8887 {
8888         struct drm_i915_private *dev_priv = dev->dev_private;
8889         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8890
8891         do_intel_finish_page_flip(dev, crtc);
8892 }
8893
8894 /* Is 'a' after or equal to 'b'? */
8895 static bool g4x_flip_count_after_eq(u32 a, u32 b)
8896 {
8897         return !((a - b) & 0x80000000);
8898 }
8899
8900 static bool page_flip_finished(struct intel_crtc *crtc)
8901 {
8902         struct drm_device *dev = crtc->base.dev;
8903         struct drm_i915_private *dev_priv = dev->dev_private;
8904
8905         /*
8906          * The relevant registers doen't exist on pre-ctg.
8907          * As the flip done interrupt doesn't trigger for mmio
8908          * flips on gmch platforms, a flip count check isn't
8909          * really needed there. But since ctg has the registers,
8910          * include it in the check anyway.
8911          */
8912         if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
8913                 return true;
8914
8915         /*
8916          * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
8917          * used the same base address. In that case the mmio flip might
8918          * have completed, but the CS hasn't even executed the flip yet.
8919          *
8920          * A flip count check isn't enough as the CS might have updated
8921          * the base address just after start of vblank, but before we
8922          * managed to process the interrupt. This means we'd complete the
8923          * CS flip too soon.
8924          *
8925          * Combining both checks should get us a good enough result. It may
8926          * still happen that the CS flip has been executed, but has not
8927          * yet actually completed. But in case the base address is the same
8928          * anyway, we don't really care.
8929          */
8930         return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
8931                 crtc->unpin_work->gtt_offset &&
8932                 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
8933                                     crtc->unpin_work->flip_count);
8934 }
8935
8936 void intel_prepare_page_flip(struct drm_device *dev, int plane)
8937 {
8938         struct drm_i915_private *dev_priv = dev->dev_private;
8939         struct intel_crtc *intel_crtc =
8940                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
8941
8942         /* NB: An MMIO update of the plane base pointer will also
8943          * generate a page-flip completion irq, i.e. every modeset
8944          * is also accompanied by a spurious intel_prepare_page_flip().
8945          */
8946         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
8947         if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
8948                 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
8949         lockmgr(&dev->event_lock, LK_RELEASE);
8950 }
8951
8952 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
8953 {
8954         /* Ensure that the work item is consistent when activating it ... */
8955         smp_wmb();
8956         atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
8957         /* and that it is marked active as soon as the irq could fire. */
8958         smp_wmb();
8959 }
8960
8961 static int intel_gen2_queue_flip(struct drm_device *dev,
8962                                  struct drm_crtc *crtc,
8963                                  struct drm_framebuffer *fb,
8964                                  struct drm_i915_gem_object *obj,
8965                                  struct intel_engine_cs *ring,
8966                                  uint32_t flags)
8967 {
8968         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8969         u32 flip_mask;
8970         int ret;
8971
8972         ret = intel_ring_begin(ring, 6);
8973         if (ret)
8974                 return ret;
8975
8976         /* Can't queue multiple flips, so wait for the previous
8977          * one to finish before executing the next.
8978          */
8979         if (intel_crtc->plane)
8980                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8981         else
8982                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
8983         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
8984         intel_ring_emit(ring, MI_NOOP);
8985         intel_ring_emit(ring, MI_DISPLAY_FLIP |
8986                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8987         intel_ring_emit(ring, fb->pitches[0]);
8988         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8989         intel_ring_emit(ring, 0); /* aux display base address, unused */
8990
8991         intel_mark_page_flip_active(intel_crtc);
8992         __intel_ring_advance(ring);
8993         return 0;
8994 }
8995
8996 static int intel_gen3_queue_flip(struct drm_device *dev,
8997                                  struct drm_crtc *crtc,
8998                                  struct drm_framebuffer *fb,
8999                                  struct drm_i915_gem_object *obj,
9000                                  struct intel_engine_cs *ring,
9001                                  uint32_t flags)
9002 {
9003         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9004         u32 flip_mask;
9005         int ret;
9006
9007         ret = intel_ring_begin(ring, 6);
9008         if (ret)
9009                 return ret;
9010
9011         if (intel_crtc->plane)
9012                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9013         else
9014                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9015         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9016         intel_ring_emit(ring, MI_NOOP);
9017         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9018                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9019         intel_ring_emit(ring, fb->pitches[0]);
9020         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9021         intel_ring_emit(ring, MI_NOOP);
9022
9023         intel_mark_page_flip_active(intel_crtc);
9024         __intel_ring_advance(ring);
9025         return 0;
9026 }
9027
9028 static int intel_gen4_queue_flip(struct drm_device *dev,
9029                                  struct drm_crtc *crtc,
9030                                  struct drm_framebuffer *fb,
9031                                  struct drm_i915_gem_object *obj,
9032                                  struct intel_engine_cs *ring,
9033                                  uint32_t flags)
9034 {
9035         struct drm_i915_private *dev_priv = dev->dev_private;
9036         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9037         uint32_t pf, pipesrc;
9038         int ret;
9039
9040         ret = intel_ring_begin(ring, 4);
9041         if (ret)
9042                 return ret;
9043
9044         /* i965+ uses the linear or tiled offsets from the
9045          * Display Registers (which do not change across a page-flip)
9046          * so we need only reprogram the base address.
9047          */
9048         intel_ring_emit(ring, MI_DISPLAY_FLIP |
9049                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9050         intel_ring_emit(ring, fb->pitches[0]);
9051         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
9052                         obj->tiling_mode);
9053
9054         /* XXX Enabling the panel-fitter across page-flip is so far
9055          * untested on non-native modes, so ignore it for now.
9056          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9057          */
9058         pf = 0;
9059         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9060         intel_ring_emit(ring, pf | pipesrc);
9061
9062         intel_mark_page_flip_active(intel_crtc);
9063         __intel_ring_advance(ring);
9064         return 0;
9065 }
9066
9067 static int intel_gen6_queue_flip(struct drm_device *dev,
9068                                  struct drm_crtc *crtc,
9069                                  struct drm_framebuffer *fb,
9070                                  struct drm_i915_gem_object *obj,
9071                                  struct intel_engine_cs *ring,
9072                                  uint32_t flags)
9073 {
9074         struct drm_i915_private *dev_priv = dev->dev_private;
9075         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9076         uint32_t pf, pipesrc;
9077         int ret;
9078
9079         ret = intel_ring_begin(ring, 4);
9080         if (ret)
9081                 return ret;
9082
9083         intel_ring_emit(ring, MI_DISPLAY_FLIP |
9084                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9085         intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
9086         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9087
9088         /* Contrary to the suggestions in the documentation,
9089          * "Enable Panel Fitter" does not seem to be required when page
9090          * flipping with a non-native mode, and worse causes a normal
9091          * modeset to fail.
9092          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9093          */
9094         pf = 0;
9095         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9096         intel_ring_emit(ring, pf | pipesrc);
9097
9098         intel_mark_page_flip_active(intel_crtc);
9099         __intel_ring_advance(ring);
9100         return 0;
9101 }
9102
9103 static int intel_gen7_queue_flip(struct drm_device *dev,
9104                                  struct drm_crtc *crtc,
9105                                  struct drm_framebuffer *fb,
9106                                  struct drm_i915_gem_object *obj,
9107                                  struct intel_engine_cs *ring,
9108                                  uint32_t flags)
9109 {
9110         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9111         uint32_t plane_bit = 0;
9112         int len, ret;
9113
9114         switch (intel_crtc->plane) {
9115         case PLANE_A:
9116                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9117                 break;
9118         case PLANE_B:
9119                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9120                 break;
9121         case PLANE_C:
9122                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9123                 break;
9124         default:
9125                 WARN_ONCE(1, "unknown plane in flip command\n");
9126                 return -ENODEV;
9127         }
9128
9129         len = 4;
9130         if (ring->id == RCS) {
9131                 len += 6;
9132                 /*
9133                  * On Gen 8, SRM is now taking an extra dword to accommodate
9134                  * 48bits addresses, and we need a NOOP for the batch size to
9135                  * stay even.
9136                  */
9137                 if (IS_GEN8(dev))
9138                         len += 2;
9139         }
9140
9141         /*
9142          * BSpec MI_DISPLAY_FLIP for IVB:
9143          * "The full packet must be contained within the same cache line."
9144          *
9145          * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9146          * cacheline, if we ever start emitting more commands before
9147          * the MI_DISPLAY_FLIP we may need to first emit everything else,
9148          * then do the cacheline alignment, and finally emit the
9149          * MI_DISPLAY_FLIP.
9150          */
9151         ret = intel_ring_cacheline_align(ring);
9152         if (ret)
9153                 return ret;
9154
9155         ret = intel_ring_begin(ring, len);
9156         if (ret)
9157                 return ret;
9158
9159         /* Unmask the flip-done completion message. Note that the bspec says that
9160          * we should do this for both the BCS and RCS, and that we must not unmask
9161          * more than one flip event at any time (or ensure that one flip message
9162          * can be sent by waiting for flip-done prior to queueing new flips).
9163          * Experimentation says that BCS works despite DERRMR masking all
9164          * flip-done completion events and that unmasking all planes at once
9165          * for the RCS also doesn't appear to drop events. Setting the DERRMR
9166          * to zero does lead to lockups within MI_DISPLAY_FLIP.
9167          */
9168         if (ring->id == RCS) {
9169                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9170                 intel_ring_emit(ring, DERRMR);
9171                 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9172                                         DERRMR_PIPEB_PRI_FLIP_DONE |
9173                                         DERRMR_PIPEC_PRI_FLIP_DONE));
9174                 if (IS_GEN8(dev))
9175                         intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9176                                               MI_SRM_LRM_GLOBAL_GTT);
9177                 else
9178                         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9179                                               MI_SRM_LRM_GLOBAL_GTT);
9180                 intel_ring_emit(ring, DERRMR);
9181                 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9182                 if (IS_GEN8(dev)) {
9183                         intel_ring_emit(ring, 0);
9184                         intel_ring_emit(ring, MI_NOOP);
9185                 }
9186         }
9187
9188         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9189         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
9190         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9191         intel_ring_emit(ring, (MI_NOOP));
9192
9193         intel_mark_page_flip_active(intel_crtc);
9194         __intel_ring_advance(ring);
9195         return 0;
9196 }
9197
9198 static int intel_default_queue_flip(struct drm_device *dev,
9199                                     struct drm_crtc *crtc,
9200                                     struct drm_framebuffer *fb,
9201                                     struct drm_i915_gem_object *obj,
9202                                     struct intel_engine_cs *ring,
9203                                     uint32_t flags)
9204 {
9205         return -ENODEV;
9206 }
9207
9208 static int intel_crtc_page_flip(struct drm_crtc *crtc,
9209                                 struct drm_framebuffer *fb,
9210                                 struct drm_pending_vblank_event *event,
9211                                 uint32_t page_flip_flags)
9212 {
9213         struct drm_device *dev = crtc->dev;
9214         struct drm_i915_private *dev_priv = dev->dev_private;
9215         struct drm_framebuffer *old_fb = crtc->primary->fb;
9216         struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
9217         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9218         struct intel_unpin_work *work;
9219         struct intel_engine_cs *ring;
9220         int ret;
9221
9222         /* Can't change pixel format via MI display flips. */
9223         if (fb->pixel_format != crtc->primary->fb->pixel_format)
9224                 return -EINVAL;
9225
9226         /*
9227          * TILEOFF/LINOFF registers can't be changed via MI display flips.
9228          * Note that pitch changes could also affect these register.
9229          */
9230         if (INTEL_INFO(dev)->gen > 3 &&
9231             (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9232              fb->pitches[0] != crtc->primary->fb->pitches[0]))
9233                 return -EINVAL;
9234
9235         if (i915_terminally_wedged(&dev_priv->gpu_error))
9236                 goto out_hang;
9237
9238         work = kzalloc(sizeof(*work), GFP_KERNEL);
9239         if (work == NULL)
9240                 return -ENOMEM;
9241
9242         work->event = event;
9243         work->crtc = crtc;
9244         work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
9245         INIT_WORK(&work->work, intel_unpin_work_fn);
9246
9247         ret = drm_crtc_vblank_get(crtc);
9248         if (ret)
9249                 goto free_work;
9250
9251         /* We borrow the event spin lock for protecting unpin_work */
9252         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
9253         if (intel_crtc->unpin_work) {
9254                 lockmgr(&dev->event_lock, LK_RELEASE);
9255                 kfree(work);
9256                 drm_crtc_vblank_put(crtc);
9257
9258                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9259                 return -EBUSY;
9260         }
9261         intel_crtc->unpin_work = work;
9262         lockmgr(&dev->event_lock, LK_RELEASE);
9263
9264         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9265                 flush_workqueue(dev_priv->wq);
9266
9267         ret = i915_mutex_lock_interruptible(dev);
9268         if (ret)
9269                 goto cleanup;
9270
9271         /* Reference the objects for the scheduled work. */
9272         drm_gem_object_reference(&work->old_fb_obj->base);
9273         drm_gem_object_reference(&obj->base);
9274
9275         crtc->primary->fb = fb;
9276
9277         work->pending_flip_obj = obj;
9278
9279         work->enable_stall_check = true;
9280
9281         atomic_inc(&intel_crtc->unpin_work_count);
9282         intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9283
9284         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9285                 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
9286
9287         if (IS_VALLEYVIEW(dev)) {
9288                 ring = &dev_priv->ring[BCS];
9289         } else if (INTEL_INFO(dev)->gen >= 7) {
9290                 ring = obj->ring;
9291                 if (ring == NULL || ring->id != RCS)
9292                         ring = &dev_priv->ring[BCS];
9293         } else {
9294                 ring = &dev_priv->ring[RCS];
9295         }
9296
9297         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
9298         if (ret)
9299                 goto cleanup_pending;
9300
9301         work->gtt_offset =
9302                 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9303
9304         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
9305         if (ret)
9306                 goto cleanup_unpin;
9307
9308         intel_disable_fbc(dev);
9309         intel_mark_fb_busy(obj, NULL);
9310         mutex_unlock(&dev->struct_mutex);
9311
9312         trace_i915_flip_request(intel_crtc->plane, obj);
9313
9314         return 0;
9315
9316 cleanup_unpin:
9317         intel_unpin_fb_obj(obj);
9318 cleanup_pending:
9319         atomic_dec(&intel_crtc->unpin_work_count);
9320         crtc->primary->fb = old_fb;
9321         drm_gem_object_unreference(&work->old_fb_obj->base);
9322         drm_gem_object_unreference(&obj->base);
9323         mutex_unlock(&dev->struct_mutex);
9324
9325 cleanup:
9326         lockmgr(&dev->event_lock, LK_EXCLUSIVE);
9327         intel_crtc->unpin_work = NULL;
9328         lockmgr(&dev->event_lock, LK_RELEASE);
9329
9330         drm_crtc_vblank_put(crtc);
9331 free_work:
9332         kfree(work);
9333
9334         if (ret == -EIO) {
9335 out_hang:
9336                 intel_crtc_wait_for_pending_flips(crtc);
9337                 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9338                 if (ret == 0 && event)
9339                         drm_send_vblank_event(dev, intel_crtc->pipe, event);
9340         }
9341         return ret;
9342 }
9343
9344 static struct drm_crtc_helper_funcs intel_helper_funcs = {
9345         .mode_set_base_atomic = intel_pipe_set_base_atomic,
9346         .load_lut = intel_crtc_load_lut,
9347 };
9348
9349 /**
9350  * intel_modeset_update_staged_output_state
9351  *
9352  * Updates the staged output configuration state, e.g. after we've read out the
9353  * current hw state.
9354  */
9355 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9356 {
9357         struct intel_crtc *crtc;
9358         struct intel_encoder *encoder;
9359         struct intel_connector *connector;
9360
9361         list_for_each_entry(connector, &dev->mode_config.connector_list,
9362                             base.head) {
9363                 connector->new_encoder =
9364                         to_intel_encoder(connector->base.encoder);
9365         }
9366
9367         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9368                             base.head) {
9369                 encoder->new_crtc =
9370                         to_intel_crtc(encoder->base.crtc);
9371         }
9372
9373         for_each_intel_crtc(dev, crtc) {
9374                 crtc->new_enabled = crtc->base.enabled;
9375
9376                 if (crtc->new_enabled)
9377                         crtc->new_config = &crtc->config;
9378                 else
9379                         crtc->new_config = NULL;
9380         }
9381 }
9382
9383 /**
9384  * intel_modeset_commit_output_state
9385  *
9386  * This function copies the stage display pipe configuration to the real one.
9387  */
9388 static void intel_modeset_commit_output_state(struct drm_device *dev)
9389 {
9390         struct intel_crtc *crtc;
9391         struct intel_encoder *encoder;
9392         struct intel_connector *connector;
9393
9394         list_for_each_entry(connector, &dev->mode_config.connector_list,
9395                             base.head) {
9396                 connector->base.encoder = &connector->new_encoder->base;
9397         }
9398
9399         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9400                             base.head) {
9401                 encoder->base.crtc = &encoder->new_crtc->base;
9402         }
9403
9404         for_each_intel_crtc(dev, crtc) {
9405                 crtc->base.enabled = crtc->new_enabled;
9406         }
9407 }
9408
9409 static void
9410 connected_sink_compute_bpp(struct intel_connector *connector,
9411                            struct intel_crtc_config *pipe_config)
9412 {
9413         int bpp = pipe_config->pipe_bpp;
9414
9415         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9416                 connector->base.base.id,
9417                 connector->base.name);
9418
9419         /* Don't use an invalid EDID bpc value */
9420         if (connector->base.display_info.bpc &&
9421             connector->base.display_info.bpc * 3 < bpp) {
9422                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9423                               bpp, connector->base.display_info.bpc*3);
9424                 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9425         }
9426
9427         /* Clamp bpp to 8 on screens without EDID 1.4 */
9428         if (connector->base.display_info.bpc == 0 && bpp > 24) {
9429                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9430                               bpp);
9431                 pipe_config->pipe_bpp = 24;
9432         }
9433 }
9434
9435 static int
9436 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9437                           struct drm_framebuffer *fb,
9438                           struct intel_crtc_config *pipe_config)
9439 {
9440         struct drm_device *dev = crtc->base.dev;
9441         struct intel_connector *connector;
9442         int bpp;
9443
9444         switch (fb->pixel_format) {
9445         case DRM_FORMAT_C8:
9446                 bpp = 8*3; /* since we go through a colormap */
9447                 break;
9448         case DRM_FORMAT_XRGB1555:
9449         case DRM_FORMAT_ARGB1555:
9450                 /* checked in intel_framebuffer_init already */
9451                 if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9452                         return -EINVAL;
9453         case DRM_FORMAT_RGB565:
9454                 bpp = 6*3; /* min is 18bpp */
9455                 break;
9456         case DRM_FORMAT_XBGR8888:
9457         case DRM_FORMAT_ABGR8888:
9458                 /* checked in intel_framebuffer_init already */
9459                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9460                         return -EINVAL;
9461         case DRM_FORMAT_XRGB8888:
9462         case DRM_FORMAT_ARGB8888:
9463                 bpp = 8*3;
9464                 break;
9465         case DRM_FORMAT_XRGB2101010:
9466         case DRM_FORMAT_ARGB2101010:
9467         case DRM_FORMAT_XBGR2101010:
9468         case DRM_FORMAT_ABGR2101010:
9469                 /* checked in intel_framebuffer_init already */
9470                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9471                         return -EINVAL;
9472                 bpp = 10*3;
9473                 break;
9474         /* TODO: gen4+ supports 16 bpc floating point, too. */
9475         default:
9476                 DRM_DEBUG_KMS("unsupported depth\n");
9477                 return -EINVAL;
9478         }
9479
9480         pipe_config->pipe_bpp = bpp;
9481
9482         /* Clamp display bpp to EDID value */
9483         list_for_each_entry(connector, &dev->mode_config.connector_list,
9484                             base.head) {
9485                 if (!connector->new_encoder ||
9486                     connector->new_encoder->new_crtc != crtc)
9487                         continue;
9488
9489                 connected_sink_compute_bpp(connector, pipe_config);
9490         }
9491
9492         return bpp;
9493 }
9494
9495 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9496 {
9497         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9498                         "type: 0x%x flags: 0x%x\n",
9499                 mode->crtc_clock,
9500                 mode->crtc_hdisplay, mode->crtc_hsync_start,
9501                 mode->crtc_hsync_end, mode->crtc_htotal,
9502                 mode->crtc_vdisplay, mode->crtc_vsync_start,
9503                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9504 }
9505
9506 static void intel_dump_pipe_config(struct intel_crtc *crtc,
9507                                    struct intel_crtc_config *pipe_config,
9508                                    const char *context)
9509 {
9510         DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9511                       context, pipe_name(crtc->pipe));
9512
9513         DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9514         DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9515                       pipe_config->pipe_bpp, pipe_config->dither);
9516         DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9517                       pipe_config->has_pch_encoder,
9518                       pipe_config->fdi_lanes,
9519                       pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9520                       pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9521                       pipe_config->fdi_m_n.tu);
9522         DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9523                       pipe_config->has_dp_encoder,
9524                       pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9525                       pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9526                       pipe_config->dp_m_n.tu);
9527         DRM_DEBUG_KMS("requested mode:\n");
9528         drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9529         DRM_DEBUG_KMS("adjusted mode:\n");
9530         drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9531         intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9532         DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9533         DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9534                       pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9535         DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9536                       pipe_config->gmch_pfit.control,
9537                       pipe_config->gmch_pfit.pgm_ratios,
9538                       pipe_config->gmch_pfit.lvds_border_bits);
9539         DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9540                       pipe_config->pch_pfit.pos,
9541                       pipe_config->pch_pfit.size,
9542                       pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9543         DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9544         DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9545 }
9546
9547 static bool encoders_cloneable(const struct intel_encoder *a,
9548                                const struct intel_encoder *b)
9549 {
9550         /* masks could be asymmetric, so check both ways */
9551         return a == b || (a->cloneable & (1 << b->type) &&
9552                           b->cloneable & (1 << a->type));
9553 }
9554
9555 static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9556                                          struct intel_encoder *encoder)
9557 {
9558         struct drm_device *dev = crtc->base.dev;
9559         struct intel_encoder *source_encoder;
9560
9561         list_for_each_entry(source_encoder,
9562                             &dev->mode_config.encoder_list, base.head) {
9563                 if (source_encoder->new_crtc != crtc)
9564                         continue;
9565
9566                 if (!encoders_cloneable(encoder, source_encoder))
9567                         return false;
9568         }
9569
9570         return true;
9571 }
9572
9573 static bool check_encoder_cloning(struct intel_crtc *crtc)
9574 {
9575         struct drm_device *dev = crtc->base.dev;
9576         struct intel_encoder *encoder;
9577
9578         list_for_each_entry(encoder,
9579                             &dev->mode_config.encoder_list, base.head) {
9580                 if (encoder->new_crtc != crtc)
9581                         continue;
9582
9583                 if (!check_single_encoder_cloning(crtc, encoder))
9584                         return false;
9585         }
9586
9587         return true;
9588 }
9589
9590 static struct intel_crtc_config *
9591 intel_modeset_pipe_config(struct drm_crtc *crtc,
9592                           struct drm_framebuffer *fb,
9593                           struct drm_display_mode *mode)
9594 {
9595         struct drm_device *dev = crtc->dev;
9596         struct intel_encoder *encoder;
9597         struct intel_crtc_config *pipe_config;
9598         int plane_bpp, ret = -EINVAL;
9599         bool retry = true;
9600
9601         if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9602                 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9603                 return ERR_PTR(-EINVAL);
9604         }
9605
9606         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9607         if (!pipe_config)
9608                 return ERR_PTR(-ENOMEM);
9609
9610         drm_mode_copy(&pipe_config->adjusted_mode, mode);
9611         drm_mode_copy(&pipe_config->requested_mode, mode);
9612
9613         pipe_config->cpu_transcoder =
9614                 (enum transcoder) to_intel_crtc(crtc)->pipe;
9615         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9616
9617         /*
9618          * Sanitize sync polarity flags based on requested ones. If neither
9619          * positive or negative polarity is requested, treat this as meaning
9620          * negative polarity.
9621          */
9622         if (!(pipe_config->adjusted_mode.flags &
9623               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9624                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9625
9626         if (!(pipe_config->adjusted_mode.flags &
9627               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9628                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9629
9630         /* Compute a starting value for pipe_config->pipe_bpp taking the source
9631          * plane pixel format and any sink constraints into account. Returns the
9632          * source plane bpp so that dithering can be selected on mismatches
9633          * after encoders and crtc also have had their say. */
9634         plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9635                                               fb, pipe_config);
9636         if (plane_bpp < 0)
9637                 goto fail;
9638
9639         /*
9640          * Determine the real pipe dimensions. Note that stereo modes can
9641          * increase the actual pipe size due to the frame doubling and
9642          * insertion of additional space for blanks between the frame. This
9643          * is stored in the crtc timings. We use the requested mode to do this
9644          * computation to clearly distinguish it from the adjusted mode, which
9645          * can be changed by the connectors in the below retry loop.
9646          */
9647         drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9648         pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9649         pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9650
9651 encoder_retry:
9652         /* Ensure the port clock defaults are reset when retrying. */
9653         pipe_config->port_clock = 0;
9654         pipe_config->pixel_multiplier = 1;
9655
9656         /* Fill in default crtc timings, allow encoders to overwrite them. */
9657         drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
9658
9659         /* Pass our mode to the connectors and the CRTC to give them a chance to
9660          * adjust it according to limitations or connector properties, and also
9661          * a chance to reject the mode entirely.
9662          */
9663         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9664                             base.head) {
9665
9666                 if (&encoder->new_crtc->base != crtc)
9667                         continue;
9668
9669                 if (!(encoder->compute_config(encoder, pipe_config))) {
9670                         DRM_DEBUG_KMS("Encoder config failure\n");
9671                         goto fail;
9672                 }
9673         }
9674
9675         /* Set default port clock if not overwritten by the encoder. Needs to be
9676          * done afterwards in case the encoder adjusts the mode. */
9677         if (!pipe_config->port_clock)
9678                 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9679                         * pipe_config->pixel_multiplier;
9680
9681         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9682         if (ret < 0) {
9683                 DRM_DEBUG_KMS("CRTC fixup failed\n");
9684                 goto fail;
9685         }
9686
9687         if (ret == RETRY) {
9688                 if (WARN(!retry, "loop in pipe configuration computation\n")) {
9689                         ret = -EINVAL;
9690                         goto fail;
9691                 }
9692
9693                 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9694                 retry = false;
9695                 goto encoder_retry;
9696         }
9697
9698         pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9699         DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9700                       plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9701
9702         return pipe_config;
9703 fail:
9704         kfree(pipe_config);
9705         return ERR_PTR(ret);
9706 }
9707
9708 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
9709  * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9710 static void
9711 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9712                              unsigned *prepare_pipes, unsigned *disable_pipes)
9713 {
9714         struct intel_crtc *intel_crtc;
9715         struct drm_device *dev = crtc->dev;
9716         struct intel_encoder *encoder;
9717         struct intel_connector *connector;
9718         struct drm_crtc *tmp_crtc;
9719
9720         *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9721
9722         /* Check which crtcs have changed outputs connected to them, these need
9723          * to be part of the prepare_pipes mask. We don't (yet) support global
9724          * modeset across multiple crtcs, so modeset_pipes will only have one
9725          * bit set at most. */
9726         list_for_each_entry(connector, &dev->mode_config.connector_list,
9727                             base.head) {
9728                 if (connector->base.encoder == &connector->new_encoder->base)
9729                         continue;
9730
9731                 if (connector->base.encoder) {
9732                         tmp_crtc = connector->base.encoder->crtc;
9733
9734                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9735                 }
9736
9737                 if (connector->new_encoder)
9738                         *prepare_pipes |=
9739                                 1 << connector->new_encoder->new_crtc->pipe;
9740         }
9741
9742         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9743                             base.head) {
9744                 if (encoder->base.crtc == &encoder->new_crtc->base)
9745                         continue;
9746
9747                 if (encoder->base.crtc) {
9748                         tmp_crtc = encoder->base.crtc;
9749
9750                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9751                 }
9752
9753                 if (encoder->new_crtc)
9754                         *prepare_pipes |= 1 << encoder->new_crtc->pipe;
9755         }
9756
9757         /* Check for pipes that will be enabled/disabled ... */
9758         for_each_intel_crtc(dev, intel_crtc) {
9759                 if (intel_crtc->base.enabled == intel_crtc->new_enabled)
9760                         continue;
9761
9762                 if (!intel_crtc->new_enabled)
9763                         *disable_pipes |= 1 << intel_crtc->pipe;
9764                 else
9765                         *prepare_pipes |= 1 << intel_crtc->pipe;
9766         }
9767
9768
9769         /* set_mode is also used to update properties on life display pipes. */
9770         intel_crtc = to_intel_crtc(crtc);
9771         if (intel_crtc->new_enabled)
9772                 *prepare_pipes |= 1 << intel_crtc->pipe;
9773
9774         /*
9775          * For simplicity do a full modeset on any pipe where the output routing
9776          * changed. We could be more clever, but that would require us to be
9777          * more careful with calling the relevant encoder->mode_set functions.
9778          */
9779         if (*prepare_pipes)
9780                 *modeset_pipes = *prepare_pipes;
9781
9782         /* ... and mask these out. */
9783         *modeset_pipes &= ~(*disable_pipes);
9784         *prepare_pipes &= ~(*disable_pipes);
9785
9786         /*
9787          * HACK: We don't (yet) fully support global modesets. intel_set_config
9788          * obies this rule, but the modeset restore mode of
9789          * intel_modeset_setup_hw_state does not.
9790          */
9791         *modeset_pipes &= 1 << intel_crtc->pipe;
9792         *prepare_pipes &= 1 << intel_crtc->pipe;
9793
9794         DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
9795                       *modeset_pipes, *prepare_pipes, *disable_pipes);
9796 }
9797
9798 static bool intel_crtc_in_use(struct drm_crtc *crtc)
9799 {
9800         struct drm_encoder *encoder;
9801         struct drm_device *dev = crtc->dev;
9802
9803         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
9804                 if (encoder->crtc == crtc)
9805                         return true;
9806
9807         return false;
9808 }
9809
9810 static void
9811 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9812 {
9813         struct intel_encoder *intel_encoder;
9814         struct intel_crtc *intel_crtc;
9815         struct drm_connector *connector;
9816
9817         list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
9818                             base.head) {
9819                 if (!intel_encoder->base.crtc)
9820                         continue;
9821
9822                 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
9823
9824                 if (prepare_pipes & (1 << intel_crtc->pipe))
9825                         intel_encoder->connectors_active = false;
9826         }
9827
9828         intel_modeset_commit_output_state(dev);
9829
9830         /* Double check state. */
9831         for_each_intel_crtc(dev, intel_crtc) {
9832                 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
9833                 WARN_ON(intel_crtc->new_config &&
9834                         intel_crtc->new_config != &intel_crtc->config);
9835                 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
9836         }
9837
9838         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9839                 if (!connector->encoder || !connector->encoder->crtc)
9840                         continue;
9841
9842                 intel_crtc = to_intel_crtc(connector->encoder->crtc);
9843
9844                 if (prepare_pipes & (1 << intel_crtc->pipe)) {
9845                         struct drm_property *dpms_property =
9846                                 dev->mode_config.dpms_property;
9847
9848                         connector->dpms = DRM_MODE_DPMS_ON;
9849                         drm_object_property_set_value(&connector->base,
9850                                                          dpms_property,
9851                                                          DRM_MODE_DPMS_ON);
9852
9853                         intel_encoder = to_intel_encoder(connector->encoder);
9854                         intel_encoder->connectors_active = true;
9855                 }
9856         }
9857
9858 }
9859
9860 static bool intel_fuzzy_clock_check(int clock1, int clock2)
9861 {
9862         int diff;
9863
9864         if (clock1 == clock2)
9865                 return true;
9866
9867         if (!clock1 || !clock2)
9868                 return false;
9869
9870         diff = abs(clock1 - clock2);
9871
9872         if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
9873                 return true;
9874
9875         return false;
9876 }
9877
9878 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
9879         list_for_each_entry((intel_crtc), \
9880                             &(dev)->mode_config.crtc_list, \
9881                             base.head) \
9882                 if (mask & (1 <<(intel_crtc)->pipe))
9883
9884 static bool
9885 intel_pipe_config_compare(struct drm_device *dev,
9886                           struct intel_crtc_config *current_config,
9887                           struct intel_crtc_config *pipe_config)
9888 {
9889 #define PIPE_CONF_CHECK_X(name) \
9890         if (current_config->name != pipe_config->name) { \
9891                 DRM_ERROR("mismatch in " #name " " \
9892                           "(expected 0x%08x, found 0x%08x)\n", \
9893                           current_config->name, \
9894                           pipe_config->name); \
9895                 return false; \
9896         }
9897
9898 #define PIPE_CONF_CHECK_I(name) \
9899         if (current_config->name != pipe_config->name) { \
9900                 DRM_ERROR("mismatch in " #name " " \
9901                           "(expected %i, found %i)\n", \
9902                           current_config->name, \
9903                           pipe_config->name); \
9904                 return false; \
9905         }
9906
9907 #define PIPE_CONF_CHECK_FLAGS(name, mask)       \
9908         if ((current_config->name ^ pipe_config->name) & (mask)) { \
9909                 DRM_ERROR("mismatch in " #name "(" #mask ") "      \
9910                           "(expected %i, found %i)\n", \
9911                           current_config->name & (mask), \
9912                           pipe_config->name & (mask)); \
9913                 return false; \
9914         }
9915
9916 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
9917         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9918                 DRM_ERROR("mismatch in " #name " " \
9919                           "(expected %i, found %i)\n", \
9920                           current_config->name, \
9921                           pipe_config->name); \
9922                 return false; \
9923         }
9924
9925 #define PIPE_CONF_QUIRK(quirk)  \
9926         ((current_config->quirks | pipe_config->quirks) & (quirk))
9927
9928         PIPE_CONF_CHECK_I(cpu_transcoder);
9929
9930         PIPE_CONF_CHECK_I(has_pch_encoder);
9931         PIPE_CONF_CHECK_I(fdi_lanes);
9932         PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
9933         PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
9934         PIPE_CONF_CHECK_I(fdi_m_n.link_m);
9935         PIPE_CONF_CHECK_I(fdi_m_n.link_n);
9936         PIPE_CONF_CHECK_I(fdi_m_n.tu);
9937
9938         PIPE_CONF_CHECK_I(has_dp_encoder);
9939         PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
9940         PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
9941         PIPE_CONF_CHECK_I(dp_m_n.link_m);
9942         PIPE_CONF_CHECK_I(dp_m_n.link_n);
9943         PIPE_CONF_CHECK_I(dp_m_n.tu);
9944
9945         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
9946         PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
9947         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
9948         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
9949         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
9950         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
9951
9952         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
9953         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
9954         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
9955         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
9956         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
9957         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
9958
9959         PIPE_CONF_CHECK_I(pixel_multiplier);
9960         PIPE_CONF_CHECK_I(has_hdmi_sink);
9961         if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
9962             IS_VALLEYVIEW(dev))
9963                 PIPE_CONF_CHECK_I(limited_color_range);
9964
9965         PIPE_CONF_CHECK_I(has_audio);
9966
9967         PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9968                               DRM_MODE_FLAG_INTERLACE);
9969
9970         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9971                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9972                                       DRM_MODE_FLAG_PHSYNC);
9973                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9974                                       DRM_MODE_FLAG_NHSYNC);
9975                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9976                                       DRM_MODE_FLAG_PVSYNC);
9977                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9978                                       DRM_MODE_FLAG_NVSYNC);
9979         }
9980
9981         PIPE_CONF_CHECK_I(pipe_src_w);
9982         PIPE_CONF_CHECK_I(pipe_src_h);
9983
9984         /*
9985          * FIXME: BIOS likes to set up a cloned config with lvds+external
9986          * screen. Since we don't yet re-compute the pipe config when moving
9987          * just the lvds port away to another pipe the sw tracking won't match.
9988          *
9989          * Proper atomic modesets with recomputed global state will fix this.
9990          * Until then just don't check gmch state for inherited modes.
9991          */
9992         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
9993                 PIPE_CONF_CHECK_I(gmch_pfit.control);
9994                 /* pfit ratios are autocomputed by the hw on gen4+ */
9995                 if (INTEL_INFO(dev)->gen < 4)
9996                         PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9997                 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9998         }
9999
10000         PIPE_CONF_CHECK_I(pch_pfit.enabled);
10001         if (current_config->pch_pfit.enabled) {
10002                 PIPE_CONF_CHECK_I(pch_pfit.pos);
10003                 PIPE_CONF_CHECK_I(pch_pfit.size);
10004         }
10005
10006         /* BDW+ don't expose a synchronous way to read the state */
10007         if (IS_HASWELL(dev))
10008                 PIPE_CONF_CHECK_I(ips_enabled);
10009
10010         PIPE_CONF_CHECK_I(double_wide);
10011
10012         PIPE_CONF_CHECK_I(shared_dpll);
10013         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10014         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10015         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10016         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10017
10018         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10019                 PIPE_CONF_CHECK_I(pipe_bpp);
10020
10021         PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10022         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10023
10024 #undef PIPE_CONF_CHECK_X
10025 #undef PIPE_CONF_CHECK_I
10026 #undef PIPE_CONF_CHECK_FLAGS
10027 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
10028 #undef PIPE_CONF_QUIRK
10029
10030         return true;
10031 }
10032
10033 static void
10034 check_connector_state(struct drm_device *dev)
10035 {
10036         struct intel_connector *connector;
10037
10038         list_for_each_entry(connector, &dev->mode_config.connector_list,
10039                             base.head) {
10040                 /* This also checks the encoder/connector hw state with the
10041                  * ->get_hw_state callbacks. */
10042                 intel_connector_check_state(connector);
10043
10044                 WARN(&connector->new_encoder->base != connector->base.encoder,
10045                      "connector's staged encoder doesn't match current encoder\n");
10046         }
10047 }
10048
10049 static void
10050 check_encoder_state(struct drm_device *dev)
10051 {
10052         struct intel_encoder *encoder;
10053         struct intel_connector *connector;
10054
10055         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10056                             base.head) {
10057                 bool enabled = false;
10058                 bool active = false;
10059                 enum i915_pipe pipe, tracked_pipe;
10060
10061                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10062                               encoder->base.base.id,
10063                               encoder->base.name);
10064
10065                 WARN(&encoder->new_crtc->base != encoder->base.crtc,
10066                      "encoder's stage crtc doesn't match current crtc\n");
10067                 WARN(encoder->connectors_active && !encoder->base.crtc,
10068                      "encoder's active_connectors set, but no crtc\n");
10069
10070                 list_for_each_entry(connector, &dev->mode_config.connector_list,
10071                                     base.head) {
10072                         if (connector->base.encoder != &encoder->base)
10073                                 continue;
10074                         enabled = true;
10075                         if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10076                                 active = true;
10077                 }
10078                 WARN(!!encoder->base.crtc != enabled,
10079                      "encoder's enabled state mismatch "
10080                      "(expected %i, found %i)\n",
10081                      !!encoder->base.crtc, enabled);
10082                 WARN(active && !encoder->base.crtc,
10083                      "active encoder with no crtc\n");
10084
10085                 WARN(encoder->connectors_active != active,
10086                      "encoder's computed active state doesn't match tracked active state "
10087                      "(expected %i, found %i)\n", active, encoder->connectors_active);
10088
10089                 active = encoder->get_hw_state(encoder, &pipe);
10090                 WARN(active != encoder->connectors_active,
10091                      "encoder's hw state doesn't match sw tracking "
10092                      "(expected %i, found %i)\n",
10093                      encoder->connectors_active, active);
10094
10095                 if (!encoder->base.crtc)
10096                         continue;
10097
10098                 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10099                 WARN(active && pipe != tracked_pipe,
10100                      "active encoder's pipe doesn't match"
10101                      "(expected %i, found %i)\n",
10102                      tracked_pipe, pipe);
10103
10104         }
10105 }
10106
10107 static void
10108 check_crtc_state(struct drm_device *dev)
10109 {
10110         struct drm_i915_private *dev_priv = dev->dev_private;
10111         struct intel_crtc *crtc;
10112         struct intel_encoder *encoder;
10113         struct intel_crtc_config pipe_config;
10114
10115         for_each_intel_crtc(dev, crtc) {
10116                 bool enabled = false;
10117                 bool active = false;
10118
10119                 memset(&pipe_config, 0, sizeof(pipe_config));
10120
10121                 DRM_DEBUG_KMS("[CRTC:%d]\n",
10122                               crtc->base.base.id);
10123
10124                 WARN(crtc->active && !crtc->base.enabled,
10125                      "active crtc, but not enabled in sw tracking\n");
10126
10127                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10128                                     base.head) {
10129                         if (encoder->base.crtc != &crtc->base)
10130                                 continue;
10131                         enabled = true;
10132                         if (encoder->connectors_active)
10133                                 active = true;
10134                 }
10135
10136                 WARN(active != crtc->active,
10137                      "crtc's computed active state doesn't match tracked active state "
10138                      "(expected %i, found %i)\n", active, crtc->active);
10139                 WARN(enabled != crtc->base.enabled,
10140                      "crtc's computed enabled state doesn't match tracked enabled state "
10141                      "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10142
10143                 active = dev_priv->display.get_pipe_config(crtc,
10144                                                            &pipe_config);
10145
10146                 /* hw state is inconsistent with the pipe A quirk */
10147                 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
10148                         active = crtc->active;
10149
10150                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10151                                     base.head) {
10152                         enum i915_pipe pipe;
10153                         if (encoder->base.crtc != &crtc->base)
10154                                 continue;
10155                         if (encoder->get_hw_state(encoder, &pipe))
10156                                 encoder->get_config(encoder, &pipe_config);
10157                 }
10158
10159                 WARN(crtc->active != active,
10160                      "crtc active state doesn't match with hw state "
10161                      "(expected %i, found %i)\n", crtc->active, active);
10162
10163                 if (active &&
10164                     !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10165                         WARN(1, "pipe state doesn't match!\n");
10166                         intel_dump_pipe_config(crtc, &pipe_config,
10167                                                "[hw state]");
10168                         intel_dump_pipe_config(crtc, &crtc->config,
10169                                                "[sw state]");
10170                 }
10171         }
10172 }
10173
10174 static void
10175 check_shared_dpll_state(struct drm_device *dev)
10176 {
10177         struct drm_i915_private *dev_priv = dev->dev_private;
10178         struct intel_crtc *crtc;
10179         struct intel_dpll_hw_state dpll_hw_state;
10180         int i;
10181
10182         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10183                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10184                 int enabled_crtcs = 0, active_crtcs = 0;
10185                 bool active;
10186
10187                 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10188
10189                 DRM_DEBUG_KMS("%s\n", pll->name);
10190
10191                 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10192
10193                 WARN(pll->active > pll->refcount,
10194                      "more active pll users than references: %i vs %i\n",
10195                      pll->active, pll->refcount);
10196                 WARN(pll->active && !pll->on,
10197                      "pll in active use but not on in sw tracking\n");
10198                 WARN(pll->on && !pll->active,
10199                      "pll in on but not on in use in sw tracking\n");
10200                 WARN(pll->on != active,
10201                      "pll on state mismatch (expected %i, found %i)\n",
10202                      pll->on, active);
10203
10204                 for_each_intel_crtc(dev, crtc) {
10205                         if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10206                                 enabled_crtcs++;
10207                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10208                                 active_crtcs++;
10209                 }
10210                 WARN(pll->active != active_crtcs,
10211                      "pll active crtcs mismatch (expected %i, found %i)\n",
10212                      pll->active, active_crtcs);
10213                 WARN(pll->refcount != enabled_crtcs,
10214                      "pll enabled crtcs mismatch (expected %i, found %i)\n",
10215                      pll->refcount, enabled_crtcs);
10216
10217                 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
10218                                        sizeof(dpll_hw_state)),
10219                      "pll hw state mismatch\n");
10220         }
10221 }
10222
10223 void
10224 intel_modeset_check_state(struct drm_device *dev)
10225 {
10226         check_connector_state(dev);
10227         check_encoder_state(dev);
10228         check_crtc_state(dev);
10229         check_shared_dpll_state(dev);
10230 }
10231
10232 void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10233                                      int dotclock)
10234 {
10235         /*
10236          * FDI already provided one idea for the dotclock.
10237          * Yell if the encoder disagrees.
10238          */
10239         WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10240              "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10241              pipe_config->adjusted_mode.crtc_clock, dotclock);
10242 }
10243
10244 static void update_scanline_offset(struct intel_crtc *crtc)
10245 {
10246         struct drm_device *dev = crtc->base.dev;
10247
10248         /*
10249          * The scanline counter increments at the leading edge of hsync.
10250          *
10251          * On most platforms it starts counting from vtotal-1 on the
10252          * first active line. That means the scanline counter value is
10253          * always one less than what we would expect. Ie. just after
10254          * start of vblank, which also occurs at start of hsync (on the
10255          * last active line), the scanline counter will read vblank_start-1.
10256          *
10257          * On gen2 the scanline counter starts counting from 1 instead
10258          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10259          * to keep the value positive), instead of adding one.
10260          *
10261          * On HSW+ the behaviour of the scanline counter depends on the output
10262          * type. For DP ports it behaves like most other platforms, but on HDMI
10263          * there's an extra 1 line difference. So we need to add two instead of
10264          * one to the value.
10265          */
10266         if (IS_GEN2(dev)) {
10267                 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10268                 int vtotal;
10269
10270                 vtotal = mode->crtc_vtotal;
10271                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10272                         vtotal /= 2;
10273
10274                 crtc->scanline_offset = vtotal - 1;
10275         } else if (HAS_DDI(dev) &&
10276                    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
10277                 crtc->scanline_offset = 2;
10278         } else
10279                 crtc->scanline_offset = 1;
10280 }
10281
10282 static int __intel_set_mode(struct drm_crtc *crtc,
10283                             struct drm_display_mode *mode,
10284                             int x, int y, struct drm_framebuffer *fb)
10285 {
10286         struct drm_device *dev = crtc->dev;
10287         struct drm_i915_private *dev_priv = dev->dev_private;
10288         struct drm_display_mode *saved_mode;
10289         struct intel_crtc_config *pipe_config = NULL;
10290         struct intel_crtc *intel_crtc;
10291         unsigned disable_pipes, prepare_pipes, modeset_pipes;
10292         int ret = 0;
10293
10294         saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK);
10295         if (!saved_mode)
10296                 return -ENOMEM;
10297
10298         intel_modeset_affected_pipes(crtc, &modeset_pipes,
10299                                      &prepare_pipes, &disable_pipes);
10300
10301         *saved_mode = crtc->mode;
10302
10303         /* Hack: Because we don't (yet) support global modeset on multiple
10304          * crtcs, we don't keep track of the new mode for more than one crtc.
10305          * Hence simply check whether any bit is set in modeset_pipes in all the
10306          * pieces of code that are not yet converted to deal with mutliple crtcs
10307          * changing their mode at the same time. */
10308         if (modeset_pipes) {
10309                 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10310                 if (IS_ERR(pipe_config)) {
10311                         ret = PTR_ERR(pipe_config);
10312                         pipe_config = NULL;
10313
10314                         goto out;
10315                 }
10316                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10317                                        "[modeset]");
10318                 to_intel_crtc(crtc)->new_config = pipe_config;
10319         }
10320
10321         /*
10322          * See if the config requires any additional preparation, e.g.
10323          * to adjust global state with pipes off.  We need to do this
10324          * here so we can get the modeset_pipe updated config for the new
10325          * mode set on this crtc.  For other crtcs we need to use the
10326          * adjusted_mode bits in the crtc directly.
10327          */
10328         if (IS_VALLEYVIEW(dev)) {
10329                 valleyview_modeset_global_pipes(dev, &prepare_pipes);
10330
10331                 /* may have added more to prepare_pipes than we should */
10332                 prepare_pipes &= ~disable_pipes;
10333         }
10334
10335         for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10336                 intel_crtc_disable(&intel_crtc->base);
10337
10338         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10339                 if (intel_crtc->base.enabled)
10340                         dev_priv->display.crtc_disable(&intel_crtc->base);
10341         }
10342
10343         /* crtc->mode is already used by the ->mode_set callbacks, hence we need
10344          * to set it here already despite that we pass it down the callchain.
10345          */
10346         if (modeset_pipes) {
10347                 crtc->mode = *mode;
10348                 /* mode_set/enable/disable functions rely on a correct pipe
10349                  * config. */
10350                 to_intel_crtc(crtc)->config = *pipe_config;
10351                 to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
10352
10353                 /*
10354                  * Calculate and store various constants which
10355                  * are later needed by vblank and swap-completion
10356                  * timestamping. They are derived from true hwmode.
10357                  */
10358                 drm_calc_timestamping_constants(crtc,
10359                                                 &pipe_config->adjusted_mode);
10360         }
10361
10362         /* Only after disabling all output pipelines that will be changed can we
10363          * update the the output configuration. */
10364         intel_modeset_update_state(dev, prepare_pipes);
10365
10366         if (dev_priv->display.modeset_global_resources)
10367                 dev_priv->display.modeset_global_resources(dev);
10368
10369         /* Set up the DPLL and any encoders state that needs to adjust or depend
10370          * on the DPLL.
10371          */
10372         for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10373                 struct drm_framebuffer *old_fb;
10374
10375                 mutex_lock(&dev->struct_mutex);
10376                 ret = intel_pin_and_fence_fb_obj(dev,
10377                                                  to_intel_framebuffer(fb)->obj,
10378                                                  NULL);
10379                 if (ret != 0) {
10380                         DRM_ERROR("pin & fence failed\n");
10381                         mutex_unlock(&dev->struct_mutex);
10382                         goto done;
10383                 }
10384                 old_fb = crtc->primary->fb;
10385                 if (old_fb)
10386                         intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
10387                 mutex_unlock(&dev->struct_mutex);
10388
10389                 crtc->primary->fb = fb;
10390                 crtc->x = x;
10391                 crtc->y = y;
10392
10393                 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
10394                                                       x, y, fb);
10395                 if (ret)
10396                         goto done;
10397         }
10398
10399         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10400         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10401                 update_scanline_offset(intel_crtc);
10402
10403                 dev_priv->display.crtc_enable(&intel_crtc->base);
10404         }
10405
10406         /* FIXME: add subpixel order */
10407 done:
10408         if (ret && crtc->enabled)
10409                 crtc->mode = *saved_mode;
10410
10411 out:
10412         kfree(pipe_config);
10413         kfree(saved_mode);
10414         return ret;
10415 }
10416
10417 static int intel_set_mode(struct drm_crtc *crtc,
10418                           struct drm_display_mode *mode,
10419                           int x, int y, struct drm_framebuffer *fb)
10420 {
10421         int ret;
10422
10423         ret = __intel_set_mode(crtc, mode, x, y, fb);
10424
10425         if (ret == 0)
10426                 intel_modeset_check_state(crtc->dev);
10427
10428         return ret;
10429 }
10430
10431 void intel_crtc_restore_mode(struct drm_crtc *crtc)
10432 {
10433         intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
10434 }
10435
10436 #undef for_each_intel_crtc_masked
10437
10438 static void intel_set_config_free(struct intel_set_config *config)
10439 {
10440         if (!config)
10441                 return;
10442
10443         kfree(config->save_connector_encoders);
10444         kfree(config->save_encoder_crtcs);
10445         kfree(config->save_crtc_enabled);
10446         kfree(config);
10447 }
10448
10449 static int intel_set_config_save_state(struct drm_device *dev,
10450                                        struct intel_set_config *config)
10451 {
10452         struct drm_crtc *crtc;
10453         struct drm_encoder *encoder;
10454         struct drm_connector *connector;
10455         int count;
10456
10457         config->save_crtc_enabled =
10458                 kcalloc(dev->mode_config.num_crtc,
10459                         sizeof(bool), GFP_KERNEL);
10460         if (!config->save_crtc_enabled)
10461                 return -ENOMEM;
10462
10463         config->save_encoder_crtcs =
10464                 kcalloc(dev->mode_config.num_encoder,
10465                         sizeof(struct drm_crtc *), GFP_KERNEL);
10466         if (!config->save_encoder_crtcs)
10467                 return -ENOMEM;
10468
10469         config->save_connector_encoders =
10470                 kcalloc(dev->mode_config.num_connector,
10471                         sizeof(struct drm_encoder *), GFP_KERNEL);
10472         if (!config->save_connector_encoders)
10473                 return -ENOMEM;
10474
10475         /* Copy data. Note that driver private data is not affected.
10476          * Should anything bad happen only the expected state is
10477          * restored, not the drivers personal bookkeeping.
10478          */
10479         count = 0;
10480         for_each_crtc(dev, crtc) {
10481                 config->save_crtc_enabled[count++] = crtc->enabled;
10482         }
10483
10484         count = 0;
10485         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
10486                 config->save_encoder_crtcs[count++] = encoder->crtc;
10487         }
10488
10489         count = 0;
10490         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10491                 config->save_connector_encoders[count++] = connector->encoder;
10492         }
10493
10494         return 0;
10495 }
10496
10497 static void intel_set_config_restore_state(struct drm_device *dev,
10498                                            struct intel_set_config *config)
10499 {
10500         struct intel_crtc *crtc;
10501         struct intel_encoder *encoder;
10502         struct intel_connector *connector;
10503         int count;
10504
10505         count = 0;
10506         for_each_intel_crtc(dev, crtc) {
10507                 crtc->new_enabled = config->save_crtc_enabled[count++];
10508
10509                 if (crtc->new_enabled)
10510                         crtc->new_config = &crtc->config;
10511                 else
10512                         crtc->new_config = NULL;
10513         }
10514
10515         count = 0;
10516         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10517                 encoder->new_crtc =
10518                         to_intel_crtc(config->save_encoder_crtcs[count++]);
10519         }
10520
10521         count = 0;
10522         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10523                 connector->new_encoder =
10524                         to_intel_encoder(config->save_connector_encoders[count++]);
10525         }
10526 }
10527
10528 static bool
10529 is_crtc_connector_off(struct drm_mode_set *set)
10530 {
10531         int i;
10532
10533         if (set->num_connectors == 0)
10534                 return false;
10535
10536         if (WARN_ON(set->connectors == NULL))
10537                 return false;
10538
10539         for (i = 0; i < set->num_connectors; i++)
10540                 if (set->connectors[i]->encoder &&
10541                     set->connectors[i]->encoder->crtc == set->crtc &&
10542                     set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
10543                         return true;
10544
10545         return false;
10546 }
10547
10548 static void
10549 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
10550                                       struct intel_set_config *config)
10551 {
10552
10553         /* We should be able to check here if the fb has the same properties
10554          * and then just flip_or_move it */
10555         if (is_crtc_connector_off(set)) {
10556                 config->mode_changed = true;
10557         } else if (set->crtc->primary->fb != set->fb) {
10558                 /* If we have no fb then treat it as a full mode set */
10559                 if (set->crtc->primary->fb == NULL) {
10560                         struct intel_crtc *intel_crtc =
10561                                 to_intel_crtc(set->crtc);
10562
10563                         if (intel_crtc->active && i915.fastboot) {
10564                                 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
10565                                 config->fb_changed = true;
10566                         } else {
10567                                 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
10568                                 config->mode_changed = true;
10569                         }
10570                 } else if (set->fb == NULL) {
10571                         config->mode_changed = true;
10572                 } else if (set->fb->pixel_format !=
10573                            set->crtc->primary->fb->pixel_format) {
10574                         config->mode_changed = true;
10575                 } else {
10576                         config->fb_changed = true;
10577                 }
10578         }
10579
10580         if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
10581                 config->fb_changed = true;
10582
10583         if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
10584                 DRM_DEBUG_KMS("modes are different, full mode set\n");
10585                 drm_mode_debug_printmodeline(&set->crtc->mode);
10586                 drm_mode_debug_printmodeline(set->mode);
10587                 config->mode_changed = true;
10588         }
10589
10590         DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
10591                         set->crtc->base.id, config->mode_changed, config->fb_changed);
10592 }
10593
10594 static int
10595 intel_modeset_stage_output_state(struct drm_device *dev,
10596                                  struct drm_mode_set *set,
10597                                  struct intel_set_config *config)
10598 {
10599         struct intel_connector *connector;
10600         struct intel_encoder *encoder;
10601         struct intel_crtc *crtc;
10602         int ro;
10603
10604         /* The upper layers ensure that we either disable a crtc or have a list
10605          * of connectors. For paranoia, double-check this. */
10606         WARN_ON(!set->fb && (set->num_connectors != 0));
10607         WARN_ON(set->fb && (set->num_connectors == 0));
10608
10609         list_for_each_entry(connector, &dev->mode_config.connector_list,
10610                             base.head) {
10611                 /* Otherwise traverse passed in connector list and get encoders
10612                  * for them. */
10613                 for (ro = 0; ro < set->num_connectors; ro++) {
10614                         if (set->connectors[ro] == &connector->base) {
10615                                 connector->new_encoder = connector->encoder;
10616                                 break;
10617                         }
10618                 }
10619
10620                 /* If we disable the crtc, disable all its connectors. Also, if
10621                  * the connector is on the changing crtc but not on the new
10622                  * connector list, disable it. */
10623                 if ((!set->fb || ro == set->num_connectors) &&
10624                     connector->base.encoder &&
10625                     connector->base.encoder->crtc == set->crtc) {
10626                         connector->new_encoder = NULL;
10627
10628                         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
10629                                 connector->base.base.id,
10630                                 connector->base.name);
10631                 }
10632
10633
10634                 if (&connector->new_encoder->base != connector->base.encoder) {
10635                         DRM_DEBUG_KMS("encoder changed, full mode switch\n");
10636                         config->mode_changed = true;
10637                 }
10638         }
10639         /* connector->new_encoder is now updated for all connectors. */
10640
10641         /* Update crtc of enabled connectors. */
10642         list_for_each_entry(connector, &dev->mode_config.connector_list,
10643                             base.head) {
10644                 struct drm_crtc *new_crtc;
10645
10646                 if (!connector->new_encoder)
10647                         continue;
10648
10649                 new_crtc = connector->new_encoder->base.crtc;
10650
10651                 for (ro = 0; ro < set->num_connectors; ro++) {
10652                         if (set->connectors[ro] == &connector->base)
10653                                 new_crtc = set->crtc;
10654                 }
10655
10656                 /* Make sure the new CRTC will work with the encoder */
10657                 if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
10658                                          new_crtc)) {
10659                         return -EINVAL;
10660                 }
10661                 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
10662
10663                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
10664                         connector->base.base.id,
10665                         connector->base.name,
10666                         new_crtc->base.id);
10667         }
10668
10669         /* Check for any encoders that needs to be disabled. */
10670         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10671                             base.head) {
10672                 int num_connectors = 0;
10673                 list_for_each_entry(connector,
10674                                     &dev->mode_config.connector_list,
10675                                     base.head) {
10676                         if (connector->new_encoder == encoder) {
10677                                 WARN_ON(!connector->new_encoder->new_crtc);
10678                                 num_connectors++;
10679                         }
10680                 }
10681
10682                 if (num_connectors == 0)
10683                         encoder->new_crtc = NULL;
10684                 else if (num_connectors > 1)
10685                         return -EINVAL;
10686
10687                 /* Only now check for crtc changes so we don't miss encoders
10688                  * that will be disabled. */
10689                 if (&encoder->new_crtc->base != encoder->base.crtc) {
10690                         DRM_DEBUG_KMS("crtc changed, full mode switch\n");
10691                         config->mode_changed = true;
10692                 }
10693         }
10694         /* Now we've also updated encoder->new_crtc for all encoders. */
10695
10696         for_each_intel_crtc(dev, crtc) {
10697                 crtc->new_enabled = false;
10698
10699                 list_for_each_entry(encoder,
10700                                     &dev->mode_config.encoder_list,
10701                                     base.head) {
10702                         if (encoder->new_crtc == crtc) {
10703                                 crtc->new_enabled = true;
10704                                 break;
10705                         }
10706                 }
10707
10708                 if (crtc->new_enabled != crtc->base.enabled) {
10709                         DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
10710                                       crtc->new_enabled ? "en" : "dis");
10711                         config->mode_changed = true;
10712                 }
10713
10714                 if (crtc->new_enabled)
10715                         crtc->new_config = &crtc->config;
10716                 else
10717                         crtc->new_config = NULL;
10718         }
10719
10720         return 0;
10721 }
10722
10723 static void disable_crtc_nofb(struct intel_crtc *crtc)
10724 {
10725         struct drm_device *dev = crtc->base.dev;
10726         struct intel_encoder *encoder;
10727         struct intel_connector *connector;
10728
10729         DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
10730                       pipe_name(crtc->pipe));
10731
10732         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10733                 if (connector->new_encoder &&
10734                     connector->new_encoder->new_crtc == crtc)
10735                         connector->new_encoder = NULL;
10736         }
10737
10738         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10739                 if (encoder->new_crtc == crtc)
10740                         encoder->new_crtc = NULL;
10741         }
10742
10743         crtc->new_enabled = false;
10744         crtc->new_config = NULL;
10745 }
10746
10747 static int intel_crtc_set_config(struct drm_mode_set *set)
10748 {
10749         struct drm_device *dev;
10750         struct drm_mode_set save_set;
10751         struct intel_set_config *config;
10752         int ret;
10753
10754         BUG_ON(!set);
10755         BUG_ON(!set->crtc);
10756         BUG_ON(!set->crtc->helper_private);
10757
10758         /* Enforce sane interface api - has been abused by the fb helper. */
10759         BUG_ON(!set->mode && set->fb);
10760         BUG_ON(set->fb && set->num_connectors == 0);
10761
10762         if (set->fb) {
10763                 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
10764                                 set->crtc->base.id, set->fb->base.id,
10765                                 (int)set->num_connectors, set->x, set->y);
10766         } else {
10767                 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
10768         }
10769
10770         dev = set->crtc->dev;
10771
10772         ret = -ENOMEM;
10773         config = kzalloc(sizeof(*config), GFP_KERNEL);
10774         if (!config)
10775                 goto out_config;
10776
10777         ret = intel_set_config_save_state(dev, config);
10778         if (ret)
10779                 goto out_config;
10780
10781         save_set.crtc = set->crtc;
10782         save_set.mode = &set->crtc->mode;
10783         save_set.x = set->crtc->x;
10784         save_set.y = set->crtc->y;
10785         save_set.fb = set->crtc->primary->fb;
10786
10787         /* Compute whether we need a full modeset, only an fb base update or no
10788          * change at all. In the future we might also check whether only the
10789          * mode changed, e.g. for LVDS where we only change the panel fitter in
10790          * such cases. */
10791         intel_set_config_compute_mode_changes(set, config);
10792
10793         ret = intel_modeset_stage_output_state(dev, set, config);
10794         if (ret)
10795                 goto fail;
10796
10797         if (config->mode_changed) {
10798                 ret = intel_set_mode(set->crtc, set->mode,
10799                                      set->x, set->y, set->fb);
10800         } else if (config->fb_changed) {
10801                 intel_crtc_wait_for_pending_flips(set->crtc);
10802
10803                 ret = intel_pipe_set_base(set->crtc,
10804                                           set->x, set->y, set->fb);
10805                 /*
10806                  * In the fastboot case this may be our only check of the
10807                  * state after boot.  It would be better to only do it on
10808                  * the first update, but we don't have a nice way of doing that
10809                  * (and really, set_config isn't used much for high freq page
10810                  * flipping, so increasing its cost here shouldn't be a big
10811                  * deal).
10812                  */
10813                 if (i915.fastboot && ret == 0)
10814                         intel_modeset_check_state(set->crtc->dev);
10815         }
10816
10817         if (ret) {
10818                 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
10819                               set->crtc->base.id, ret);
10820 fail:
10821                 intel_set_config_restore_state(dev, config);
10822
10823                 /*
10824                  * HACK: if the pipe was on, but we didn't have a framebuffer,
10825                  * force the pipe off to avoid oopsing in the modeset code
10826                  * due to fb==NULL. This should only happen during boot since
10827                  * we don't yet reconstruct the FB from the hardware state.
10828                  */
10829                 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
10830                         disable_crtc_nofb(to_intel_crtc(save_set.crtc));
10831
10832                 /* Try to restore the config */
10833                 if (config->mode_changed &&
10834                     intel_set_mode(save_set.crtc, save_set.mode,
10835                                    save_set.x, save_set.y, save_set.fb))
10836                         DRM_ERROR("failed to restore config after modeset failure\n");
10837         }
10838
10839 out_config:
10840         intel_set_config_free(config);
10841         return ret;
10842 }
10843
10844 static const struct drm_crtc_funcs intel_crtc_funcs = {
10845         .cursor_set = intel_crtc_cursor_set,
10846         .cursor_move = intel_crtc_cursor_move,
10847         .gamma_set = intel_crtc_gamma_set,
10848         .set_config = intel_crtc_set_config,
10849         .destroy = intel_crtc_destroy,
10850         .page_flip = intel_crtc_page_flip,
10851 };
10852
10853 static void intel_cpu_pll_init(struct drm_device *dev)
10854 {
10855         if (HAS_DDI(dev))
10856                 intel_ddi_pll_init(dev);
10857 }
10858
10859 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10860                                       struct intel_shared_dpll *pll,
10861                                       struct intel_dpll_hw_state *hw_state)
10862 {
10863         uint32_t val;
10864
10865         val = I915_READ(PCH_DPLL(pll->id));
10866         hw_state->dpll = val;
10867         hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10868         hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
10869
10870         return val & DPLL_VCO_ENABLE;
10871 }
10872
10873 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
10874                                   struct intel_shared_dpll *pll)
10875 {
10876         I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
10877         I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
10878 }
10879
10880 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10881                                 struct intel_shared_dpll *pll)
10882 {
10883         /* PCH refclock must be enabled first */
10884         ibx_assert_pch_refclk_enabled(dev_priv);
10885
10886         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10887
10888         /* Wait for the clocks to stabilize. */
10889         POSTING_READ(PCH_DPLL(pll->id));
10890         udelay(150);
10891
10892         /* The pixel multiplier can only be updated once the
10893          * DPLL is enabled and the clocks are stable.
10894          *
10895          * So write it again.
10896          */
10897         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10898         POSTING_READ(PCH_DPLL(pll->id));
10899         udelay(200);
10900 }
10901
10902 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
10903                                  struct intel_shared_dpll *pll)
10904 {
10905         struct drm_device *dev = dev_priv->dev;
10906         struct intel_crtc *crtc;
10907
10908         /* Make sure no transcoder isn't still depending on us. */
10909         for_each_intel_crtc(dev, crtc) {
10910                 if (intel_crtc_to_shared_dpll(crtc) == pll)
10911                         assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
10912         }
10913
10914         I915_WRITE(PCH_DPLL(pll->id), 0);
10915         POSTING_READ(PCH_DPLL(pll->id));
10916         udelay(200);
10917 }
10918
10919 static char *ibx_pch_dpll_names[] = {
10920         "PCH DPLL A",
10921         "PCH DPLL B",
10922 };
10923
10924 static void ibx_pch_dpll_init(struct drm_device *dev)
10925 {
10926         struct drm_i915_private *dev_priv = dev->dev_private;
10927         int i;
10928
10929         dev_priv->num_shared_dpll = 2;
10930
10931         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10932                 dev_priv->shared_dplls[i].id = i;
10933                 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
10934                 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
10935                 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
10936                 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
10937                 dev_priv->shared_dplls[i].get_hw_state =
10938                         ibx_pch_dpll_get_hw_state;
10939         }
10940 }
10941
10942 static void intel_shared_dpll_init(struct drm_device *dev)
10943 {
10944         struct drm_i915_private *dev_priv = dev->dev_private;
10945
10946         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10947                 ibx_pch_dpll_init(dev);
10948         else
10949                 dev_priv->num_shared_dpll = 0;
10950
10951         BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10952 }
10953
10954 static void intel_crtc_init(struct drm_device *dev, int pipe)
10955 {
10956         struct drm_i915_private *dev_priv = dev->dev_private;
10957         struct intel_crtc *intel_crtc;
10958         int i;
10959
10960         intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
10961         if (intel_crtc == NULL)
10962                 return;
10963
10964         drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10965
10966         drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10967         for (i = 0; i < 256; i++) {
10968                 intel_crtc->lut_r[i] = i;
10969                 intel_crtc->lut_g[i] = i;
10970                 intel_crtc->lut_b[i] = i;
10971         }
10972
10973         /*
10974          * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
10975          * is hooked to plane B. Hence we want plane A feeding pipe B.
10976          */
10977         intel_crtc->pipe = pipe;
10978         intel_crtc->plane = pipe;
10979         if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
10980                 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10981                 intel_crtc->plane = !pipe;
10982         }
10983
10984         intel_crtc->cursor_base = ~0;
10985         intel_crtc->cursor_cntl = ~0;
10986
10987         init_waitqueue_head(&intel_crtc->vbl_wait);
10988
10989         BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
10990                dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
10991         dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10992         dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
10993
10994         drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10995
10996         WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
10997 }
10998
10999 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
11000 {
11001         struct drm_encoder *encoder = connector->base.encoder;
11002         struct drm_device *dev = connector->base.dev;
11003
11004         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
11005
11006         if (!encoder)
11007                 return INVALID_PIPE;
11008
11009         return to_intel_crtc(encoder->crtc)->pipe;
11010 }
11011
11012 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
11013                                 struct drm_file *file)
11014 {
11015         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11016         struct drm_mode_object *drmmode_obj;
11017         struct intel_crtc *crtc;
11018
11019         if (!drm_core_check_feature(dev, DRIVER_MODESET))
11020                 return -ENODEV;
11021
11022         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
11023                         DRM_MODE_OBJECT_CRTC);
11024
11025         if (!drmmode_obj) {
11026                 DRM_ERROR("no such CRTC id\n");
11027                 return -ENOENT;
11028         }
11029
11030         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
11031         pipe_from_crtc_id->pipe = crtc->pipe;
11032
11033         return 0;
11034 }
11035
11036 static int intel_encoder_clones(struct intel_encoder *encoder)
11037 {
11038         struct drm_device *dev = encoder->base.dev;
11039         struct intel_encoder *source_encoder;
11040         int index_mask = 0;
11041         int entry = 0;
11042
11043         list_for_each_entry(source_encoder,
11044                             &dev->mode_config.encoder_list, base.head) {
11045                 if (encoders_cloneable(encoder, source_encoder))
11046                         index_mask |= (1 << entry);
11047
11048                 entry++;
11049         }
11050
11051         return index_mask;
11052 }
11053
11054 static bool has_edp_a(struct drm_device *dev)
11055 {
11056         struct drm_i915_private *dev_priv = dev->dev_private;
11057
11058         if (!IS_MOBILE(dev))
11059                 return false;
11060
11061         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
11062                 return false;
11063
11064         if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
11065                 return false;
11066
11067         return true;
11068 }
11069
11070 const char *intel_output_name(int output)
11071 {
11072         static const char *names[] = {
11073                 [INTEL_OUTPUT_UNUSED] = "Unused",
11074                 [INTEL_OUTPUT_ANALOG] = "Analog",
11075                 [INTEL_OUTPUT_DVO] = "DVO",
11076                 [INTEL_OUTPUT_SDVO] = "SDVO",
11077                 [INTEL_OUTPUT_LVDS] = "LVDS",
11078                 [INTEL_OUTPUT_TVOUT] = "TV",
11079                 [INTEL_OUTPUT_HDMI] = "HDMI",
11080                 [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
11081                 [INTEL_OUTPUT_EDP] = "eDP",
11082                 [INTEL_OUTPUT_DSI] = "DSI",
11083                 [INTEL_OUTPUT_UNKNOWN] = "Unknown",
11084         };
11085
11086         if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
11087                 return "Invalid";
11088
11089         return names[output];
11090 }
11091
11092 static bool intel_crt_present(struct drm_device *dev)
11093 {
11094         struct drm_i915_private *dev_priv = dev->dev_private;
11095
11096         if (IS_ULT(dev))
11097                 return false;
11098
11099         if (IS_CHERRYVIEW(dev))
11100                 return false;
11101
11102         if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
11103                 return false;
11104
11105         return true;
11106 }
11107
11108 static void intel_setup_outputs(struct drm_device *dev)
11109 {
11110         struct drm_i915_private *dev_priv = dev->dev_private;
11111         struct intel_encoder *encoder;
11112         bool dpd_is_edp = false;
11113
11114         intel_lvds_init(dev);
11115
11116         if (intel_crt_present(dev))
11117                 intel_crt_init(dev);
11118
11119         if (HAS_DDI(dev)) {
11120                 int found;
11121
11122                 /* Haswell uses DDI functions to detect digital outputs */
11123                 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
11124                 /* DDI A only supports eDP */
11125                 if (found)
11126                         intel_ddi_init(dev, PORT_A);
11127
11128                 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
11129                  * register */
11130                 found = I915_READ(SFUSE_STRAP);
11131
11132                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11133                         intel_ddi_init(dev, PORT_B);
11134                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11135                         intel_ddi_init(dev, PORT_C);
11136                 if (found & SFUSE_STRAP_DDID_DETECTED)
11137                         intel_ddi_init(dev, PORT_D);
11138         } else if (HAS_PCH_SPLIT(dev)) {
11139                 int found;
11140                 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
11141
11142                 if (has_edp_a(dev))
11143                         intel_dp_init(dev, DP_A, PORT_A);
11144
11145                 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
11146                         /* PCH SDVOB multiplex with HDMIB */
11147                         found = intel_sdvo_init(dev, PCH_SDVOB, true);
11148                         if (!found)
11149                                 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
11150                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
11151                                 intel_dp_init(dev, PCH_DP_B, PORT_B);
11152                 }
11153
11154                 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
11155                         intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
11156
11157                 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
11158                         intel_hdmi_init(dev, PCH_HDMID, PORT_D);
11159
11160                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
11161                         intel_dp_init(dev, PCH_DP_C, PORT_C);
11162
11163                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
11164                         intel_dp_init(dev, PCH_DP_D, PORT_D);
11165         } else if (IS_VALLEYVIEW(dev)) {
11166                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
11167                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
11168                                         PORT_B);
11169                         if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
11170                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
11171                 }
11172
11173                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
11174                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
11175                                         PORT_C);
11176                         if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
11177                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
11178                 }
11179
11180                 if (IS_CHERRYVIEW(dev)) {
11181                         if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
11182                                 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
11183                                                 PORT_D);
11184                                 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
11185                                         intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
11186                         }
11187                 }
11188
11189                 intel_dsi_init(dev);
11190         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
11191                 bool found = false;
11192
11193                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11194                         DRM_DEBUG_KMS("probing SDVOB\n");
11195                         found = intel_sdvo_init(dev, GEN3_SDVOB, true);
11196                         if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
11197                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
11198                                 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
11199                         }
11200
11201                         if (!found && SUPPORTS_INTEGRATED_DP(dev))
11202                                 intel_dp_init(dev, DP_B, PORT_B);
11203                 }
11204
11205                 /* Before G4X SDVOC doesn't have its own detect register */
11206
11207                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11208                         DRM_DEBUG_KMS("probing SDVOC\n");
11209                         found = intel_sdvo_init(dev, GEN3_SDVOC, false);
11210                 }
11211
11212                 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
11213
11214                         if (SUPPORTS_INTEGRATED_HDMI(dev)) {
11215                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
11216                                 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
11217                         }
11218                         if (SUPPORTS_INTEGRATED_DP(dev))
11219                                 intel_dp_init(dev, DP_C, PORT_C);
11220                 }
11221
11222                 if (SUPPORTS_INTEGRATED_DP(dev) &&
11223                     (I915_READ(DP_D) & DP_DETECTED))
11224                         intel_dp_init(dev, DP_D, PORT_D);
11225 #if 0
11226         } else if (IS_GEN2(dev))
11227                 intel_dvo_init(dev);
11228 #endif
11229         }
11230
11231         if (SUPPORTS_TV(dev))
11232                 intel_tv_init(dev);
11233
11234         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11235                 encoder->base.possible_crtcs = encoder->crtc_mask;
11236                 encoder->base.possible_clones =
11237                         intel_encoder_clones(encoder);
11238         }
11239
11240         intel_init_pch_refclk(dev);
11241
11242         drm_helper_move_panel_connectors_to_head(dev);
11243 }
11244
11245 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11246 {
11247         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11248
11249         drm_framebuffer_cleanup(fb);
11250         WARN_ON(!intel_fb->obj->framebuffer_references--);
11251         drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
11252         kfree(intel_fb);
11253 }
11254
11255 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11256                                                 struct drm_file *file,
11257                                                 unsigned int *handle)
11258 {
11259         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11260         struct drm_i915_gem_object *obj = intel_fb->obj;
11261
11262         return drm_gem_handle_create(file, &obj->base, handle);
11263 }
11264
11265 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11266         .destroy = intel_user_framebuffer_destroy,
11267         .create_handle = intel_user_framebuffer_create_handle,
11268 };
11269
11270 static int intel_framebuffer_init(struct drm_device *dev,
11271                                   struct intel_framebuffer *intel_fb,
11272                                   struct drm_mode_fb_cmd2 *mode_cmd,
11273                                   struct drm_i915_gem_object *obj)
11274 {
11275         int aligned_height;
11276         int pitch_limit;
11277         int ret;
11278
11279         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
11280
11281         if (obj->tiling_mode == I915_TILING_Y) {
11282                 DRM_DEBUG("hardware does not support tiling Y\n");
11283                 return -EINVAL;
11284         }
11285
11286         if (mode_cmd->pitches[0] & 63) {
11287                 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
11288                           mode_cmd->pitches[0]);
11289                 return -EINVAL;
11290         }
11291
11292         if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
11293                 pitch_limit = 32*1024;
11294         } else if (INTEL_INFO(dev)->gen >= 4) {
11295                 if (obj->tiling_mode)
11296                         pitch_limit = 16*1024;
11297                 else
11298                         pitch_limit = 32*1024;
11299         } else if (INTEL_INFO(dev)->gen >= 3) {
11300                 if (obj->tiling_mode)
11301                         pitch_limit = 8*1024;
11302                 else
11303                         pitch_limit = 16*1024;
11304         } else
11305                 /* XXX DSPC is limited to 4k tiled */
11306                 pitch_limit = 8*1024;
11307
11308         if (mode_cmd->pitches[0] > pitch_limit) {
11309                 DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
11310                           obj->tiling_mode ? "tiled" : "linear",
11311                           mode_cmd->pitches[0], pitch_limit);
11312                 return -EINVAL;
11313         }
11314
11315         if (obj->tiling_mode != I915_TILING_NONE &&
11316             mode_cmd->pitches[0] != obj->stride) {
11317                 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
11318                           mode_cmd->pitches[0], obj->stride);
11319                 return -EINVAL;
11320         }
11321
11322         /* Reject formats not supported by any plane early. */
11323         switch (mode_cmd->pixel_format) {
11324         case DRM_FORMAT_C8:
11325         case DRM_FORMAT_RGB565:
11326         case DRM_FORMAT_XRGB8888:
11327         case DRM_FORMAT_ARGB8888:
11328                 break;
11329         case DRM_FORMAT_XRGB1555:
11330         case DRM_FORMAT_ARGB1555:
11331                 if (INTEL_INFO(dev)->gen > 3) {
11332                         DRM_DEBUG("unsupported pixel format: %s\n",
11333                                   drm_get_format_name(mode_cmd->pixel_format));
11334                         return -EINVAL;
11335                 }
11336                 break;
11337         case DRM_FORMAT_XBGR8888:
11338         case DRM_FORMAT_ABGR8888:
11339         case DRM_FORMAT_XRGB2101010:
11340         case DRM_FORMAT_ARGB2101010:
11341         case DRM_FORMAT_XBGR2101010:
11342         case DRM_FORMAT_ABGR2101010:
11343                 if (INTEL_INFO(dev)->gen < 4) {
11344                         DRM_DEBUG("unsupported pixel format: %s\n",
11345                                   drm_get_format_name(mode_cmd->pixel_format));
11346                         return -EINVAL;
11347                 }
11348                 break;
11349         case DRM_FORMAT_YUYV:
11350         case DRM_FORMAT_UYVY:
11351         case DRM_FORMAT_YVYU:
11352         case DRM_FORMAT_VYUY:
11353                 if (INTEL_INFO(dev)->gen < 5) {
11354                         DRM_DEBUG("unsupported pixel format: %s\n",
11355                                   drm_get_format_name(mode_cmd->pixel_format));
11356                         return -EINVAL;
11357                 }
11358                 break;
11359         default:
11360                 DRM_DEBUG("unsupported pixel format: %s\n",
11361                           drm_get_format_name(mode_cmd->pixel_format));
11362                 return -EINVAL;
11363         }
11364
11365         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11366         if (mode_cmd->offsets[0] != 0)
11367                 return -EINVAL;
11368
11369         aligned_height = intel_align_height(dev, mode_cmd->height,
11370                                             obj->tiling_mode);
11371         /* FIXME drm helper for size checks (especially planar formats)? */
11372         if (obj->base.size < aligned_height * mode_cmd->pitches[0])
11373                 return -EINVAL;
11374
11375         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
11376         intel_fb->obj = obj;
11377         intel_fb->obj->framebuffer_references++;
11378
11379         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
11380         if (ret) {
11381                 DRM_ERROR("framebuffer init failed %d\n", ret);
11382                 return ret;
11383         }
11384
11385         return 0;
11386 }
11387
11388 static struct drm_framebuffer *
11389 intel_user_framebuffer_create(struct drm_device *dev,
11390                               struct drm_file *filp,
11391                               struct drm_mode_fb_cmd2 *mode_cmd)
11392 {
11393         struct drm_i915_gem_object *obj;
11394
11395         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
11396                                                 mode_cmd->handles[0]));
11397         if (&obj->base == NULL)
11398                 return ERR_PTR(-ENOENT);
11399
11400         return intel_framebuffer_create(dev, mode_cmd, obj);
11401 }
11402
11403 #ifndef CONFIG_DRM_I915_FBDEV
11404 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
11405 {
11406 }
11407 #endif
11408
11409 static const struct drm_mode_config_funcs intel_mode_funcs = {
11410         .fb_create = intel_user_framebuffer_create,
11411         .output_poll_changed = intel_fbdev_output_poll_changed,
11412 };
11413
11414 /* Set up chip specific display functions */
11415 static void intel_init_display(struct drm_device *dev)
11416 {
11417         struct drm_i915_private *dev_priv = dev->dev_private;
11418
11419         if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11420                 dev_priv->display.find_dpll = g4x_find_best_dpll;
11421         else if (IS_CHERRYVIEW(dev))
11422                 dev_priv->display.find_dpll = chv_find_best_dpll;
11423         else if (IS_VALLEYVIEW(dev))
11424                 dev_priv->display.find_dpll = vlv_find_best_dpll;
11425         else if (IS_PINEVIEW(dev))
11426                 dev_priv->display.find_dpll = pnv_find_best_dpll;
11427         else
11428                 dev_priv->display.find_dpll = i9xx_find_best_dpll;
11429
11430         if (HAS_DDI(dev)) {
11431                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
11432                 dev_priv->display.get_plane_config = ironlake_get_plane_config;
11433                 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
11434                 dev_priv->display.crtc_enable = haswell_crtc_enable;
11435                 dev_priv->display.crtc_disable = haswell_crtc_disable;
11436                 dev_priv->display.off = haswell_crtc_off;
11437                 dev_priv->display.update_primary_plane =
11438                         ironlake_update_primary_plane;
11439         } else if (HAS_PCH_SPLIT(dev)) {
11440                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
11441                 dev_priv->display.get_plane_config = ironlake_get_plane_config;
11442                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
11443                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
11444                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
11445                 dev_priv->display.off = ironlake_crtc_off;
11446                 dev_priv->display.update_primary_plane =
11447                         ironlake_update_primary_plane;
11448         } else if (IS_VALLEYVIEW(dev)) {
11449                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11450                 dev_priv->display.get_plane_config = i9xx_get_plane_config;
11451                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
11452                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
11453                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11454                 dev_priv->display.off = i9xx_crtc_off;
11455                 dev_priv->display.update_primary_plane =
11456                         i9xx_update_primary_plane;
11457         } else {
11458                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11459                 dev_priv->display.get_plane_config = i9xx_get_plane_config;
11460                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
11461                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
11462                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11463                 dev_priv->display.off = i9xx_crtc_off;
11464                 dev_priv->display.update_primary_plane =
11465                         i9xx_update_primary_plane;
11466         }
11467
11468         /* Returns the core display clock speed */
11469         if (IS_VALLEYVIEW(dev))
11470                 dev_priv->display.get_display_clock_speed =
11471                         valleyview_get_display_clock_speed;
11472         else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
11473                 dev_priv->display.get_display_clock_speed =
11474                         i945_get_display_clock_speed;
11475         else if (IS_I915G(dev))
11476                 dev_priv->display.get_display_clock_speed =
11477                         i915_get_display_clock_speed;
11478         else if (IS_I945GM(dev) || IS_845G(dev))
11479                 dev_priv->display.get_display_clock_speed =
11480                         i9xx_misc_get_display_clock_speed;
11481         else if (IS_PINEVIEW(dev))
11482                 dev_priv->display.get_display_clock_speed =
11483                         pnv_get_display_clock_speed;
11484         else if (IS_I915GM(dev))
11485                 dev_priv->display.get_display_clock_speed =
11486                         i915gm_get_display_clock_speed;
11487         else if (IS_I865G(dev))
11488                 dev_priv->display.get_display_clock_speed =
11489                         i865_get_display_clock_speed;
11490         else if (IS_I85X(dev))
11491                 dev_priv->display.get_display_clock_speed =
11492                         i855_get_display_clock_speed;
11493         else /* 852, 830 */
11494                 dev_priv->display.get_display_clock_speed =
11495                         i830_get_display_clock_speed;
11496
11497         if (HAS_PCH_SPLIT(dev)) {
11498                 if (IS_GEN5(dev)) {
11499                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
11500                         dev_priv->display.write_eld = ironlake_write_eld;
11501                 } else if (IS_GEN6(dev)) {
11502                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
11503                         dev_priv->display.write_eld = ironlake_write_eld;
11504                         dev_priv->display.modeset_global_resources =
11505                                 snb_modeset_global_resources;
11506                 } else if (IS_IVYBRIDGE(dev)) {
11507                         /* FIXME: detect B0+ stepping and use auto training */
11508                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
11509                         dev_priv->display.write_eld = ironlake_write_eld;
11510                         dev_priv->display.modeset_global_resources =
11511                                 ivb_modeset_global_resources;
11512                 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
11513                         dev_priv->display.fdi_link_train = hsw_fdi_link_train;
11514                         dev_priv->display.write_eld = haswell_write_eld;
11515                         dev_priv->display.modeset_global_resources =
11516                                 haswell_modeset_global_resources;
11517                 }
11518         } else if (IS_G4X(dev)) {
11519                 dev_priv->display.write_eld = g4x_write_eld;
11520         } else if (IS_VALLEYVIEW(dev)) {
11521                 dev_priv->display.modeset_global_resources =
11522                         valleyview_modeset_global_resources;
11523                 dev_priv->display.write_eld = ironlake_write_eld;
11524         }
11525
11526         /* Default just returns -ENODEV to indicate unsupported */
11527         dev_priv->display.queue_flip = intel_default_queue_flip;
11528
11529         switch (INTEL_INFO(dev)->gen) {
11530         case 2:
11531                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
11532                 break;
11533
11534         case 3:
11535                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
11536                 break;
11537
11538         case 4:
11539         case 5:
11540                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
11541                 break;
11542
11543         case 6:
11544                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
11545                 break;
11546         case 7:
11547         case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
11548                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
11549                 break;
11550         }
11551
11552         intel_panel_init_backlight_funcs(dev);
11553 }
11554
11555 /*
11556  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
11557  * resume, or other times.  This quirk makes sure that's the case for
11558  * affected systems.
11559  */
11560 static void quirk_pipea_force(struct drm_device *dev)
11561 {
11562         struct drm_i915_private *dev_priv = dev->dev_private;
11563
11564         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
11565         DRM_INFO("applying pipe a force quirk\n");
11566 }
11567
11568 /*
11569  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
11570  */
11571 static void quirk_ssc_force_disable(struct drm_device *dev)
11572 {
11573         struct drm_i915_private *dev_priv = dev->dev_private;
11574         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
11575         DRM_INFO("applying lvds SSC disable quirk\n");
11576 }
11577
11578 /*
11579  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
11580  * brightness value
11581  */
11582 static void quirk_invert_brightness(struct drm_device *dev)
11583 {
11584         struct drm_i915_private *dev_priv = dev->dev_private;
11585         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
11586         DRM_INFO("applying inverted panel brightness quirk\n");
11587 }
11588
11589 /* Some VBT's incorrectly indicate no backlight is present */
11590 static void quirk_backlight_present(struct drm_device *dev)
11591 {
11592         struct drm_i915_private *dev_priv = dev->dev_private;
11593         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
11594         DRM_INFO("applying backlight present quirk\n");
11595 }
11596
11597 struct intel_quirk {
11598         int device;
11599         int subsystem_vendor;
11600         int subsystem_device;
11601         void (*hook)(struct drm_device *dev);
11602 };
11603
11604 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
11605 struct intel_dmi_quirk {
11606         void (*hook)(struct drm_device *dev);
11607         const struct dmi_system_id (*dmi_id_list)[];
11608 };
11609
11610 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
11611 {
11612         DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
11613         return 1;
11614 }
11615
11616 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
11617         {
11618                 .dmi_id_list = &(const struct dmi_system_id[]) {
11619                         {
11620                                 .callback = intel_dmi_reverse_brightness,
11621                                 .ident = "NCR Corporation",
11622                                 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
11623                                             DMI_MATCH(DMI_PRODUCT_NAME, ""),
11624                                 },
11625                         },
11626                         { }  /* terminating entry */
11627                 },
11628                 .hook = quirk_invert_brightness,
11629         },
11630 };
11631
11632 static struct intel_quirk intel_quirks[] = {
11633         /* HP Mini needs pipe A force quirk (LP: #322104) */
11634         { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
11635
11636         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
11637         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
11638
11639         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
11640         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
11641
11642         /* Lenovo U160 cannot use SSC on LVDS */
11643         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
11644
11645         /* Sony Vaio Y cannot use SSC on LVDS */
11646         { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
11647
11648         /* Acer Aspire 5734Z must invert backlight brightness */
11649         { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
11650
11651         /* Acer/eMachines G725 */
11652         { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
11653
11654         /* Acer/eMachines e725 */
11655         { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
11656
11657         /* Acer/Packard Bell NCL20 */
11658         { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
11659
11660         /* Acer Aspire 4736Z */
11661         { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
11662
11663         /* Acer Aspire 5336 */
11664         { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
11665
11666         /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
11667         { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
11668
11669         /* Toshiba CB35 Chromebook (Celeron 2955U) */
11670         { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
11671
11672         /* HP Chromebook 14 (Celeron 2955U) */
11673         { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
11674 };
11675
11676 static void intel_init_quirks(struct drm_device *dev)
11677 {
11678         struct device *d = dev->dev;
11679         int i;
11680
11681         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
11682                 struct intel_quirk *q = &intel_quirks[i];
11683
11684                 if (pci_get_device(d) == q->device &&
11685                     (pci_get_subvendor(d) == q->subsystem_vendor ||
11686                      q->subsystem_vendor == PCI_ANY_ID) &&
11687                     (pci_get_subdevice(d) == q->subsystem_device ||
11688                      q->subsystem_device == PCI_ANY_ID))
11689                         q->hook(dev);
11690         }
11691         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
11692                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
11693                         intel_dmi_quirks[i].hook(dev);
11694         }
11695 }
11696
11697 /* Disable the VGA plane that we never use */
11698 static void i915_disable_vga(struct drm_device *dev)
11699 {
11700         struct drm_i915_private *dev_priv = dev->dev_private;
11701         u8 sr1;
11702         u32 vga_reg = i915_vgacntrl_reg(dev);
11703
11704         /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
11705 #if 0
11706         vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
11707 #endif
11708         outb(VGA_SR_INDEX, SR01);
11709         sr1 = inb(VGA_SR_DATA);
11710         outb(VGA_SR_DATA, sr1 | 1 << 5);
11711 #if 0
11712         vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
11713 #endif
11714         udelay(300);
11715
11716         I915_WRITE(vga_reg, VGA_DISP_DISABLE);
11717         POSTING_READ(vga_reg);
11718 }
11719
11720 void intel_modeset_init_hw(struct drm_device *dev)
11721 {
11722         intel_prepare_ddi(dev);
11723
11724         intel_init_clock_gating(dev);
11725
11726         intel_reset_dpio(dev);
11727
11728         intel_enable_gt_powersave(dev);
11729 }
11730
11731 void intel_modeset_suspend_hw(struct drm_device *dev)
11732 {
11733         intel_suspend_hw(dev);
11734 }
11735
11736 void intel_modeset_init(struct drm_device *dev)
11737 {
11738         struct drm_i915_private *dev_priv = dev->dev_private;
11739         int sprite, ret;
11740         enum i915_pipe pipe;
11741         struct intel_crtc *crtc;
11742
11743         drm_mode_config_init(dev);
11744
11745         dev->mode_config.min_width = 0;
11746         dev->mode_config.min_height = 0;
11747
11748         dev->mode_config.preferred_depth = 24;
11749         dev->mode_config.prefer_shadow = 1;
11750
11751         dev->mode_config.funcs = &intel_mode_funcs;
11752
11753         intel_init_quirks(dev);
11754
11755         intel_init_pm(dev);
11756
11757         if (INTEL_INFO(dev)->num_pipes == 0)
11758                 return;
11759
11760         intel_init_display(dev);
11761
11762         if (IS_GEN2(dev)) {
11763                 dev->mode_config.max_width = 2048;
11764                 dev->mode_config.max_height = 2048;
11765         } else if (IS_GEN3(dev)) {
11766                 dev->mode_config.max_width = 4096;
11767                 dev->mode_config.max_height = 4096;
11768         } else {
11769                 dev->mode_config.max_width = 8192;
11770                 dev->mode_config.max_height = 8192;
11771         }
11772
11773         if (IS_GEN2(dev)) {
11774                 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
11775                 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
11776         } else {
11777                 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
11778                 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
11779         }
11780
11781         dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
11782
11783         DRM_DEBUG_KMS("%d display pipe%s available.\n",
11784                       INTEL_INFO(dev)->num_pipes,
11785                       INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
11786
11787         for_each_pipe(pipe) {
11788                 intel_crtc_init(dev, pipe);
11789                 for_each_sprite(pipe, sprite) {
11790                         ret = intel_plane_init(dev, pipe, sprite);
11791                         if (ret)
11792                                 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
11793                                               pipe_name(pipe), sprite_name(pipe, sprite), ret);
11794                 }
11795         }
11796
11797         intel_init_dpio(dev);
11798         intel_reset_dpio(dev);
11799
11800         intel_cpu_pll_init(dev);
11801         intel_shared_dpll_init(dev);
11802
11803         /* Just disable it once at startup */
11804         i915_disable_vga(dev);
11805         intel_setup_outputs(dev);
11806
11807         /* Just in case the BIOS is doing something questionable. */
11808         intel_disable_fbc(dev);
11809
11810         drm_modeset_lock_all(dev);
11811         intel_modeset_setup_hw_state(dev, false);
11812         drm_modeset_unlock_all(dev);
11813
11814         for_each_intel_crtc(dev, crtc) {
11815                 if (!crtc->active)
11816                         continue;
11817
11818                 /*
11819                  * Note that reserving the BIOS fb up front prevents us
11820                  * from stuffing other stolen allocations like the ring
11821                  * on top.  This prevents some ugliness at boot time, and
11822                  * can even allow for smooth boot transitions if the BIOS
11823                  * fb is large enough for the active pipe configuration.
11824                  */
11825                 if (dev_priv->display.get_plane_config) {
11826                         dev_priv->display.get_plane_config(crtc,
11827                                                            &crtc->plane_config);
11828                         /*
11829                          * If the fb is shared between multiple heads, we'll
11830                          * just get the first one.
11831                          */
11832                         intel_find_plane_obj(crtc, &crtc->plane_config);
11833                 }
11834         }
11835 }
11836
11837 static void intel_enable_pipe_a(struct drm_device *dev)
11838 {
11839         struct intel_connector *connector;
11840         struct drm_connector *crt = NULL;
11841         struct intel_load_detect_pipe load_detect_temp;
11842         struct drm_modeset_acquire_ctx ctx;
11843
11844         /* We can't just switch on the pipe A, we need to set things up with a
11845          * proper mode and output configuration. As a gross hack, enable pipe A
11846          * by enabling the load detect pipe once. */
11847         list_for_each_entry(connector,
11848                             &dev->mode_config.connector_list,
11849                             base.head) {
11850                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
11851                         crt = &connector->base;
11852                         break;
11853                 }
11854         }
11855
11856         if (!crt)
11857                 return;
11858
11859         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
11860                 intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
11861
11862
11863 }
11864
11865 static bool
11866 intel_check_plane_mapping(struct intel_crtc *crtc)
11867 {
11868         struct drm_device *dev = crtc->base.dev;
11869         struct drm_i915_private *dev_priv = dev->dev_private;
11870         u32 reg, val;
11871
11872         if (INTEL_INFO(dev)->num_pipes == 1)
11873                 return true;
11874
11875         reg = DSPCNTR(!crtc->plane);
11876         val = I915_READ(reg);
11877
11878         if ((val & DISPLAY_PLANE_ENABLE) &&
11879             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
11880                 return false;
11881
11882         return true;
11883 }
11884
11885 static void intel_sanitize_crtc(struct intel_crtc *crtc)
11886 {
11887         struct drm_device *dev = crtc->base.dev;
11888         struct drm_i915_private *dev_priv = dev->dev_private;
11889         u32 reg;
11890
11891         /* Clear any frame start delays used for debugging left by the BIOS */
11892         reg = PIPECONF(crtc->config.cpu_transcoder);
11893         I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
11894
11895         /* restore vblank interrupts to correct state */
11896         if (crtc->active)
11897                 drm_vblank_on(dev, crtc->pipe);
11898         else
11899                 drm_vblank_off(dev, crtc->pipe);
11900
11901         /* We need to sanitize the plane -> pipe mapping first because this will
11902          * disable the crtc (and hence change the state) if it is wrong. Note
11903          * that gen4+ has a fixed plane -> pipe mapping.  */
11904         if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
11905                 struct intel_connector *connector;
11906                 bool plane;
11907
11908                 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
11909                               crtc->base.base.id);
11910
11911                 /* Pipe has the wrong plane attached and the plane is active.
11912                  * Temporarily change the plane mapping and disable everything
11913                  * ...  */
11914                 plane = crtc->plane;
11915                 crtc->plane = !plane;
11916                 crtc->primary_enabled = true;
11917                 dev_priv->display.crtc_disable(&crtc->base);
11918                 crtc->plane = plane;
11919
11920                 /* ... and break all links. */
11921                 list_for_each_entry(connector, &dev->mode_config.connector_list,
11922                                     base.head) {
11923                         if (connector->encoder->base.crtc != &crtc->base)
11924                                 continue;
11925
11926                         connector->base.dpms = DRM_MODE_DPMS_OFF;
11927                         connector->base.encoder = NULL;
11928                 }
11929                 /* multiple connectors may have the same encoder:
11930                  *  handle them and break crtc link separately */
11931                 list_for_each_entry(connector, &dev->mode_config.connector_list,
11932                                     base.head)
11933                         if (connector->encoder->base.crtc == &crtc->base) {
11934                                 connector->encoder->base.crtc = NULL;
11935                                 connector->encoder->connectors_active = false;
11936                         }
11937
11938                 WARN_ON(crtc->active);
11939                 crtc->base.enabled = false;
11940         }
11941
11942         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
11943             crtc->pipe == PIPE_A && !crtc->active) {
11944                 /* BIOS forgot to enable pipe A, this mostly happens after
11945                  * resume. Force-enable the pipe to fix this, the update_dpms
11946                  * call below we restore the pipe to the right state, but leave
11947                  * the required bits on. */
11948                 intel_enable_pipe_a(dev);
11949         }
11950
11951         /* Adjust the state of the output pipe according to whether we
11952          * have active connectors/encoders. */
11953         intel_crtc_update_dpms(&crtc->base);
11954
11955         if (crtc->active != crtc->base.enabled) {
11956                 struct intel_encoder *encoder;
11957
11958                 /* This can happen either due to bugs in the get_hw_state
11959                  * functions or because the pipe is force-enabled due to the
11960                  * pipe A quirk. */
11961                 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
11962                               crtc->base.base.id,
11963                               crtc->base.enabled ? "enabled" : "disabled",
11964                               crtc->active ? "enabled" : "disabled");
11965
11966                 crtc->base.enabled = crtc->active;
11967
11968                 /* Because we only establish the connector -> encoder ->
11969                  * crtc links if something is active, this means the
11970                  * crtc is now deactivated. Break the links. connector
11971                  * -> encoder links are only establish when things are
11972                  *  actually up, hence no need to break them. */
11973                 WARN_ON(crtc->active);
11974
11975                 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
11976                         WARN_ON(encoder->connectors_active);
11977                         encoder->base.crtc = NULL;
11978                 }
11979         }
11980
11981         if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
11982                 /*
11983                  * We start out with underrun reporting disabled to avoid races.
11984                  * For correct bookkeeping mark this on active crtcs.
11985                  *
11986                  * Also on gmch platforms we dont have any hardware bits to
11987                  * disable the underrun reporting. Which means we need to start
11988                  * out with underrun reporting disabled also on inactive pipes,
11989                  * since otherwise we'll complain about the garbage we read when
11990                  * e.g. coming up after runtime pm.
11991                  *
11992                  * No protection against concurrent access is required - at
11993                  * worst a fifo underrun happens which also sets this to false.
11994                  */
11995                 crtc->cpu_fifo_underrun_disabled = true;
11996                 crtc->pch_fifo_underrun_disabled = true;
11997
11998                 update_scanline_offset(crtc);
11999         }
12000 }
12001
12002 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12003 {
12004         struct intel_connector *connector;
12005         struct drm_device *dev = encoder->base.dev;
12006
12007         /* We need to check both for a crtc link (meaning that the
12008          * encoder is active and trying to read from a pipe) and the
12009          * pipe itself being active. */
12010         bool has_active_crtc = encoder->base.crtc &&
12011                 to_intel_crtc(encoder->base.crtc)->active;
12012
12013         if (encoder->connectors_active && !has_active_crtc) {
12014                 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12015                               encoder->base.base.id,
12016                               encoder->base.name);
12017
12018                 /* Connector is active, but has no active pipe. This is
12019                  * fallout from our resume register restoring. Disable
12020                  * the encoder manually again. */
12021                 if (encoder->base.crtc) {
12022                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12023                                       encoder->base.base.id,
12024                                       encoder->base.name);
12025                         encoder->disable(encoder);
12026                 }
12027                 encoder->base.crtc = NULL;
12028                 encoder->connectors_active = false;
12029
12030                 /* Inconsistent output/port/pipe state happens presumably due to
12031                  * a bug in one of the get_hw_state functions. Or someplace else
12032                  * in our code, like the register restore mess on resume. Clamp
12033                  * things to off as a safer default. */
12034                 list_for_each_entry(connector,
12035                                     &dev->mode_config.connector_list,
12036                                     base.head) {
12037                         if (connector->encoder != encoder)
12038                                 continue;
12039
12040                         connector->base.dpms = DRM_MODE_DPMS_OFF;
12041                         connector->base.encoder = NULL;
12042                 }
12043         }
12044         /* Enabled encoders without active connectors will be fixed in
12045          * the crtc fixup. */
12046 }
12047
12048 void i915_redisable_vga_power_on(struct drm_device *dev)
12049 {
12050         struct drm_i915_private *dev_priv = dev->dev_private;
12051         u32 vga_reg = i915_vgacntrl_reg(dev);
12052
12053         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12054                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12055                 i915_disable_vga(dev);
12056         }
12057 }
12058
12059 void i915_redisable_vga(struct drm_device *dev)
12060 {
12061         struct drm_i915_private *dev_priv = dev->dev_private;
12062
12063         /* This function can be called both from intel_modeset_setup_hw_state or
12064          * at a very early point in our resume sequence, where the power well
12065          * structures are not yet restored. Since this function is at a very
12066          * paranoid "someone might have enabled VGA while we were not looking"
12067          * level, just check if the power well is enabled instead of trying to
12068          * follow the "don't touch the power well if we don't need it" policy
12069          * the rest of the driver uses. */
12070         if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
12071                 return;
12072
12073         i915_redisable_vga_power_on(dev);
12074 }
12075
12076 static bool primary_get_hw_state(struct intel_crtc *crtc)
12077 {
12078         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12079
12080         if (!crtc->active)
12081                 return false;
12082
12083         return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12084 }
12085
12086 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12087 {
12088         struct drm_i915_private *dev_priv = dev->dev_private;
12089         enum i915_pipe pipe;
12090         struct intel_crtc *crtc;
12091         struct intel_encoder *encoder;
12092         struct intel_connector *connector;
12093         int i;
12094
12095         for_each_intel_crtc(dev, crtc) {
12096                 memset(&crtc->config, 0, sizeof(crtc->config));
12097
12098                 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12099
12100                 crtc->active = dev_priv->display.get_pipe_config(crtc,
12101                                                                  &crtc->config);
12102
12103                 crtc->base.enabled = crtc->active;
12104                 crtc->primary_enabled = primary_get_hw_state(crtc);
12105
12106                 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12107                               crtc->base.base.id,
12108                               crtc->active ? "enabled" : "disabled");
12109         }
12110
12111         /* FIXME: Smash this into the new shared dpll infrastructure. */
12112         if (HAS_DDI(dev))
12113                 intel_ddi_setup_hw_pll_state(dev);
12114
12115         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12116                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12117
12118                 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
12119                 pll->active = 0;
12120                 for_each_intel_crtc(dev, crtc) {
12121                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12122                                 pll->active++;
12123                 }
12124                 pll->refcount = pll->active;
12125
12126                 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12127                               pll->name, pll->refcount, pll->on);
12128         }
12129
12130         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12131                             base.head) {
12132                 pipe = 0;
12133
12134                 if (encoder->get_hw_state(encoder, &pipe)) {
12135                         crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12136                         encoder->base.crtc = &crtc->base;
12137                         encoder->get_config(encoder, &crtc->config);
12138                 } else {
12139                         encoder->base.crtc = NULL;
12140                 }
12141
12142                 encoder->connectors_active = false;
12143                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12144                               encoder->base.base.id,
12145                               encoder->base.name,
12146                               encoder->base.crtc ? "enabled" : "disabled",
12147                               pipe_name(pipe));
12148         }
12149
12150         list_for_each_entry(connector, &dev->mode_config.connector_list,
12151                             base.head) {
12152                 if (connector->get_hw_state(connector)) {
12153                         connector->base.dpms = DRM_MODE_DPMS_ON;
12154                         connector->encoder->connectors_active = true;
12155                         connector->base.encoder = &connector->encoder->base;
12156                 } else {
12157                         connector->base.dpms = DRM_MODE_DPMS_OFF;
12158                         connector->base.encoder = NULL;
12159                 }
12160                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12161                               connector->base.base.id,
12162                               connector->base.name,
12163                               connector->base.encoder ? "enabled" : "disabled");
12164         }
12165 }
12166
12167 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
12168  * and i915 state tracking structures. */
12169 void intel_modeset_setup_hw_state(struct drm_device *dev,
12170                                   bool force_restore)
12171 {
12172         struct drm_i915_private *dev_priv = dev->dev_private;
12173         enum i915_pipe pipe;
12174         struct intel_crtc *crtc;
12175         struct intel_encoder *encoder;
12176         int i;
12177
12178         intel_modeset_readout_hw_state(dev);
12179
12180         /*
12181          * Now that we have the config, copy it to each CRTC struct
12182          * Note that this could go away if we move to using crtc_config
12183          * checking everywhere.
12184          */
12185         for_each_intel_crtc(dev, crtc) {
12186                 if (crtc->active && i915.fastboot) {
12187                         intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
12188                         DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
12189                                       crtc->base.base.id);
12190                         drm_mode_debug_printmodeline(&crtc->base.mode);
12191                 }
12192         }
12193
12194         /* HW state is read out, now we need to sanitize this mess. */
12195         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12196                             base.head) {
12197                 intel_sanitize_encoder(encoder);
12198         }
12199
12200         for_each_pipe(pipe) {
12201                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12202                 intel_sanitize_crtc(crtc);
12203                 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
12204         }
12205
12206         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12207                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12208
12209                 if (!pll->on || pll->active)
12210                         continue;
12211
12212                 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
12213
12214                 pll->disable(dev_priv, pll);
12215                 pll->on = false;
12216         }
12217
12218         if (HAS_PCH_SPLIT(dev))
12219                 ilk_wm_get_hw_state(dev);
12220
12221         if (force_restore) {
12222                 i915_redisable_vga(dev);
12223
12224                 /*
12225                  * We need to use raw interfaces for restoring state to avoid
12226                  * checking (bogus) intermediate states.
12227                  */
12228                 for_each_pipe(pipe) {
12229                         struct drm_crtc *crtc =
12230                                 dev_priv->pipe_to_crtc_mapping[pipe];
12231
12232                         __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
12233                                          crtc->primary->fb);
12234                 }
12235         } else {
12236                 intel_modeset_update_staged_output_state(dev);
12237         }
12238
12239         intel_modeset_check_state(dev);
12240 }
12241
12242 void intel_modeset_gem_init(struct drm_device *dev)
12243 {
12244         struct drm_crtc *c;
12245         struct intel_framebuffer *fb;
12246
12247         mutex_lock(&dev->struct_mutex);
12248         intel_init_gt_powersave(dev);
12249         mutex_unlock(&dev->struct_mutex);
12250
12251         intel_modeset_init_hw(dev);
12252
12253         intel_setup_overlay(dev);
12254
12255         /*
12256          * Make sure any fbs we allocated at startup are properly
12257          * pinned & fenced.  When we do the allocation it's too early
12258          * for this.
12259          */
12260         mutex_lock(&dev->struct_mutex);
12261         for_each_crtc(dev, c) {
12262                 if (!c->primary->fb)
12263                         continue;
12264
12265                 fb = to_intel_framebuffer(c->primary->fb);
12266                 if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
12267                         DRM_ERROR("failed to pin boot fb on pipe %d\n",
12268                                   to_intel_crtc(c)->pipe);
12269                         drm_framebuffer_unreference(c->primary->fb);
12270                         c->primary->fb = NULL;
12271                 }
12272         }
12273         mutex_unlock(&dev->struct_mutex);
12274 }
12275
12276 void intel_connector_unregister(struct intel_connector *intel_connector)
12277 {
12278         struct drm_connector *connector = &intel_connector->base;
12279
12280         intel_panel_destroy_backlight(connector);
12281         drm_sysfs_connector_remove(connector);
12282 }
12283
12284 void intel_modeset_cleanup(struct drm_device *dev)
12285 {
12286         struct drm_i915_private *dev_priv = dev->dev_private;
12287         struct drm_crtc *crtc;
12288         struct drm_connector *connector;
12289
12290         /*
12291          * Interrupts and polling as the first thing to avoid creating havoc.
12292          * Too much stuff here (turning of rps, connectors, ...) would
12293          * experience fancy races otherwise.
12294          */
12295         drm_irq_uninstall(dev);
12296         cancel_work_sync(&dev_priv->hotplug_work);
12297         /*
12298          * Due to the hpd irq storm handling the hotplug work can re-arm the
12299          * poll handlers. Hence disable polling after hpd handling is shut down.
12300          */
12301         drm_kms_helper_poll_fini(dev);
12302
12303         mutex_lock(&dev->struct_mutex);
12304
12305         intel_unregister_dsm_handler();
12306
12307         for_each_crtc(dev, crtc) {
12308                 /* Skip inactive CRTCs */
12309                 if (!crtc->primary->fb)
12310                         continue;
12311
12312                 intel_increase_pllclock(crtc);
12313         }
12314
12315         intel_disable_fbc(dev);
12316
12317         intel_disable_gt_powersave(dev);
12318
12319         ironlake_teardown_rc6(dev);
12320
12321         mutex_unlock(&dev->struct_mutex);
12322
12323         /* flush any delayed tasks or pending work */
12324         flush_scheduled_work();
12325
12326         /* destroy the backlight and sysfs files before encoders/connectors */
12327         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
12328                 struct intel_connector *intel_connector;
12329
12330                 intel_connector = to_intel_connector(connector);
12331                 intel_connector->unregister(intel_connector);
12332         }
12333
12334         drm_mode_config_cleanup(dev);
12335
12336         intel_cleanup_overlay(dev);
12337
12338         mutex_lock(&dev->struct_mutex);
12339         intel_cleanup_gt_powersave(dev);
12340         mutex_unlock(&dev->struct_mutex);
12341 }
12342
12343 /*
12344  * Return which encoder is currently attached for connector.
12345  */
12346 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
12347 {
12348         return &intel_attached_encoder(connector)->base;
12349 }
12350
12351 void intel_connector_attach_encoder(struct intel_connector *connector,
12352                                     struct intel_encoder *encoder)
12353 {
12354         connector->encoder = encoder;
12355         drm_mode_connector_attach_encoder(&connector->base,
12356                                           &encoder->base);
12357 }
12358
12359 /*
12360  * set vga decode state - true == enable VGA decode
12361  */
12362 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
12363 {
12364         struct drm_i915_private *dev_priv = dev->dev_private;
12365         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
12366         u16 gmch_ctrl;
12367
12368         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
12369                 DRM_ERROR("failed to read control word\n");
12370                 return -EIO;
12371         }
12372
12373         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
12374                 return 0;
12375
12376         if (state)
12377                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
12378         else
12379                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
12380
12381         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
12382                 DRM_ERROR("failed to write control word\n");
12383                 return -EIO;
12384         }
12385
12386         return 0;
12387 }
12388
12389 #if 0
12390 struct intel_display_error_state {
12391
12392         u32 power_well_driver;
12393
12394         int num_transcoders;
12395
12396         struct intel_cursor_error_state {
12397                 u32 control;
12398                 u32 position;
12399                 u32 base;
12400                 u32 size;
12401         } cursor[I915_MAX_PIPES];
12402
12403         struct intel_pipe_error_state {
12404                 bool power_domain_on;
12405                 u32 source;
12406                 u32 stat;
12407         } pipe[I915_MAX_PIPES];
12408
12409         struct intel_plane_error_state {
12410                 u32 control;
12411                 u32 stride;
12412                 u32 size;
12413                 u32 pos;
12414                 u32 addr;
12415                 u32 surface;
12416                 u32 tile_offset;
12417         } plane[I915_MAX_PIPES];
12418
12419         struct intel_transcoder_error_state {
12420                 bool power_domain_on;
12421                 enum transcoder cpu_transcoder;
12422
12423                 u32 conf;
12424
12425                 u32 htotal;
12426                 u32 hblank;
12427                 u32 hsync;
12428                 u32 vtotal;
12429                 u32 vblank;
12430                 u32 vsync;
12431         } transcoder[4];
12432 };
12433
12434 struct intel_display_error_state *
12435 intel_display_capture_error_state(struct drm_device *dev)
12436 {
12437         struct drm_i915_private *dev_priv = dev->dev_private;
12438         struct intel_display_error_state *error;
12439         int transcoders[] = {
12440                 TRANSCODER_A,
12441                 TRANSCODER_B,
12442                 TRANSCODER_C,
12443                 TRANSCODER_EDP,
12444         };
12445         int i;
12446
12447         if (INTEL_INFO(dev)->num_pipes == 0)
12448                 return NULL;
12449
12450         error = kzalloc(sizeof(*error), GFP_ATOMIC);
12451         if (error == NULL)
12452                 return NULL;
12453
12454         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
12455                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
12456
12457         for_each_pipe(i) {
12458                 error->pipe[i].power_domain_on =
12459                         intel_display_power_enabled_unlocked(dev_priv,
12460                                                            POWER_DOMAIN_PIPE(i));
12461                 if (!error->pipe[i].power_domain_on)
12462                         continue;
12463
12464                 error->cursor[i].control = I915_READ(CURCNTR(i));
12465                 error->cursor[i].position = I915_READ(CURPOS(i));
12466                 error->cursor[i].base = I915_READ(CURBASE(i));
12467
12468                 error->plane[i].control = I915_READ(DSPCNTR(i));
12469                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
12470                 if (INTEL_INFO(dev)->gen <= 3) {
12471                         error->plane[i].size = I915_READ(DSPSIZE(i));
12472                         error->plane[i].pos = I915_READ(DSPPOS(i));
12473                 }
12474                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
12475                         error->plane[i].addr = I915_READ(DSPADDR(i));
12476                 if (INTEL_INFO(dev)->gen >= 4) {
12477                         error->plane[i].surface = I915_READ(DSPSURF(i));
12478                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
12479                 }
12480
12481                 error->pipe[i].source = I915_READ(PIPESRC(i));
12482
12483                 if (!HAS_PCH_SPLIT(dev))
12484                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
12485         }
12486
12487         error->num_transcoders = INTEL_INFO(dev)->num_pipes;
12488         if (HAS_DDI(dev_priv->dev))
12489                 error->num_transcoders++; /* Account for eDP. */
12490
12491         for (i = 0; i < error->num_transcoders; i++) {
12492                 enum transcoder cpu_transcoder = transcoders[i];
12493
12494                 error->transcoder[i].power_domain_on =
12495                         intel_display_power_enabled_unlocked(dev_priv,
12496                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
12497                 if (!error->transcoder[i].power_domain_on)
12498                         continue;
12499
12500                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
12501
12502                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
12503                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
12504                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
12505                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
12506                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
12507                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
12508                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
12509         }
12510
12511         return error;
12512 }
12513
12514 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
12515
12516 void
12517 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
12518                                 struct drm_device *dev,
12519                                 struct intel_display_error_state *error)
12520 {
12521         int i;
12522
12523         if (!error)
12524                 return;
12525
12526         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
12527         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
12528                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
12529                            error->power_well_driver);
12530         for_each_pipe(i) {
12531                 err_printf(m, "Pipe [%d]:\n", i);
12532                 err_printf(m, "  Power: %s\n",
12533                            error->pipe[i].power_domain_on ? "on" : "off");
12534                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
12535                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
12536
12537                 err_printf(m, "Plane [%d]:\n", i);
12538                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
12539                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
12540                 if (INTEL_INFO(dev)->gen <= 3) {
12541                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
12542                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
12543                 }
12544                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
12545                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
12546                 if (INTEL_INFO(dev)->gen >= 4) {
12547                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
12548                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
12549                 }
12550
12551                 err_printf(m, "Cursor [%d]:\n", i);
12552                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
12553                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
12554                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
12555         }
12556
12557         for (i = 0; i < error->num_transcoders; i++) {
12558                 err_printf(m, "CPU transcoder: %c\n",
12559                            transcoder_name(error->transcoder[i].cpu_transcoder));
12560                 err_printf(m, "  Power: %s\n",
12561                            error->transcoder[i].power_domain_on ? "on" : "off");
12562                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
12563                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
12564                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
12565                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
12566                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
12567                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
12568                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
12569         }
12570 }
12571 #endif