drm/i915: Fix DPCD debug print.
[dragonfly.git] / sys / dev / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <drm/drmP.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39
/* Link status check timeout; presumably in ms (10 s) — confirm at use site */
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)

/* DragonFly tunable: set drm.i915.disable_aux_irq=1 to force polled AUX
 * transfers instead of irq-driven completion (see intel_dp_aux_ch()). */
static int disable_aux_irq = 0;
TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50
/*
 * Pairs a DP link clock (in kHz, e.g. 162000 for the 1.62 Gbps rate)
 * with the DPLL divider settings that produce it on a given platform.
 */
struct dp_link_dpll {
        int clock;              /* link clock in kHz */
        struct dpll dpll;       /* divider values to program */
};

/* DPLL settings for gen4 (i965-class) hardware */
static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-split (ILK+) platforms */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for Valleyview */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Link rates (kHz) supported by each source platform family */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
100
101 /**
102  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103  * @intel_dp: DP struct
104  *
105  * If a CPU or PCH DP output is attached to an eDP panel, this function
106  * will return true, and false otherwise.
107  */
108 static bool is_edp(struct intel_dp *intel_dp)
109 {
110         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111
112         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
113 }
114
115 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 {
117         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118
119         return intel_dig_port->base.base.dev;
120 }
121
122 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 {
124         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
125 }
126
127 static void intel_dp_link_down(struct intel_dp *intel_dp);
128 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
129 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
130 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
131 static void vlv_steal_power_sequencer(struct drm_device *dev,
132                                       enum i915_pipe pipe);
133
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138
139         switch (max_link_bw) {
140         case DP_LINK_BW_1_62:
141         case DP_LINK_BW_2_7:
142         case DP_LINK_BW_5_4:
143                 break;
144         default:
145                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146                      max_link_bw);
147                 max_link_bw = DP_LINK_BW_1_62;
148                 break;
149         }
150         return max_link_bw;
151 }
152
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156         struct drm_device *dev = intel_dig_port->base.base.dev;
157         u8 source_max, sink_max;
158
159         source_max = 4;
160         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162                 source_max = 2;
163
164         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165
166         return min(source_max, sink_max);
167 }
168
169 /*
170  * The units on the numbers in the next two are... bizarre.  Examples will
171  * make it clearer; this one parallels an example in the eDP spec.
172  *
173  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174  *
175  *     270000 * 1 * 8 / 10 == 216000
176  *
177  * The actual data capacity of that configuration is 2.16Gbit/s, so the
178  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
179  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180  * 119000.  At 18bpp that's 2142000 kilobits per second.
181  *
182  * Thus the strange-looking division by 10 in intel_dp_link_required, to
183  * get the result in decakilobits instead of kilobits.
184  */
185
/*
 * Bandwidth required by a mode, in decakilobits/s (see the units
 * comment above). Rounds up so we never under-provision the link.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        /* round up to whole decakilobits */
        return (kilobits + 9) / 10;
}
191
/*
 * Link capacity in decakilobits/s. The 8/10 factor accounts for
 * 8b/10b channel coding: only 8 of every 10 transmitted bits are data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int total_clock = max_link_clock * max_lanes;

        return total_clock * 8 / 10;
}
197
198 static enum drm_mode_status
199 intel_dp_mode_valid(struct drm_connector *connector,
200                     struct drm_display_mode *mode)
201 {
202         struct intel_dp *intel_dp = intel_attached_dp(connector);
203         struct intel_connector *intel_connector = to_intel_connector(connector);
204         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
205         int target_clock = mode->clock;
206         int max_rate, mode_rate, max_lanes, max_link_clock;
207
208         if (is_edp(intel_dp) && fixed_mode) {
209                 if (mode->hdisplay > fixed_mode->hdisplay)
210                         return MODE_PANEL;
211
212                 if (mode->vdisplay > fixed_mode->vdisplay)
213                         return MODE_PANEL;
214
215                 target_clock = fixed_mode->clock;
216         }
217
218         max_link_clock = intel_dp_max_link_rate(intel_dp);
219         max_lanes = intel_dp_max_lane_count(intel_dp);
220
221         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222         mode_rate = intel_dp_link_required(target_clock, 18);
223
224         if (mode_rate > max_rate)
225                 return MODE_CLOCK_HIGH;
226
227         if (mode->clock < 10000)
228                 return MODE_CLOCK_LOW;
229
230         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231                 return MODE_H_ILLEGAL;
232
233         return MODE_OK;
234 }
235
/*
 * Pack up to 4 bytes into a big-endian 32-bit word for the AUX data
 * registers; the first byte lands in the most significant position.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t word = 0;
        int i;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                word |= (uint32_t)src[i] << (24 - i * 8);
        return word;
}
247
/*
 * Unpack a big-endian 32-bit AUX data word into up to 4 bytes; the most
 * significant byte of @src becomes dst[0].
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = (uint8_t)(src >> (24 - i * 8));
}
256
/* hrawclock is 1/4 the FSB frequency */
/*
 * Return the hrawclk frequency in MHz, decoded from the CLKCFG FSB
 * field. Unknown encodings fall back to 133 MHz.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                return 133;
        }
}
290
291 static void
292 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
293                                     struct intel_dp *intel_dp);
294 static void
295 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
296                                               struct intel_dp *intel_dp);
297
/*
 * Acquire the panel power sequencer mutex, taking a display power
 * domain reference first. The get/put must happen outside pps_mutex;
 * see the comment in vlv_power_sequencer_reset() for the lock ordering
 * rationale. Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
315
/*
 * Release the panel power sequencer mutex and drop the power domain
 * reference taken by pps_lock(). The mutex is dropped before the power
 * domain put, mirroring pps_lock() in reverse order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
329
330 static void
331 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332 {
333         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334         struct drm_device *dev = intel_dig_port->base.base.dev;
335         struct drm_i915_private *dev_priv = dev->dev_private;
336         enum i915_pipe pipe = intel_dp->pps_pipe;
337         bool pll_enabled;
338         uint32_t DP;
339
340         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
342                  pipe_name(pipe), port_name(intel_dig_port->port)))
343                 return;
344
345         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346                       pipe_name(pipe), port_name(intel_dig_port->port));
347
348         /* Preserve the BIOS-computed detected bit. This is
349          * supposed to be read-only.
350          */
351         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353         DP |= DP_PORT_WIDTH(1);
354         DP |= DP_LINK_TRAIN_PAT_1;
355
356         if (IS_CHERRYVIEW(dev))
357                 DP |= DP_PIPE_SELECT_CHV(pipe);
358         else if (pipe == PIPE_B)
359                 DP |= DP_PIPEB_SELECT;
360
361         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362
363         /*
364          * The DPLL for the pipe must be enabled for this to work.
365          * So enable temporarily it if it's not already enabled.
366          */
367         if (!pll_enabled)
368                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370
371         /*
372          * Similar magic as in intel_dp_enable_port().
373          * We _must_ do this port enable + disable trick
374          * to make this power seqeuencer lock onto the port.
375          * Otherwise even VDD force bit won't work.
376          */
377         I915_WRITE(intel_dp->output_reg, DP);
378         POSTING_READ(intel_dp->output_reg);
379
380         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381         POSTING_READ(intel_dp->output_reg);
382
383         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384         POSTING_READ(intel_dp->output_reg);
385
386         if (!pll_enabled)
387                 vlv_force_pll_off(dev, pipe);
388 }
389
/*
 * Return the pipe whose power sequencer is assigned to this eDP port.
 * If none is assigned yet, pick a pipe (A or B) not used by any other
 * eDP port, steal it if necessary, initialize its registers, and kick
 * it so it locks onto the port. Caller must hold pps_mutex.
 */
static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum i915_pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                /* clear this pipe from the candidate mask */
                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
453
/* Predicate used by vlv_initial_pps_pipe() when scanning pipes */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum i915_pipe pipe);

/* True if this pipe's power sequencer has panel power on */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum i915_pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* True if this pipe's power sequencer has VDD force enabled */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum i915_pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accepts any pipe; used as the last-resort fallback predicate */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum i915_pipe pipe)
{
        return true;
}
474
/*
 * Scan pipes A and B for a power sequencer whose port-select field
 * matches @port and which satisfies @pipe_check. Returns the matching
 * pipe or INVALID_PIPE if none qualifies.
 */
static enum i915_pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum i915_pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}
497
/*
 * At init time, figure out which pipe's power sequencer (if any) the
 * BIOS left attached to this port, preferring one with panel power on,
 * then one with VDD on, then any with a matching port select. Caller
 * must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
534
/*
 * Invalidate every eDP port's power sequencer assignment, forcing a
 * re-pick on next use. VLV-only; called without pps_mutex (see below).
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
563
/* Select the platform-appropriate panel power control register */
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_CONTROL(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                /* VLV/CHV: per-pipe register; may assign a PPS as a side effect */
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
575
/* Select the platform-appropriate panel power status register */
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_STATUS(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                /* VLV/CHV: per-pipe register; may assign a PPS as a side effect */
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
587
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
/* NOTE: compiled out (#if 0) in this port — kept for reference */
#if 0
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
#endif
625
/*
 * True if the panel power sequencer reports panel power on.
 * Caller must hold pps_mutex. On VLV an unassigned power sequencer
 * means power is off (don't assign one just to check).
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
639
/*
 * True if the panel power sequencer has the VDD force bit set.
 * Caller must hold pps_mutex. On VLV an unassigned power sequencer
 * means VDD is off (don't assign one just to check).
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
653
/*
 * Sanity check before AUX traffic on eDP: warn if neither panel power
 * nor VDD is up, since AUX transfers need one of them. No-op for
 * regular DP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}
670
/*
 * Wait for the AUX channel SEND_BUSY bit to clear and return the final
 * channel control register value. With AUX irqs we sleep on
 * gmbus_wait_queue; otherwise we poll atomically. Both paths time out
 * after 10ms with an error message (the hardware should always signal
 * completion or timeout itself).
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* Condition macro: latches the register into 'status' on every poll */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
694
695 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696 {
697         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698         struct drm_device *dev = intel_dig_port->base.base.dev;
699
700         /*
701          * The clock divider is based off the hrawclk, and would like to run at
702          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
703          */
704         return index ? 0 : intel_hrawclk(dev) / 2;
705 }
706
707 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 {
709         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
710         struct drm_device *dev = intel_dig_port->base.base.dev;
711         struct drm_i915_private *dev_priv = dev->dev_private;
712
713         if (index)
714                 return 0;
715
716         if (intel_dig_port->port == PORT_A) {
717                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
718
719         } else {
720                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721         }
722 }
723
/*
 * AUX clock divider for HSW/BDW: port A off cdclk, PCH ports off the
 * PCH rawclk. Non-ULT HSW (LPT PCH) gets two hard-coded workaround
 * divider candidates instead.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else  {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}
745
/* VLV uses a single fixed AUX clock divider of 100 (index 0 only). */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index != 0)
                return 0;
        return 100;
}
750
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (hardware
         * derives the clock from CDCLK automatically). Return a single
         * dummy value so the shared get_aux_clock_divider retry loop in
         * intel_dp_aux_ch() still runs exactly once.
         */
        if (index != 0)
                return 0;
        return 1;
}
760
/*
 * Build the AUX_CH_CTL value that starts a transfer on pre-SKL
 * hardware: busy/done/error bits, timeout, message size, precharge
 * time, and the 2x bit-clock divider supplied by the caller.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        /* gen6 uses a shorter precharge period than later gens */
        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        /* BDW eDP (port A) AUX needs the longer 600us timeout */
        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
790
/*
 * Build the AUX_CH_CTL value for SKL+: no clock divider or precharge
 * field (hardware handles both); uses a fixed 1600us timeout and a
 * 32-cycle sync pulse. @unused keeps the get_aux_send_ctl signature.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
805
/*
 * Perform one raw AUX channel transaction: write send_bytes from 'send'
 * out the AUX channel, then unload the reply into 'recv' (at most
 * recv_size bytes).
 *
 * Returns the number of bytes received, or a negative errno:
 *   -EBUSY      channel stuck busy, or never signalled done
 *   -E2BIG      message exceeds the 5 x 32-bit data registers (20 bytes)
 *   -EIO        receive error reported by the hardware
 *   -ETIMEDOUT  sink did not reply (typical for a disconnected sink)
 *
 * Takes pps_lock() itself and, for eDP, holds panel VDD for the
 * duration of the transaction.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow AUX_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Warn only once per distinct stuck status, to avoid log spam. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transaction at each supported AUX clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
960
961 #define BARE_ADDRESS_SIZE       3
962 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
963 static ssize_t
964 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
965 {
966         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
967         uint8_t txbuf[20], rxbuf[20];
968         size_t txsize, rxsize;
969         int ret;
970
971         txbuf[0] = (msg->request << 4) |
972                 ((msg->address >> 16) & 0xf);
973         txbuf[1] = (msg->address >> 8) & 0xff;
974         txbuf[2] = msg->address & 0xff;
975         txbuf[3] = msg->size - 1;
976
977         switch (msg->request & ~DP_AUX_I2C_MOT) {
978         case DP_AUX_NATIVE_WRITE:
979         case DP_AUX_I2C_WRITE:
980                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
981                 rxsize = 2; /* 0 or 1 data bytes */
982
983                 if (WARN_ON(txsize > 20))
984                         return -E2BIG;
985
986                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
987
988                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
989                 if (ret > 0) {
990                         msg->reply = rxbuf[0] >> 4;
991
992                         if (ret > 1) {
993                                 /* Number of bytes written in a short write. */
994                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
995                         } else {
996                                 /* Return payload size. */
997                                 ret = msg->size;
998                         }
999                 }
1000                 break;
1001
1002         case DP_AUX_NATIVE_READ:
1003         case DP_AUX_I2C_READ:
1004                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1005                 rxsize = msg->size + 1;
1006
1007                 if (WARN_ON(rxsize > 20))
1008                         return -E2BIG;
1009
1010                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1011                 if (ret > 0) {
1012                         msg->reply = rxbuf[0] >> 4;
1013                         /*
1014                          * Assume happy day, and copy the data. The caller is
1015                          * expected to check msg->reply before touching it.
1016                          *
1017                          * Return payload size.
1018                          */
1019                         ret--;
1020                         memcpy(msg->buffer, rxbuf + 1, ret);
1021                 }
1022                 break;
1023
1024         default:
1025                 ret = -EINVAL;
1026                 break;
1027         }
1028
1029         return ret;
1030 }
1031
/*
 * Legacy iicbus-style I2C-over-AUX handler (DragonFly iic_dp_aux glue):
 * transfer a single byte to/from I2C address data->address via DP AUX.
 *
 * Returns 0 on success (the byte is stored in *read_byte for reads),
 * -EREMOTEIO on NACK or a malformed reply, or the error code from
 * intel_dp_aux_ch() on channel failure.
 */
static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	/* MOT (Middle-Of-Transaction) keeps the I2C transfer open across calls. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;	/* AUX length field is (length - 1): one data byte */
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;	/* reply field + the data byte */
		break;
	default:
		/* Address-only (start/stop) transaction. */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		/* First check the native (AUX-level) reply field. */
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		/* Native ACK: now interpret the I2C-level reply field. */
		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = 0;	/* reply_bytes - 1 */
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}
1151
/*
 * Set up the AUX channel and the I2C-over-AUX adapter for this port:
 * pick the AUX_CTL register for the port, name the DDC bus, and
 * register the iic bus with intel_dp_i2c_aux_ch as the transfer hook.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();	/* every DP-capable port must be handled above */
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	/* DragonFly: register the DDC bus through the iicbus framework. */
	ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->aux.ddc);
	WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
	     ret, port_name(port));

}
1233
/* Thin wrapper: DP connectors just use the generic unregister path. */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	intel_connector_unregister(intel_connector);
}
1239
#if 0
/*
 * NOTE(review): dead code — this function is compiled out and appears
 * superseded by the iic_dp_aux_add_bus() call in intel_dp_aux_init()
 * above; consider deleting it outright.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
#if 0
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	if (ret < 0)
		return ret;

	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
				&intel_dp->adapter.dev.kobj,
				intel_dp->adapter.dev.kobj.name);
#endif
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);

	return ret;
}
#endif
1272
1273 static void
1274 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1275 {
1276         u32 ctrl1;
1277
1278         memset(&pipe_config->dpll_hw_state, 0,
1279                sizeof(pipe_config->dpll_hw_state));
1280
1281         pipe_config->ddi_pll_sel = SKL_DPLL0;
1282         pipe_config->dpll_hw_state.cfgcr1 = 0;
1283         pipe_config->dpll_hw_state.cfgcr2 = 0;
1284
1285         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1286         switch (pipe_config->port_clock / 2) {
1287         case 81000:
1288                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1289                                               SKL_DPLL0);
1290                 break;
1291         case 135000:
1292                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1293                                               SKL_DPLL0);
1294                 break;
1295         case 270000:
1296                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1297                                               SKL_DPLL0);
1298                 break;
1299         case 162000:
1300                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1301                                               SKL_DPLL0);
1302                 break;
1303         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1304         results in CDCLK change. Need to handle the change of CDCLK by
1305         disabling pipes and re-enabling them */
1306         case 108000:
1307                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1308                                               SKL_DPLL0);
1309                 break;
1310         case 216000:
1311                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1312                                               SKL_DPLL0);
1313                 break;
1314
1315         }
1316         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1317 }
1318
1319 void
1320 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1321 {
1322         memset(&pipe_config->dpll_hw_state, 0,
1323                sizeof(pipe_config->dpll_hw_state));
1324
1325         switch (pipe_config->port_clock / 2) {
1326         case 81000:
1327                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1328                 break;
1329         case 135000:
1330                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1331                 break;
1332         case 270000:
1333                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1334                 break;
1335         }
1336 }
1337
1338 static int
1339 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1340 {
1341         if (intel_dp->num_sink_rates) {
1342                 *sink_rates = intel_dp->sink_rates;
1343                 return intel_dp->num_sink_rates;
1344         }
1345
1346         *sink_rates = default_rates;
1347
1348         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1349 }
1350
1351 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1352 {
1353         /* WaDisableHBR2:skl */
1354         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1355                 return false;
1356
1357         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1358             (INTEL_INFO(dev)->gen >= 9))
1359                 return true;
1360         else
1361                 return false;
1362 }
1363
1364 static int
1365 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1366 {
1367         int size;
1368
1369         if (IS_BROXTON(dev)) {
1370                 *source_rates = bxt_rates;
1371                 size = ARRAY_SIZE(bxt_rates);
1372         } else if (IS_SKYLAKE(dev)) {
1373                 *source_rates = skl_rates;
1374                 size = ARRAY_SIZE(skl_rates);
1375         } else {
1376                 *source_rates = default_rates;
1377                 size = ARRAY_SIZE(default_rates);
1378         }
1379
1380         /* This depends on the fact that 5.4 is last value in the array */
1381         if (!intel_dp_source_supports_hbr2(dev))
1382                 size--;
1383
1384         return size;
1385 }
1386
1387 static void
1388 intel_dp_set_clock(struct intel_encoder *encoder,
1389                    struct intel_crtc_state *pipe_config)
1390 {
1391         struct drm_device *dev = encoder->base.dev;
1392         const struct dp_link_dpll *divisor = NULL;
1393         int i, count = 0;
1394
1395         if (IS_G4X(dev)) {
1396                 divisor = gen4_dpll;
1397                 count = ARRAY_SIZE(gen4_dpll);
1398         } else if (HAS_PCH_SPLIT(dev)) {
1399                 divisor = pch_dpll;
1400                 count = ARRAY_SIZE(pch_dpll);
1401         } else if (IS_CHERRYVIEW(dev)) {
1402                 divisor = chv_dpll;
1403                 count = ARRAY_SIZE(chv_dpll);
1404         } else if (IS_VALLEYVIEW(dev)) {
1405                 divisor = vlv_dpll;
1406                 count = ARRAY_SIZE(vlv_dpll);
1407         }
1408
1409         if (divisor && count) {
1410                 for (i = 0; i < count; i++) {
1411                         if (pipe_config->port_clock == divisor[i].clock) {
1412                                 pipe_config->dpll = divisor[i].dpll;
1413                                 pipe_config->clock_set = true;
1414                                 break;
1415                         }
1416                 }
1417         }
1418 }
1419
1420 static int intersect_rates(const int *source_rates, int source_len,
1421                            const int *sink_rates, int sink_len,
1422                            int *common_rates)
1423 {
1424         int i = 0, j = 0, k = 0;
1425
1426         while (i < source_len && j < sink_len) {
1427                 if (source_rates[i] == sink_rates[j]) {
1428                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1429                                 return k;
1430                         common_rates[k] = source_rates[i];
1431                         ++k;
1432                         ++i;
1433                         ++j;
1434                 } else if (source_rates[i] < sink_rates[j]) {
1435                         ++i;
1436                 } else {
1437                         ++j;
1438                 }
1439         }
1440         return k;
1441 }
1442
/*
 * Fill common_rates with the link rates both the source and the sink
 * support; returns the count.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int nsrc, nsnk;

	nsnk = intel_dp_sink_rates(intel_dp, &snk_rates);
	nsrc = intel_dp_source_rates(dev, &src_rates);

	return intersect_rates(src_rates, nsrc, snk_rates, nsnk,
			       common_rates);
}
1457
1458 static void snprintf_int_array(char *str, size_t len,
1459                                const int *array, int nelem)
1460 {
1461         int i;
1462
1463         str[0] = '\0';
1464
1465         for (i = 0; i < nelem; i++) {
1466                 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1467                 if (r >= len)
1468                         return;
1469                 str += r;
1470                 len -= r;
1471         }
1472 }
1473
1474 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1475 {
1476         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1477         const int *source_rates, *sink_rates;
1478         int source_len, sink_len, common_len;
1479         int common_rates[DP_MAX_SUPPORTED_RATES];
1480         char str[128]; /* FIXME: too big for stack? */
1481
1482         if ((drm_debug & DRM_UT_KMS) == 0)
1483                 return;
1484
1485         source_len = intel_dp_source_rates(dev, &source_rates);
1486         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1487         DRM_DEBUG_KMS("source rates: %s\n", str);
1488
1489         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1490         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1491         DRM_DEBUG_KMS("sink rates: %s\n", str);
1492
1493         common_len = intel_dp_common_rates(intel_dp, common_rates);
1494         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1495         DRM_DEBUG_KMS("common rates: %s\n", str);
1496 }
1497
1498 static int rate_to_index(int find, const int *rates)
1499 {
1500         int i = 0;
1501
1502         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1503                 if (find == rates[i])
1504                         break;
1505
1506         return i;
1507 }
1508
1509 int
1510 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1511 {
1512         int rates[DP_MAX_SUPPORTED_RATES] = {};
1513         int len;
1514
1515         len = intel_dp_common_rates(intel_dp, rates);
1516         if (WARN_ON(len <= 0))
1517                 return 162000;
1518
1519         return rates[rate_to_index(0, rates) - 1];
1520 }
1521
/* Map a link rate to its index in the sink's DPCD rate table. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1526
1527 bool
1528 intel_dp_compute_config(struct intel_encoder *encoder,
1529                         struct intel_crtc_state *pipe_config)
1530 {
1531         struct drm_device *dev = encoder->base.dev;
1532         struct drm_i915_private *dev_priv = dev->dev_private;
1533         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1534         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1535         enum port port = dp_to_dig_port(intel_dp)->port;
1536         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1537         struct intel_connector *intel_connector = intel_dp->attached_connector;
1538         int lane_count, clock;
1539         int min_lane_count = 1;
1540         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1541         /* Conveniently, the link BW constants become indices with a shift...*/
1542         int min_clock = 0;
1543         int max_clock;
1544         int bpp, mode_rate;
1545         int link_avail, link_clock;
1546         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1547         int common_len;
1548
1549         common_len = intel_dp_common_rates(intel_dp, common_rates);
1550
1551         /* No common link rates between source and sink */
1552         WARN_ON(common_len <= 0);
1553
1554         max_clock = common_len - 1;
1555
1556         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1557                 pipe_config->has_pch_encoder = true;
1558
1559         pipe_config->has_dp_encoder = true;
1560         pipe_config->has_drrs = false;
1561         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1562
1563         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1564                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1565                                        adjusted_mode);
1566
1567                 if (INTEL_INFO(dev)->gen >= 9) {
1568                         int ret;
1569                         ret = skl_update_scaler_crtc(pipe_config);
1570                         if (ret)
1571                                 return ret;
1572                 }
1573
1574                 if (!HAS_PCH_SPLIT(dev))
1575                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1576                                                  intel_connector->panel.fitting_mode);
1577                 else
1578                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1579                                                 intel_connector->panel.fitting_mode);
1580         }
1581
1582         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1583                 return false;
1584
1585         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1586                       "max bw %d pixel clock %iKHz\n",
1587                       max_lane_count, common_rates[max_clock],
1588                       adjusted_mode->crtc_clock);
1589
1590         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1591          * bpc in between. */
1592         bpp = pipe_config->pipe_bpp;
1593         if (is_edp(intel_dp)) {
1594
1595                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1596                 if (intel_connector->base.display_info.bpc == 0 &&
1597                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1598                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1599                                       dev_priv->vbt.edp_bpp);
1600                         bpp = dev_priv->vbt.edp_bpp;
1601                 }
1602
1603                 /*
1604                  * Use the maximum clock and number of lanes the eDP panel
1605                  * advertizes being capable of. The panels are generally
1606                  * designed to support only a single clock and lane
1607                  * configuration, and typically these values correspond to the
1608                  * native resolution of the panel.
1609                  */
1610                 min_lane_count = max_lane_count;
1611                 min_clock = max_clock;
1612         }
1613
1614         for (; bpp >= 6*3; bpp -= 2*3) {
1615                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1616                                                    bpp);
1617
1618                 for (clock = min_clock; clock <= max_clock; clock++) {
1619                         for (lane_count = min_lane_count;
1620                                 lane_count <= max_lane_count;
1621                                 lane_count <<= 1) {
1622
1623                                 link_clock = common_rates[clock];
1624                                 link_avail = intel_dp_max_data_rate(link_clock,
1625                                                                     lane_count);
1626
1627                                 if (mode_rate <= link_avail) {
1628                                         goto found;
1629                                 }
1630                         }
1631                 }
1632         }
1633
1634         return false;
1635
1636 found:
1637         if (intel_dp->color_range_auto) {
1638                 /*
1639                  * See:
1640                  * CEA-861-E - 5.1 Default Encoding Parameters
1641                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1642                  */
1643                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1644                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1645                 else
1646                         intel_dp->color_range = 0;
1647         }
1648
1649         if (intel_dp->color_range)
1650                 pipe_config->limited_color_range = true;
1651
1652         intel_dp->lane_count = lane_count;
1653
1654         if (intel_dp->num_sink_rates) {
1655                 intel_dp->link_bw = 0;
1656                 intel_dp->rate_select =
1657                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1658         } else {
1659                 intel_dp->link_bw =
1660                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1661                 intel_dp->rate_select = 0;
1662         }
1663
1664         pipe_config->pipe_bpp = bpp;
1665         pipe_config->port_clock = common_rates[clock];
1666
1667         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1668                       intel_dp->link_bw, intel_dp->lane_count,
1669                       pipe_config->port_clock, bpp);
1670         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1671                       mode_rate, link_avail);
1672
1673         intel_link_compute_m_n(bpp, lane_count,
1674                                adjusted_mode->crtc_clock,
1675                                pipe_config->port_clock,
1676                                &pipe_config->dp_m_n);
1677
1678         if (intel_connector->panel.downclock_mode != NULL &&
1679                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1680                         pipe_config->has_drrs = true;
1681                         intel_link_compute_m_n(bpp, lane_count,
1682                                 intel_connector->panel.downclock_mode->clock,
1683                                 pipe_config->port_clock,
1684                                 &pipe_config->dp_m2_n2);
1685         }
1686
1687         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1688                 skl_edp_set_pll_config(pipe_config);
1689         else if (IS_BROXTON(dev))
1690                 /* handled in ddi */;
1691         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1692                 hsw_dp_set_ddi_pll_sel(pipe_config);
1693         else
1694                 intel_dp_set_clock(encoder, pipe_config);
1695
1696         return true;
1697 }
1698
1699 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1700 {
1701         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1702         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1703         struct drm_device *dev = crtc->base.dev;
1704         struct drm_i915_private *dev_priv = dev->dev_private;
1705         u32 dpa_ctl;
1706
1707         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1708                       crtc->config->port_clock);
1709         dpa_ctl = I915_READ(DP_A);
1710         dpa_ctl &= ~DP_PLL_FREQ_MASK;
1711
1712         if (crtc->config->port_clock == 162000) {
1713                 /* For a long time we've carried around a ILK-DevA w/a for the
1714                  * 160MHz clock. If we're really unlucky, it's still required.
1715                  */
1716                 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1717                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1718                 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1719         } else {
1720                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1721                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1722         }
1723
1724         I915_WRITE(DP_A, dpa_ctl);
1725
1726         POSTING_READ(DP_A);
1727         udelay(500);
1728 }
1729
/*
 * Build the DP port register value for the mode being set and cache it in
 * intel_dp->DP. Only the CPT TRANS_DP_CTL register is written immediately
 * here; the port register itself is programmed later from the cached value.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select live in this register */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT enhanced framing is configured via the transcoder
		 * register, which is written immediately rather than cached */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1811
/*
 * Mask/value pairs polled against the panel power status register by the
 * wait_panel_*() helpers below: panel fully on, panel fully off, and
 * sequencer idle/ready for the next power cycle, respectively.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1820
1821 static void wait_panel_status(struct intel_dp *intel_dp,
1822                                        u32 mask,
1823                                        u32 value)
1824 {
1825         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1826         struct drm_i915_private *dev_priv = dev->dev_private;
1827         u32 pp_stat_reg, pp_ctrl_reg;
1828
1829         lockdep_assert_held(&dev_priv->pps_mutex);
1830
1831         pp_stat_reg = _pp_stat_reg(intel_dp);
1832         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1833
1834         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1835                         mask, value,
1836                         I915_READ(pp_stat_reg),
1837                         I915_READ(pp_ctrl_reg));
1838
1839         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1840                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1841                                 I915_READ(pp_stat_reg),
1842                                 I915_READ(pp_ctrl_reg));
1843         }
1844
1845         DRM_DEBUG_KMS("Wait complete\n");
1846 }
1847
/* Block until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1853
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1859
/*
 * Block until the panel may be powered up again: honour the software
 * power-cycle delay measured from the last power-off, then wait for the
 * hardware sequencer to go idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1871
/* Honour the panel's power-on to backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1877
/* Honour the panel's backlight-off delay before further power changes. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1883
1884 /* Read the current pp_control value, unlocking the register if it
1885  * is locked
1886  */
1887
1888 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1889 {
1890         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1891         struct drm_i915_private *dev_priv = dev->dev_private;
1892         u32 control;
1893
1894         lockdep_assert_held(&dev_priv->pps_mutex);
1895
1896         control = I915_READ(_pp_ctrl_reg(intel_dp));
1897         if (!IS_BROXTON(dev)) {
1898                 control &= ~PANEL_UNLOCK_MASK;
1899                 control |= PANEL_UNLOCK_REGS;
1900         }
1901         return control;
1902 }
1903
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true when this call is the one that actually requested VDD,
 * i.e. the caller is responsible for disabling it again.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	/* Sampled before we set want_panel_vdd below */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* A pending delayed VDD-off must not fire while we want VDD on */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Already forced on in hardware: nothing more to do */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Take a power domain reference; released in edp_panel_vdd_off_sync() */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* Honour the mandatory power-cycle delay before touching the panel */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1961
1962 /*
1963  * Must be paired with intel_edp_panel_vdd_off() or
1964  * intel_edp_panel_off().
1965  * Nested calls to these functions are not allowed since
1966  * we drop the lock. Caller must use some higher level
1967  * locking to prevent nested calls from other threads.
1968  */
1969 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1970 {
1971         bool vdd;
1972
1973         if (!is_edp(intel_dp))
1974                 return;
1975
1976         pps_lock(intel_dp);
1977         vdd = edp_panel_vdd_on(intel_dp);
1978         pps_unlock(intel_dp);
1979
1980         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1981              port_name(dp_to_dig_port(intel_dp)->port));
1982 }
1983
1984 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1985 {
1986         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1987         struct drm_i915_private *dev_priv = dev->dev_private;
1988         struct intel_digital_port *intel_dig_port =
1989                 dp_to_dig_port(intel_dp);
1990         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1991         enum intel_display_power_domain power_domain;
1992         u32 pp;
1993         u32 pp_stat_reg, pp_ctrl_reg;
1994
1995         lockdep_assert_held(&dev_priv->pps_mutex);
1996
1997         WARN_ON(intel_dp->want_panel_vdd);
1998
1999         if (!edp_have_panel_vdd(intel_dp))
2000                 return;
2001
2002         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2003                       port_name(intel_dig_port->port));
2004
2005         pp = ironlake_get_pp_control(intel_dp);
2006         pp &= ~EDP_FORCE_VDD;
2007
2008         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2009         pp_stat_reg = _pp_stat_reg(intel_dp);
2010
2011         I915_WRITE(pp_ctrl_reg, pp);
2012         POSTING_READ(pp_ctrl_reg);
2013
2014         /* Make sure sequencer is idle before allowing subsequent activity */
2015         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2016         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2017
2018         if ((pp & POWER_TARGET_ON) == 0)
2019                 intel_dp->last_power_cycle = jiffies;
2020
2021         power_domain = intel_display_port_power_domain(intel_encoder);
2022         intel_display_power_put(dev_priv, power_domain);
2023 }
2024
2025 static void edp_panel_vdd_work(struct work_struct *__work)
2026 {
2027         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2028                                                  struct intel_dp, panel_vdd_work);
2029
2030         pps_lock(intel_dp);
2031         if (!intel_dp->want_panel_vdd)
2032                 edp_panel_vdd_off_sync(intel_dp);
2033         pps_unlock(intel_dp);
2034 }
2035
2036 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2037 {
2038         unsigned long delay;
2039
2040         /*
2041          * Queue the timer to fire a long time from now (relative to the power
2042          * down delay) to keep the panel power up across a sequence of
2043          * operations.
2044          */
2045         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2046         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2047 }
2048
2049 /*
2050  * Must be paired with edp_panel_vdd_on().
2051  * Must hold pps_mutex around the whole on/off sequence.
2052  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2053  */
2054 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2055 {
2056         struct drm_i915_private *dev_priv =
2057                 intel_dp_to_dev(intel_dp)->dev_private;
2058
2059         lockdep_assert_held(&dev_priv->pps_mutex);
2060
2061         if (!is_edp(intel_dp))
2062                 return;
2063
2064         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2065              port_name(dp_to_dig_port(intel_dp)->port));
2066
2067         intel_dp->want_panel_vdd = false;
2068
2069         if (sync)
2070                 edp_panel_vdd_off_sync(intel_dp);
2071         else
2072                 edp_panel_vdd_schedule_off(intel_dp);
2073 }
2074
/*
 * Turn the eDP panel power on via the power sequencer.
 * Caller must hold pps_mutex; no-op for non-eDP ports.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the mandatory delay since the last power-off */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used for the backlight-on delay */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2122
/* Public wrapper: turn the eDP panel power on under the pps mutex. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2132
2133
/*
 * Turn the eDP panel power off via the power sequencer.
 * Caller must hold pps_mutex and must still have VDD forced on;
 * no-op for non-eDP ports.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp used for the mandatory power-cycle delay */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2175
/* Public wrapper: turn the eDP panel power off under the pps mutex. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2185
2186 /* Enable backlight in the panel power control. */
2187 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2188 {
2189         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2190         struct drm_device *dev = intel_dig_port->base.base.dev;
2191         struct drm_i915_private *dev_priv = dev->dev_private;
2192         u32 pp;
2193         u32 pp_ctrl_reg;
2194
2195         /*
2196          * If we enable the backlight right away following a panel power
2197          * on, we may see slight flicker as the panel syncs with the eDP
2198          * link.  So delay a bit to make sure the image is solid before
2199          * allowing it to appear.
2200          */
2201         wait_backlight_on(intel_dp);
2202
2203         pps_lock(intel_dp);
2204
2205         pp = ironlake_get_pp_control(intel_dp);
2206         pp |= EDP_BLC_ENABLE;
2207
2208         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2209
2210         I915_WRITE(pp_ctrl_reg, pp);
2211         POSTING_READ(pp_ctrl_reg);
2212
2213         pps_unlock(intel_dp);
2214 }
2215
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Bring up the PWM before setting the PP backlight enable bit */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2227
2228 /* Disable backlight in the panel power control. */
2229 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2230 {
2231         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2232         struct drm_i915_private *dev_priv = dev->dev_private;
2233         u32 pp;
2234         u32 pp_ctrl_reg;
2235
2236         if (!is_edp(intel_dp))
2237                 return;
2238
2239         pps_lock(intel_dp);
2240
2241         pp = ironlake_get_pp_control(intel_dp);
2242         pp &= ~EDP_BLC_ENABLE;
2243
2244         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2245
2246         I915_WRITE(pp_ctrl_reg, pp);
2247         POSTING_READ(pp_ctrl_reg);
2248
2249         pps_unlock(intel_dp);
2250
2251         intel_dp->last_backlight_off = jiffies;
2252         edp_wait_backlight_off(intel_dp);
2253 }
2254
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Clear the PP backlight enable bit before shutting down the PWM */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2266
2267 /*
2268  * Hook for controlling the panel power control backlight through the bl_power
2269  * sysfs attribute. Take care to handle multiple calls.
2270  */
2271 static void intel_edp_backlight_power(struct intel_connector *connector,
2272                                       bool enable)
2273 {
2274         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2275         bool is_enabled;
2276
2277         pps_lock(intel_dp);
2278         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2279         pps_unlock(intel_dp);
2280
2281         if (is_enabled == enable)
2282                 return;
2283
2284         DRM_DEBUG_KMS("panel power control backlight %s\n",
2285                       enable ? "enable" : "disable");
2286
2287         if (enable)
2288                 _intel_edp_backlight_on(intel_dp);
2289         else
2290                 _intel_edp_backlight_off(intel_dp);
2291 }
2292
/*
 * Enable the CPU eDP PLL via DP_A. The pipe must be disabled, and the
 * PLL and port are expected to be off (warned otherwise).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Give the PLL time to settle before further port changes */
	udelay(200);
}
2318
/*
 * Disable the CPU eDP PLL via DP_A. The pipe must be disabled, the PLL
 * is expected to be on and the port off (warned otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Give the PLL time to wind down before further port changes */
	udelay(200);
}
2343
2344 /* If the sink supports it, try to set the power state appropriately */
2345 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2346 {
2347         int ret, i;
2348
2349         /* Should have a valid DPCD by this point */
2350         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2351                 return;
2352
2353         if (mode != DRM_MODE_DPMS_ON) {
2354                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2355                                          DP_SET_POWER_D3);
2356         } else {
2357                 /*
2358                  * When turning on, we need to retry for 1ms to give the sink
2359                  * time to wake up.
2360                  */
2361                 for (i = 0; i < 3; i++) {
2362                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2363                                                  DP_SET_POWER_D0);
2364                         if (ret == 1)
2365                                 break;
2366                         msleep(1);
2367                 }
2368         }
2369
2370         if (ret != 1)
2371                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2372                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2373 }
2374
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe it is attached to. Returns false when the port's power
 * domain is off or the port-enable bit is clear.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum i915_pipe p;

		/* On CPT the pipe association lives in TRANS_DP_CTL */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
		/* NOTE(review): if no transcoder claimed this port we still
		 * fall through and return true with *pipe left unset —
		 * confirm against upstream before relying on *pipe here. */
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2417
/*
 * Read the current hardware state of the DP port back into @pipe_config:
 * sync polarity flags, audio enable, limited color range, link M/N values,
 * the port clock and the dotclock derived from it.  Used for state readout
 * and cross-checking against the software state.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	/* Port A (CPU eDP) carries no audio. */
	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/*
	 * On CPT PCH ports the sync polarities live in the transcoder DP
	 * control register; everywhere else they are in the port register
	 * read above.
	 */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	/* The 16-235 color range bit only exists on gmch platforms. */
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select bit. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2501
/*
 * Disable the DP output: audio codec first, then PSR (on non-DDI
 * platforms), then the eDP panel power sequence.  The order of the
 * panel-off steps below is required by the eDP power sequencing rules.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2525
2526 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2527 {
2528         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2529         enum port port = dp_to_dig_port(intel_dp)->port;
2530
2531         intel_dp_link_down(intel_dp);
2532         if (port == PORT_A)
2533                 ironlake_edp_pll_off(intel_dp);
2534 }
2535
2536 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2537 {
2538         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2539
2540         intel_dp_link_down(intel_dp);
2541 }
2542
/*
 * CHV post-disable: take the link down, then put the PHY data lanes for
 * this channel into reset via the DPIO sideband.  The sideband accesses
 * are serialized by sb_lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert the lane reset (bits cleared == lanes in reset). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2578
/*
 * Translate a DPCD-style training pattern (@dp_train_pat, including the
 * scrambling-disable bit) into the source-side register encoding.  On DDI
 * platforms the pattern is written to DP_TP_CTL directly; on all other
 * platforms only the in-memory port register value *@DP is updated and the
 * caller is responsible for writing it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: pattern and scrambling control live in DP_TP_CTL. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style encoding in the port register; no TPS3 support. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2 since TPS3 doesn't exist here. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV encoding; TPS3 exists only on CHV. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2662
/*
 * Turn on the DP port with training pattern 1 selected.  The two-step
 * register write below is deliberate and must not be collapsed into one.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2686
/*
 * Common DP enable path: power-sequencer setup, port enable, panel power
 * on, link training, then audio.  The panel power steps are done under the
 * pps lock; the port must still be disabled on entry.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	unsigned int lane_mask = 0x0;

	/* The port must not already be running at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Power up the panel with vdd forced on, then release vdd. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2727
2728 static void g4x_enable_dp(struct intel_encoder *encoder)
2729 {
2730         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2731
2732         intel_enable_dp(encoder);
2733         intel_edp_backlight_on(intel_dp);
2734 }
2735
2736 static void vlv_enable_dp(struct intel_encoder *encoder)
2737 {
2738         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2739
2740         intel_edp_backlight_on(intel_dp);
2741         intel_psr_enable(intel_dp);
2742 }
2743
/*
 * g4x/ilk pre-enable: program the port register and, for CPU eDP on
 * port A, bring up the eDP PLL before the pipe starts.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2757
/*
 * Logically disconnect the power sequencer currently bound to @intel_dp:
 * sync off vdd, clear the sequencer's port select, and mark the encoder
 * as having no power-sequencer pipe.  Caller holds pps_mutex (implied by
 * the edp_panel_vdd_off_sync() call path).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum i915_pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2783
/*
 * Detach @pipe's power sequencer from whichever eDP encoder currently
 * owns it, so the caller can claim it.  Must be called with pps_mutex
 * held; only pipes A and B have power sequencers on VLV/CHV.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP encoders own power sequencers. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an active encoder indicates a driver bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2820
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port: release
 * any sequencer this port was using, steal the target pipe's sequencer
 * from any other port, then initialize it for this panel.  No-op for
 * non-eDP or when the binding is already correct.  pps_mutex must be held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2861
/*
 * VLV pre-enable: program the PHY PCS registers for this channel over
 * the DPIO sideband, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	/*
	 * NOTE(review): the value read above is discarded by the assignment
	 * below -- presumably the sideband read is only wanted for its side
	 * effect, or it is simply dead code; confirm before removing.
	 */
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2890
/*
 * VLV pre-pll-enable: set up the port register and put the PHY Tx lanes
 * into a known default state (lane resets, clock config, skew fixup)
 * before the DPLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2920
/*
 * CHV pre-enable: bring the PHY data lanes out of reset, program per-lane
 * latency and stagger settings based on the link rate, then run the
 * common DP enable sequence.  All PHY accesses go through the DPIO
 * sideband under sb_lock.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Take the lanes out of reset (bits set == lanes running). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* The computed stagger value is applied via the DW12 strap fields. */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
3008
/*
 * CHV pre-pll-enable: program the port register plus PHY clock routing
 * (left/right buffer distribution and clock channel usage) before the
 * PLL comes up.  Sideband accesses serialized by sb_lock.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3074
3075 /*
3076  * Native read with retry for link status and receiver capability reads for
3077  * cases where the sink may still be asleep.
3078  *
3079  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3080  * supposed to retry 3 times per the spec.
3081  */
3082 static ssize_t
3083 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3084                         void *buffer, size_t size)
3085 {
3086         ssize_t ret;
3087         int i;
3088
3089         /*
3090          * Sometime we just get the same incorrect byte repeated
3091          * over the entire buffer. Doing just one throw away read
3092          * initially seems to "solve" it.
3093          */
3094         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3095
3096         for (i = 0; i < 3; i++) {
3097                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3098                 if (ret == size)
3099                         return ret;
3100                 msleep(1);
3101         }
3102
3103         return ret;
3104 }
3105
3106 /*
3107  * Fetch AUX CH registers 0x202 - 0x207 which contain
3108  * link status information
3109  */
3110 static bool
3111 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3112 {
3113         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3114                                        DP_LANE0_1_STATUS,
3115                                        link_status,
3116                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3117 }
3118
3119 /* These are source-specific values. */
3120 static uint8_t
3121 intel_dp_voltage_max(struct intel_dp *intel_dp)
3122 {
3123         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3124         struct drm_i915_private *dev_priv = dev->dev_private;
3125         enum port port = dp_to_dig_port(intel_dp)->port;
3126
3127         if (IS_BROXTON(dev))
3128                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3129         else if (INTEL_INFO(dev)->gen >= 9) {
3130                 if (dev_priv->edp_low_vswing && port == PORT_A)
3131                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3132                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3133         } else if (IS_VALLEYVIEW(dev))
3134                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3135         else if (IS_GEN7(dev) && port == PORT_A)
3136                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3137         else if (HAS_PCH_CPT(dev) && port != PORT_A)
3138                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3139         else
3140                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3141 }
3142
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform/port.  Higher swing generally permits less
 * pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3210
/*
 * Program the Valleyview DPIO PHY for the voltage-swing / pre-emphasis
 * combination requested in train_set[0].
 *
 * Looks up the de-emphasis, pre-emphasis and unique-transition-scale
 * register values for the requested combination, then writes them to the
 * per-channel TX/PCS DPIO registers under the sideband lock.
 *
 * Returns 0 in all cases; on an unsupported swing/pre-emphasis
 * combination it returns early without touching any DPIO register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Outer switch: requested pre-emphasis; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	default:
		return 0;	/* unsupported pre-emphasis value */
	}

	/* Write the selected values via the sideband interface. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3310
/*
 * Program the Cherryview DPIO PHY swing/de-emphasis for the
 * voltage-swing / pre-emphasis combination requested in train_set[0].
 *
 * The de-emphasis and margin values are looked up first; the PHY is then
 * reprogrammed under the sideband lock: clear the swing calculation,
 * program per-lane swing de-emphasis and margin, handle the special
 * unique-transition-scale case, and finally restart swing calculation.
 *
 * Returns 0 in all cases; unsupported combinations bail out before any
 * DPIO register is touched.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	int i;

	/* Outer switch: requested pre-emphasis; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	default:
		return 0;	/* unsupported pre-emphasis value */
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* Zero the TX margin fields on both PCS groups. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Max swing + no pre-emphasis needs the unique transition scale. */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3484
3485 static void
3486 intel_get_adjust_train(struct intel_dp *intel_dp,
3487                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3488 {
3489         uint8_t v = 0;
3490         uint8_t p = 0;
3491         int lane;
3492         uint8_t voltage_max;
3493         uint8_t preemph_max;
3494
3495         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3496                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3497                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3498
3499                 if (this_v > v)
3500                         v = this_v;
3501                 if (this_p > p)
3502                         p = this_p;
3503         }
3504
3505         voltage_max = intel_dp_voltage_max(intel_dp);
3506         if (v >= voltage_max)
3507                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3508
3509         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3510         if (p >= preemph_max)
3511                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3512
3513         for (lane = 0; lane < 4; lane++)
3514                 intel_dp->train_set[lane] = v | p;
3515 }
3516
3517 static uint32_t
3518 gen4_signal_levels(uint8_t train_set)
3519 {
3520         uint32_t        signal_levels = 0;
3521
3522         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3523         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3524         default:
3525                 signal_levels |= DP_VOLTAGE_0_4;
3526                 break;
3527         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3528                 signal_levels |= DP_VOLTAGE_0_6;
3529                 break;
3530         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3531                 signal_levels |= DP_VOLTAGE_0_8;
3532                 break;
3533         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3534                 signal_levels |= DP_VOLTAGE_1_2;
3535                 break;
3536         }
3537         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3538         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3539         default:
3540                 signal_levels |= DP_PRE_EMPHASIS_0;
3541                 break;
3542         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3543                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3544                 break;
3545         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3546                 signal_levels |= DP_PRE_EMPHASIS_6;
3547                 break;
3548         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3549                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3550                 break;
3551         }
3552         return signal_levels;
3553 }
3554
3555 /* Gen6's DP voltage swing and pre-emphasis control */
3556 static uint32_t
3557 gen6_edp_signal_levels(uint8_t train_set)
3558 {
3559         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3560                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3561         switch (signal_levels) {
3562         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3563         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3564                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3565         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3566                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3567         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3568         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3569                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3570         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3571         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3572                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3573         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3574         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3575                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3576         default:
3577                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3578                               "0x%x\n", signal_levels);
3579                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3580         }
3581 }
3582
3583 /* Gen7's DP voltage swing and pre-emphasis control */
3584 static uint32_t
3585 gen7_edp_signal_levels(uint8_t train_set)
3586 {
3587         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3588                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3589         switch (signal_levels) {
3590         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3591                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3592         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3593                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3594         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3595                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3596
3597         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3598                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3599         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3600                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3601
3602         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3603                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3604         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3605                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3606
3607         default:
3608                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3609                               "0x%x\n", signal_levels);
3610                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3611         }
3612 }
3613
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Dispatches to the per-platform signal-level helper, then merges the
 * result into *DP under the platform's field mask. On DDI/VLV/CHV
 * platforms the levels live in PHY registers (helpers return 0 and a
 * zero mask), so *DP is left unchanged apart from the merge with 0.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* Broxton keeps the levels entirely in the PHY. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only print register-based levels; a zero mask means PHY-based. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}
3657
/*
 * Program the requested training pattern into the source's port
 * register, then mirror it to the sink over AUX: DP_TRAINING_PATTERN_SET
 * followed by the per-lane DP_TRAINING_LANEx_SET bytes in one DPCD
 * write (the registers are consecutive).
 *
 * Returns true if the full DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Update the source side first. */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3690
/*
 * Restart training: zero the cached train_set (unless a previously
 * trained set is being reused via train_set_valid), reprogram the
 * signal levels and select the given training pattern.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3700
/*
 * Apply the sink's requested adjustments mid-training: recompute
 * train_set from link_status, reprogram the port register, and write
 * the new per-lane values to DPCD starting at DP_TRAINING_LANE0_SET.
 *
 * Returns true if all lane_count DPCD bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3721
/*
 * Switch a DDI port to idle-pattern transmission and, except on PORT_A,
 * wait for the hardware to report that the idle patterns were sent.
 * No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Select idle transmission mode in DP_TP_CTL. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3752
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: write the link
 * configuration to the sink, enable the port with training pattern 1,
 * then loop adjusting vswing/pre-emphasis per the sink's requests until
 * clock recovery succeeds or the retry budgets are exhausted.
 * The final port register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* eDP 1.4 rate-select: program the sink's rate index too. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: never matches a real swing */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart from scratch. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Remember the final port register value for later phases. */
	intel_dp->DP = DP;
}
3863
/*
 * Perform the channel-equalization phase of DP link training
 * (DP spec 2.5.3.3 / 3.5.1.3).  Assumes clock recovery has already
 * completed; if CR is lost mid-way we restart the full training
 * sequence.  On success, train_set_valid is latched so subsequent
 * retrains can reuse the drive settings.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Bail out entirely after 5 failed clock recoveries. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3946
/*
 * End link training: switch the port from sending training
 * patterns back to normal pixel data.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3952
/*
 * Turn the DP port off, stepping it through the idle training
 * pattern first and applying the IBX transcoder-A workaround on
 * the way down.  Not used on DDI (HAS_DDI) platforms, which have
 * their own disable path — hence the WARN_ON below.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First switch to the idle pattern; CPT and gen7 port A use a
	 * different link-train field encoding than the others. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
4008
/*
 * Read and cache the sink's DPCD receiver capabilities, then derive
 * dependent state: PSR/PSR2 support, TPS3 usability, eDP 1.4
 * intermediate link rates, and downstream-port info.
 *
 * Returns false if the AUX read fails, the DPCD is absent, or the
 * per-port downstream info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

#ifdef __DragonFly__
	/* No %*ph kernel format extension here; hex-dump by hand.
	 * 3 bytes of output ("xx ") per DPCD byte. */
	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
#else
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
#endif

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 requires gen9+ source support. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
	 * have support for TP3 hence that check is used along with dpcd check
	 * to ensure TP3 can be enabled.
	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
	 * supported but still not enabled.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    intel_dp_source_supports_hbr2(dev)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support (eDP 1.4 SUPPORTED_LINK_RATES table) */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Table is zero-terminated; each entry is in 200 kHz units. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4114
/*
 * Best-effort debug dump of the sink and branch IEEE OUIs.
 * Skipped entirely when the sink does not advertise OUI support;
 * read failures are silently ignored.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}
4131
4132 static bool
4133 intel_dp_probe_mst(struct intel_dp *intel_dp)
4134 {
4135         u8 buf[1];
4136
4137         if (!intel_dp->can_mst)
4138                 return false;
4139
4140         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4141                 return false;
4142
4143         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4144                 if (buf[0] & DP_MST_CAP) {
4145                         DRM_DEBUG_KMS("Sink is MST capable\n");
4146                         intel_dp->is_mst = true;
4147                 } else {
4148                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4149                         intel_dp->is_mst = false;
4150                 }
4151         }
4152
4153 #if 0
4154         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4155         return intel_dp->is_mst;
4156 #else
4157         return false;
4158 #endif
4159 }
4160
4161 static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4162 {
4163         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4164         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4165         u8 buf;
4166
4167         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4168                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4169                 return;
4170         }
4171
4172         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4173                                buf & ~DP_TEST_SINK_START) < 0)
4174                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4175
4176         hsw_enable_ips(intel_crtc);
4177 }
4178
4179 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4180 {
4181         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4182         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4183         u8 buf;
4184
4185         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4186                 return -EIO;
4187
4188         if (!(buf & DP_TEST_CRC_SUPPORTED))
4189                 return -ENOTTY;
4190
4191         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4192                 return -EIO;
4193
4194         hsw_disable_ips(intel_crtc);
4195
4196         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4197                                buf | DP_TEST_SINK_START) < 0) {
4198                 hsw_enable_ips(intel_crtc);
4199                 return -EIO;
4200         }
4201
4202         return 0;
4203 }
4204
/*
 * Fetch a sink-computed CRC of the transmitted frame.
 *
 * Starts CRC capture, waits (up to 6 vblanks) for the sink's
 * TEST_COUNT field to advance — indicating a fresh CRC — then
 * reads the 6 CRC bytes into @crc.  Capture is always stopped
 * before returning.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the
 * sink never produced a new CRC, or the error from
 * intel_dp_sink_crc_start().
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;
	int ret;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto stop;
	}

	/* Baseline count; a change means a new CRC is available. */
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		ret = -EIO;
stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4247
4248 static bool
4249 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4250 {
4251         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4252                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4253                                        sink_irq_vector, 1) == 1;
4254 }
4255
#if 0
/*
 * Read the 14-byte SINK_COUNT_ESI block used for MST interrupt
 * handling.  Compiled out because MST support is not enabled in
 * this port (see intel_dp_probe_mst / intel_dp_check_mst_status).
 */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
					     DP_SINK_COUNT_ESI,
					     sink_irq_vector, 14);
	if (ret != 14)
		return false;

	return true;
}
#endif
4271
4272 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4273 {
4274         uint8_t test_result = DP_TEST_ACK;
4275         return test_result;
4276 }
4277
4278 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4279 {
4280         uint8_t test_result = DP_TEST_NAK;
4281         return test_result;
4282 }
4283
4284 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4285 {
4286         uint8_t test_result = DP_TEST_NAK;
4287         struct intel_connector *intel_connector = intel_dp->attached_connector;
4288         struct drm_connector *connector = &intel_connector->base;
4289
4290         if (intel_connector->detect_edid == NULL ||
4291             connector->edid_corrupt ||
4292             intel_dp->aux.i2c_defer_count > 6) {
4293                 /* Check EDID read for NACKs, DEFERs and corruption
4294                  * (DP CTS 1.2 Core r1.1)
4295                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4296                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4297                  *    4.2.2.6 : EDID corruption detected
4298                  * Use failsafe mode for all cases
4299                  */
4300                 if (intel_dp->aux.i2c_nack_count > 0 ||
4301                         intel_dp->aux.i2c_defer_count > 0)
4302                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4303                                       intel_dp->aux.i2c_nack_count,
4304                                       intel_dp->aux.i2c_defer_count);
4305                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4306         } else {
4307                 struct edid *block = intel_connector->detect_edid;
4308
4309                 /* We have to write the checksum
4310                  * of the last block read
4311                  */
4312                 block += intel_connector->detect_edid->extensions;
4313
4314                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4315                                         DP_TEST_EDID_CHECKSUM,
4316                                         &block->checksum,
4317                                         1))
4318                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4319
4320                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4321                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4322         }
4323
4324         /* Set test active flag here so userspace doesn't interrupt things */
4325         intel_dp->compliance_test_active = 1;
4326
4327         return test_result;
4328 }
4329
4330 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4331 {
4332         uint8_t test_result = DP_TEST_NAK;
4333         return test_result;
4334 }
4335
/*
 * Service a DP compliance automated-test request: read TEST_REQUEST,
 * dispatch to the matching autotest handler, and write the handler's
 * ACK/NAK back to TEST_RESPONSE.  Compliance state and the AUX I2C
 * NACK/DEFER counters are reset before handling.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		/* Still NAK the sink so it doesn't wait forever. */
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4388
#if 0
/*
 * MST short-pulse interrupt service: read the ESI block, retrain
 * if channel EQ dropped, forward the IRQ to the topology manager,
 * and loop while new ESI events keep arriving.  Compiled out
 * because MST is not enabled in this port.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
#endif
4447
4448 /*
4449  * According to DP spec
4450  * 5.1.2:
4451  *  1. Read DPCD
4452  *  2. Configure link according to Receiver Capabilities
4453  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4454  *  4. Check link status on receipt of hot-plug interrupt
4455  */
4456 static void
4457 intel_dp_check_link_status(struct intel_dp *intel_dp)
4458 {
4459         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4460         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4461         u8 sink_irq_vector;
4462         u8 link_status[DP_LINK_STATUS_SIZE];
4463
4464         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4465
4466         if (!intel_encoder->base.crtc)
4467                 return;
4468
4469         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4470                 return;
4471
4472         /* Try to read receiver status if the link appears to be up */
4473         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4474                 return;
4475         }
4476
4477         /* Now read the DPCD to see if it's actually running */
4478         if (!intel_dp_get_dpcd(intel_dp)) {
4479                 return;
4480         }
4481
4482         /* Try to read the source of the interrupt */
4483         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4484             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4485                 /* Clear interrupt source */
4486                 drm_dp_dpcd_writeb(&intel_dp->aux,
4487                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4488                                    sink_irq_vector);
4489
4490                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4491                         DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4492                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4493                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4494         }
4495
4496         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4497                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4498                               intel_encoder->base.name);
4499                 intel_dp_start_link_train(intel_dp);
4500                 intel_dp_complete_link_train(intel_dp);
4501                 intel_dp_stop_link_train(intel_dp);
4502         }
4503 }
4504
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status from the DPCD: native sinks are
 * connected once the DPCD reads back; branch devices are probed
 * via SINK_COUNT (when HPD-capable) or a gentle DDC poke, falling
 * back to "unknown" for port types that can't be probed reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4554
4555 static enum drm_connector_status
4556 edp_detect(struct intel_dp *intel_dp)
4557 {
4558         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4559         enum drm_connector_status status;
4560
4561         status = intel_panel_detect(dev);
4562         if (status == connector_status_unknown)
4563                 status = connector_status_connected;
4564
4565         return status;
4566 }
4567
4568 static enum drm_connector_status
4569 ironlake_dp_detect(struct intel_dp *intel_dp)
4570 {
4571         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4572         struct drm_i915_private *dev_priv = dev->dev_private;
4573         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4574
4575         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4576                 return connector_status_disconnected;
4577
4578         return intel_dp_detect_dpcd(intel_dp);
4579 }
4580
4581 static int g4x_digital_port_connected(struct drm_device *dev,
4582                                        struct intel_digital_port *intel_dig_port)
4583 {
4584         struct drm_i915_private *dev_priv = dev->dev_private;
4585         uint32_t bit;
4586
4587         if (IS_VALLEYVIEW(dev)) {
4588                 switch (intel_dig_port->port) {
4589                 case PORT_B:
4590                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4591                         break;
4592                 case PORT_C:
4593                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4594                         break;
4595                 case PORT_D:
4596                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4597                         break;
4598                 default:
4599                         return -EINVAL;
4600                 }
4601         } else {
4602                 switch (intel_dig_port->port) {
4603                 case PORT_B:
4604                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4605                         break;
4606                 case PORT_C:
4607                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4608                         break;
4609                 case PORT_D:
4610                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4611                         break;
4612                 default:
4613                         return -EINVAL;
4614                 }
4615         }
4616
4617         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4618                 return 0;
4619         return 1;
4620 }
4621
4622 static enum drm_connector_status
4623 g4x_dp_detect(struct intel_dp *intel_dp)
4624 {
4625         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4626         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4627         int ret;
4628
4629         /* Can't disconnect eDP, but you can close the lid... */
4630         if (is_edp(intel_dp)) {
4631                 enum drm_connector_status status;
4632
4633                 status = intel_panel_detect(dev);
4634                 if (status == connector_status_unknown)
4635                         status = connector_status_connected;
4636                 return status;
4637         }
4638
4639         ret = g4x_digital_port_connected(dev, intel_dig_port);
4640         if (ret == -EINVAL)
4641                 return connector_status_unknown;
4642         else if (ret == 0)
4643                 return connector_status_disconnected;
4644
4645         return intel_dp_detect_dpcd(intel_dp);
4646 }
4647
4648 static struct edid *
4649 intel_dp_get_edid(struct intel_dp *intel_dp)
4650 {
4651         struct intel_connector *intel_connector = intel_dp->attached_connector;
4652
4653         /* use cached edid if we have one */
4654         if (intel_connector->edid) {
4655                 /* invalid edid */
4656                 if (IS_ERR(intel_connector->edid))
4657                         return NULL;
4658
4659                 return drm_edid_duplicate(intel_connector->edid);
4660         } else
4661                 return drm_get_edid(&intel_connector->base,
4662                                     intel_dp->aux.ddc);
4663 }
4664
4665 static void
4666 intel_dp_set_edid(struct intel_dp *intel_dp)
4667 {
4668         struct intel_connector *intel_connector = intel_dp->attached_connector;
4669         struct edid *edid;
4670
4671         edid = intel_dp_get_edid(intel_dp);
4672         intel_connector->detect_edid = edid;
4673
4674         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4675                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4676         else
4677                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4678 }
4679
4680 static void
4681 intel_dp_unset_edid(struct intel_dp *intel_dp)
4682 {
4683         struct intel_connector *intel_connector = intel_dp->attached_connector;
4684
4685         kfree(intel_connector->detect_edid);
4686         intel_connector->detect_edid = NULL;
4687
4688         intel_dp->has_audio = false;
4689 }
4690
4691 static enum intel_display_power_domain
4692 intel_dp_power_get(struct intel_dp *dp)
4693 {
4694         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4695         enum intel_display_power_domain power_domain;
4696
4697         power_domain = intel_display_port_power_domain(encoder);
4698         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4699
4700         return power_domain;
4701 }
4702
4703 static void
4704 intel_dp_power_put(struct intel_dp *dp,
4705                    enum intel_display_power_domain power_domain)
4706 {
4707         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4708         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4709 }
4710
/*
 * Full detect cycle for a DP connector: drop any cached EDID, run the
 * platform-specific live-status check, then probe the sink's OUI, MST
 * capability and EDID, and service any pending sink IRQ.  A display power
 * reference is held for the duration of the probe.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	/* DPCD 1.1+ sinks report IRQ causes in DEVICE_SERVICE_IRQ_VECTOR. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4782
4783 static void
4784 intel_dp_force(struct drm_connector *connector)
4785 {
4786         struct intel_dp *intel_dp = intel_attached_dp(connector);
4787         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4788         enum intel_display_power_domain power_domain;
4789
4790         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4791                       connector->base.id, connector->name);
4792         intel_dp_unset_edid(intel_dp);
4793
4794         if (connector->status != connector_status_connected)
4795                 return;
4796
4797         power_domain = intel_dp_power_get(intel_dp);
4798
4799         intel_dp_set_edid(intel_dp);
4800
4801         intel_dp_power_put(intel_dp, power_domain);
4802
4803         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4804                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4805 }
4806
4807 static int intel_dp_get_modes(struct drm_connector *connector)
4808 {
4809         struct intel_connector *intel_connector = to_intel_connector(connector);
4810         struct edid *edid;
4811
4812         edid = intel_connector->detect_edid;
4813         if (edid) {
4814                 int ret = intel_connector_update_modes(connector, edid);
4815                 if (ret)
4816                         return ret;
4817         }
4818
4819         /* if eDP has no EDID, fall back to fixed mode */
4820         if (is_edp(intel_attached_dp(connector)) &&
4821             intel_connector->panel.fixed_mode) {
4822                 struct drm_display_mode *mode;
4823
4824                 mode = drm_mode_duplicate(connector->dev,
4825                                           intel_connector->panel.fixed_mode);
4826                 if (mode) {
4827                         drm_mode_probed_add(connector, mode);
4828                         return 1;
4829                 }
4830         }
4831
4832         return 0;
4833 }
4834
4835 static bool
4836 intel_dp_detect_audio(struct drm_connector *connector)
4837 {
4838         bool has_audio = false;
4839         struct edid *edid;
4840
4841         edid = to_intel_connector(connector)->detect_edid;
4842         if (edid)
4843                 has_audio = drm_detect_monitor_audio(edid);
4844
4845         return has_audio;
4846 }
4847
/*
 * drm_connector_funcs.set_property hook.  Handles the force-audio,
 * broadcast-RGB and (eDP only) scaling-mode properties.  Any change that
 * affects the output triggers a mode restore on the encoder's CRTC; a
 * no-op change returns 0 without touching the hardware.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the DRM object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Effective audio state unchanged: no modeset needed. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Same effective range as before: skip the modeset. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4935
/*
 * Free a DP connector: drop both cached EDIDs, tear down panel state for
 * eDP, and release the DRM connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* intel_connector->edid may be a real EDID, NULL, or an ERR_PTR
	 * marking a known-bad panel EDID; only free real pointers. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4954
/*
 * Tear down a DP encoder: unregister the AUX channel, clean up MST state
 * and, for eDP, make sure panel VDD is really off before freeing.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

#if 0
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
#endif
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4982
4983 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4984 {
4985         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4986
4987         if (!is_edp(intel_dp))
4988                 return;
4989
4990         /*
4991          * vdd might still be enabled do to the delayed vdd off.
4992          * Make sure vdd is actually turned off here.
4993          */
4994         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4995         pps_lock(intel_dp);
4996         edp_panel_vdd_off_sync(intel_dp);
4997         pps_unlock(intel_dp);
4998 }
4999
/*
 * If the BIOS left panel VDD enabled, take the matching display power
 * reference so our state tracking is consistent, then schedule the normal
 * delayed VDD off.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5024
/*
 * drm_encoder_funcs.reset hook (init/resume): re-sync eDP power sequencer
 * state with whatever the BIOS left behind.  Non-eDP encoders need no
 * sanitizing and return immediately.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5047
/* drm_connector hooks shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5059
/* Probe/mode helpers for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5065
/* Encoder lifetime hooks for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5070
/*
 * Handle a hot-plug pulse on a DP digital port.  Long pulses re-verify the
 * physical connection, re-read the DPCD and (re)probe MST; short pulses
 * service link-status checks (and, when enabled, MST sideband traffic).
 * Returns false when the IRQ was fully handled here; true otherwise
 * (e.g. port no longer connected / MST topology gone), which the caller
 * presumably uses to trigger further hotplug processing — the exact
 * contract is defined at the call site in the hotplug code.
 */
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	bool ret = true;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return false;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* A long pulse means connect/disconnect: confirm the port is
		 * still physically connected before touching the sink. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Not an MST sink: check link status like a short pulse. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
#if 0
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
#endif
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	/* Fully handled here. */
	ret = false;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
#if 0
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
#endif
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5159
5160 /* Return which DP Port should be selected for Transcoder DP control */
5161 int
5162 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5163 {
5164         struct drm_device *dev = crtc->dev;
5165         struct intel_encoder *intel_encoder;
5166         struct intel_dp *intel_dp;
5167
5168         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5169                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5170
5171                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5172                     intel_encoder->type == INTEL_OUTPUT_EDP)
5173                         return intel_dp->output_reg;
5174         }
5175
5176         return -1;
5177 }
5178
5179 /* check the VBT to see whether the eDP is on another port */
5180 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5181 {
5182         struct drm_i915_private *dev_priv = dev->dev_private;
5183         union child_device_config *p_child;
5184         int i;
5185         static const short port_mapping[] = {
5186                 [PORT_B] = DVO_PORT_DPB,
5187                 [PORT_C] = DVO_PORT_DPC,
5188                 [PORT_D] = DVO_PORT_DPD,
5189                 [PORT_E] = DVO_PORT_DPE,
5190         };
5191
5192         if (port == PORT_A)
5193                 return true;
5194
5195         if (!dev_priv->vbt.child_dev_num)
5196                 return false;
5197
5198         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5199                 p_child = dev_priv->vbt.child_dev + i;
5200
5201                 if (p_child->common.dvo_port == port_mapping[port] &&
5202                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5203                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5204                         return true;
5205         }
5206         return false;
5207 }
5208
5209 void
5210 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5211 {
5212         struct intel_connector *intel_connector = to_intel_connector(connector);
5213
5214         intel_attach_force_audio_property(connector);
5215         intel_attach_broadcast_rgb_property(connector);
5216         intel_dp->color_range_auto = true;
5217
5218         if (is_edp(intel_dp)) {
5219                 drm_mode_create_scaling_mode_property(connector->dev);
5220                 drm_object_attach_property(
5221                         &connector->base,
5222                         connector->dev->mode_config.scaling_mode_property,
5223                         DRM_MODE_SCALE_ASPECT);
5224                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5225         }
5226 }
5227
5228 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5229 {
5230         intel_dp->last_power_cycle = jiffies;
5231         intel_dp->last_power_on = jiffies;
5232         intel_dp->last_backlight_off = jiffies;
5233 }
5234
/*
 * Determine the panel power sequencer (PPS) delays: read the values the
 * BIOS programmed into the hardware registers, take the max of those and
 * the VBT values, and fall back to the eDP spec limits when both are
 * zero.  The result is cached in intel_dp->pps_delays (hardware 100usec
 * units) and converted to the intel_dp->*_delay fields.  Runs only once
 * per panel; caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register addresses. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power cycle delay in PP_CONTROL, in 1000 x 100us
	 * units, one-based; other platforms use PP_DIVISOR. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hardware 100us units to ms for the delay fields. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5357
/*
 * Program the cached PPS delays (intel_dp->pps_delays) into the panel
 * power sequencer registers, including the pp clock divisor / power cycle
 * delay and the port select bits.  Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register addresses. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT stores the power cycle delay in PP_CONTROL instead
		 * of a divisor register. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5445
5446 /**
5447  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5448  * @dev: DRM device
5449  * @refresh_rate: RR to be programmed
5450  *
5451  * This function gets called when refresh rate (RR) has to be changed from
5452  * one frequency to another. Switches can be between high and low RR
5453  * supported by the panel or to any other RR based on media playback (in
5454  * this case, RR value needs to be passed from user space).
5455  *
5456  * The caller of this function needs to take a lock on dev_priv->drrs.
5457  */
5458 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5459 {
5460         struct drm_i915_private *dev_priv = dev->dev_private;
5461         struct intel_encoder *encoder;
5462         struct intel_digital_port *dig_port = NULL;
5463         struct intel_dp *intel_dp = dev_priv->drrs.dp;
5464         struct intel_crtc_state *config = NULL;
5465         struct intel_crtc *intel_crtc = NULL;
5466         u32 reg, val;
5467         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5468
5469         if (refresh_rate <= 0) {
5470                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5471                 return;
5472         }
5473
5474         if (intel_dp == NULL) {
5475                 DRM_DEBUG_KMS("DRRS not supported.\n");
5476                 return;
5477         }
5478
5479         /*
5480          * FIXME: This needs proper synchronization with psr state for some
5481          * platforms that cannot have PSR and DRRS enabled at the same time.
5482          */
5483
5484         dig_port = dp_to_dig_port(intel_dp);
5485         encoder = &dig_port->base;
5486         intel_crtc = to_intel_crtc(encoder->base.crtc);
5487
5488         if (!intel_crtc) {
5489                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5490                 return;
5491         }
5492
5493         config = intel_crtc->config;
5494
5495         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5496                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5497                 return;
5498         }
5499
5500         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5501                         refresh_rate)
5502                 index = DRRS_LOW_RR;
5503
5504         if (index == dev_priv->drrs.refresh_rate_type) {
5505                 DRM_DEBUG_KMS(
5506                         "DRRS requested for previously set RR...ignoring\n");
5507                 return;
5508         }
5509
5510         if (!intel_crtc->active) {
5511                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5512                 return;
5513         }
5514
5515         if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5516                 switch (index) {
5517                 case DRRS_HIGH_RR:
5518                         intel_dp_set_m_n(intel_crtc, M1_N1);
5519                         break;
5520                 case DRRS_LOW_RR:
5521                         intel_dp_set_m_n(intel_crtc, M2_N2);
5522                         break;
5523                 case DRRS_MAX_RR:
5524                 default:
5525                         DRM_ERROR("Unsupported refreshrate type\n");
5526                 }
5527         } else if (INTEL_INFO(dev)->gen > 6) {
5528                 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5529                 val = I915_READ(reg);
5530
5531                 if (index > DRRS_HIGH_RR) {
5532                         if (IS_VALLEYVIEW(dev))
5533                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5534                         else
5535                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5536                 } else {
5537                         if (IS_VALLEYVIEW(dev))
5538                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5539                         else
5540                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5541                 }
5542                 I915_WRITE(reg, val);
5543         }
5544
5545         dev_priv->drrs.refresh_rate_type = index;
5546
5547         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5548 }
5549
5550 /**
5551  * intel_edp_drrs_enable - init drrs struct if supported
5552  * @intel_dp: DP struct
5553  *
5554  * Initializes frontbuffer_bits and drrs.dp
5555  */
5556 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5557 {
5558         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5559         struct drm_i915_private *dev_priv = dev->dev_private;
5560         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5561         struct drm_crtc *crtc = dig_port->base.base.crtc;
5562         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5563
5564         if (!intel_crtc->config->has_drrs) {
5565                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5566                 return;
5567         }
5568
5569         mutex_lock(&dev_priv->drrs.mutex);
5570         if (WARN_ON(dev_priv->drrs.dp)) {
5571                 DRM_ERROR("DRRS already enabled\n");
5572                 goto unlock;
5573         }
5574
5575         dev_priv->drrs.busy_frontbuffer_bits = 0;
5576
5577         dev_priv->drrs.dp = intel_dp;
5578
5579 unlock:
5580         mutex_unlock(&dev_priv->drrs.mutex);
5581 }
5582
5583 /**
5584  * intel_edp_drrs_disable - Disable DRRS
5585  * @intel_dp: DP struct
5586  *
5587  */
5588 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5589 {
5590         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5591         struct drm_i915_private *dev_priv = dev->dev_private;
5592         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5593         struct drm_crtc *crtc = dig_port->base.base.crtc;
5594         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5595
5596         if (!intel_crtc->config->has_drrs)
5597                 return;
5598
5599         mutex_lock(&dev_priv->drrs.mutex);
5600         if (!dev_priv->drrs.dp) {
5601                 mutex_unlock(&dev_priv->drrs.mutex);
5602                 return;
5603         }
5604
5605         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5606                 intel_dp_set_drrs_state(dev_priv->dev,
5607                         intel_dp->attached_connector->panel.
5608                         fixed_mode->vrefresh);
5609
5610         dev_priv->drrs.dp = NULL;
5611         mutex_unlock(&dev_priv->drrs.mutex);
5612
5613         cancel_delayed_work_sync(&dev_priv->drrs.work);
5614 }
5615
5616 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5617 {
5618         struct drm_i915_private *dev_priv =
5619                 container_of(work, typeof(*dev_priv), drrs.work.work);
5620         struct intel_dp *intel_dp;
5621
5622         mutex_lock(&dev_priv->drrs.mutex);
5623
5624         intel_dp = dev_priv->drrs.dp;
5625
5626         if (!intel_dp)
5627                 goto unlock;
5628
5629         /*
5630          * The delayed work can race with an invalidate hence we need to
5631          * recheck.
5632          */
5633
5634         if (dev_priv->drrs.busy_frontbuffer_bits)
5635                 goto unlock;
5636
5637         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5638                 intel_dp_set_drrs_state(dev_priv->dev,
5639                         intel_dp->attached_connector->panel.
5640                         downclock_mode->vrefresh);
5641
5642 unlock:
5643         mutex_unlock(&dev_priv->drrs.mutex);
5644 }
5645
5646 /**
5647  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5648  * @dev: DRM device
5649  * @frontbuffer_bits: frontbuffer plane tracking bits
5650  *
5651  * This function gets called everytime rendering on the given planes start.
5652  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5653  *
5654  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5655  */
5656 void intel_edp_drrs_invalidate(struct drm_device *dev,
5657                 unsigned frontbuffer_bits)
5658 {
5659         struct drm_i915_private *dev_priv = dev->dev_private;
5660         struct drm_crtc *crtc;
5661         enum i915_pipe pipe;
5662
5663         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5664                 return;
5665
5666         cancel_delayed_work(&dev_priv->drrs.work);
5667
5668         mutex_lock(&dev_priv->drrs.mutex);
5669         if (!dev_priv->drrs.dp) {
5670                 mutex_unlock(&dev_priv->drrs.mutex);
5671                 return;
5672         }
5673
5674         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5675         pipe = to_intel_crtc(crtc)->pipe;
5676
5677         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5678         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5679
5680         /* invalidate means busy screen hence upclock */
5681         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5682                 intel_dp_set_drrs_state(dev_priv->dev,
5683                                 dev_priv->drrs.dp->attached_connector->panel.
5684                                 fixed_mode->vrefresh);
5685
5686         mutex_unlock(&dev_priv->drrs.mutex);
5687 }
5688
5689 /**
5690  * intel_edp_drrs_flush - Restart Idleness DRRS
5691  * @dev: DRM device
5692  * @frontbuffer_bits: frontbuffer plane tracking bits
5693  *
5694  * This function gets called every time rendering on the given planes has
5695  * completed or flip on a crtc is completed. So DRRS should be upclocked
5696  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5697  * if no other planes are dirty.
5698  *
5699  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5700  */
5701 void intel_edp_drrs_flush(struct drm_device *dev,
5702                 unsigned frontbuffer_bits)
5703 {
5704         struct drm_i915_private *dev_priv = dev->dev_private;
5705         struct drm_crtc *crtc;
5706         enum i915_pipe pipe;
5707
5708         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5709                 return;
5710
5711         cancel_delayed_work(&dev_priv->drrs.work);
5712
5713         mutex_lock(&dev_priv->drrs.mutex);
5714         if (!dev_priv->drrs.dp) {
5715                 mutex_unlock(&dev_priv->drrs.mutex);
5716                 return;
5717         }
5718
5719         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5720         pipe = to_intel_crtc(crtc)->pipe;
5721
5722         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5723         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5724
5725         /* flush means busy screen hence upclock */
5726         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5727                 intel_dp_set_drrs_state(dev_priv->dev,
5728                                 dev_priv->drrs.dp->attached_connector->panel.
5729                                 fixed_mode->vrefresh);
5730
5731         /*
5732          * flush also means no more activity hence schedule downclock, if all
5733          * other fbs are quiescent too
5734          */
5735         if (!dev_priv->drrs.busy_frontbuffer_bits)
5736                 schedule_delayed_work(&dev_priv->drrs.work,
5737                                 msecs_to_jiffies(1000));
5738         mutex_unlock(&dev_priv->drrs.mutex);
5739 }
5740
5741 /**
5742  * DOC: Display Refresh Rate Switching (DRRS)
5743  *
5744  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5746  * dynamically, based on the usage scenario. This feature is applicable
5747  * for internal panels.
5748  *
5749  * Indication that the panel supports DRRS is given by the panel EDID, which
5750  * would list multiple refresh rates for one resolution.
5751  *
5752  * DRRS is of 2 types - static and seamless.
5753  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5754  * (may appear as a blink on screen) and is used in dock-undock scenario.
5755  * Seamless DRRS involves changing RR without any visual effect to the user
5756  * and can be used during normal system usage. This is done by programming
5757  * certain registers.
5758  *
5759  * Support for static/seamless DRRS may be indicated in the VBT based on
5760  * inputs from the panel spec.
5761  *
5762  * DRRS saves power by switching to low RR based on usage scenarios.
5763  *
5764  * eDP DRRS:-
5765  *        The implementation is based on frontbuffer tracking implementation.
5766  * When there is a disturbance on the screen triggered by user activity or a
5767  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5768  * When there is no movement on screen, after a timeout of 1 second, a switch
5769  * to low RR is made.
5770  *        For integration with frontbuffer tracking code,
5771  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5772  *
5773  * DRRS can be further extended to support other internal panels and also
5774  * the scenario of video playback wherein RR is set based on the rate
5775  * requested by userspace.
5776  */
5777
5778 /**
5779  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5780  * @intel_connector: eDP connector
5781  * @fixed_mode: preferred mode of panel
5782  *
5783  * This function is  called only once at driver load to initialize basic
5784  * DRRS stuff.
5785  *
5786  * Returns:
5787  * Downclock mode if panel supports it, else return NULL.
5788  * DRRS support is determined by the presence of downclock mode (apart
5789  * from VBT setting).
5790  */
5791 static struct drm_display_mode *
5792 intel_dp_drrs_init(struct intel_connector *intel_connector,
5793                 struct drm_display_mode *fixed_mode)
5794 {
5795         struct drm_connector *connector = &intel_connector->base;
5796         struct drm_device *dev = connector->dev;
5797         struct drm_i915_private *dev_priv = dev->dev_private;
5798         struct drm_display_mode *downclock_mode = NULL;
5799
5800         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5801         lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5802
5803         if (INTEL_INFO(dev)->gen <= 6) {
5804                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5805                 return NULL;
5806         }
5807
5808         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5809                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5810                 return NULL;
5811         }
5812
5813         downclock_mode = intel_find_panel_downclock
5814                                         (dev, fixed_mode, connector);
5815
5816         if (!downclock_mode) {
5817                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5818                 return NULL;
5819         }
5820
5821         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5822
5823         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5824         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5825         return downclock_mode;
5826 }
5827
/*
 * intel_edp_init_connector - probe and cache eDP panel state
 *
 * Caches the DPCD and EDID for an eDP panel, picks the fixed panel mode
 * (EDID preferred mode first, VBT mode as fallback), initializes DRRS if
 * a downclocked mode exists, and sets up panel/backlight state.
 *
 * Returns false when the DPCD read fails (the panel is presumed to be a
 * "ghost"); the caller then tears the connector down again. Non-eDP
 * connectors return true immediately.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum i915_pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/* mode_config.mutex protects the probed_modes list below. */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID blob was unusable; keep an error sentinel. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* intel_connector->edid owns the allocation (or holds ERR_PTR). */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
#if 0
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
#endif

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5936
/*
 * intel_dp_init_connector - initialize the connector for a DP digital port
 *
 * Sets up the intel_dp vfuncs for the platform, creates/registers the DRM
 * connector, configures the hotplug pin, initializes the panel power
 * sequencer for eDP, the AUX channel and (where supported) MST.
 *
 * Returns false and tears down the connector if eDP initialization fails
 * (ghost panel) or the port/eDP combination is invalid on vlv/chv.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	/* AUX clock divider selection depends on the platform generation. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* Panel power sequencer must be set up before touching the panel. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* eDP probe failed: undo everything set up above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

#if 0
	i915_debugfs_connector_add(connector);
#endif

	return true;
}
6085
6086 void
6087 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6088 {
6089         struct drm_i915_private *dev_priv = dev->dev_private;
6090         struct intel_digital_port *intel_dig_port;
6091         struct intel_encoder *intel_encoder;
6092         struct drm_encoder *encoder;
6093         struct intel_connector *intel_connector;
6094
6095         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6096         if (!intel_dig_port)
6097                 return;
6098
6099         intel_connector = intel_connector_alloc();
6100         if (!intel_connector) {
6101                 kfree(intel_dig_port);
6102                 return;
6103         }
6104
6105         intel_encoder = &intel_dig_port->base;
6106         encoder = &intel_encoder->base;
6107
6108         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6109                          DRM_MODE_ENCODER_TMDS);
6110
6111         intel_encoder->compute_config = intel_dp_compute_config;
6112         intel_encoder->disable = intel_disable_dp;
6113         intel_encoder->get_hw_state = intel_dp_get_hw_state;
6114         intel_encoder->get_config = intel_dp_get_config;
6115         intel_encoder->suspend = intel_dp_encoder_suspend;
6116         if (IS_CHERRYVIEW(dev)) {
6117                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6118                 intel_encoder->pre_enable = chv_pre_enable_dp;
6119                 intel_encoder->enable = vlv_enable_dp;
6120                 intel_encoder->post_disable = chv_post_disable_dp;
6121         } else if (IS_VALLEYVIEW(dev)) {
6122                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6123                 intel_encoder->pre_enable = vlv_pre_enable_dp;
6124                 intel_encoder->enable = vlv_enable_dp;
6125                 intel_encoder->post_disable = vlv_post_disable_dp;
6126         } else {
6127                 intel_encoder->pre_enable = g4x_pre_enable_dp;
6128                 intel_encoder->enable = g4x_enable_dp;
6129                 if (INTEL_INFO(dev)->gen >= 5)
6130                         intel_encoder->post_disable = ilk_post_disable_dp;
6131         }
6132
6133         intel_dig_port->port = port;
6134         intel_dig_port->dp.output_reg = output_reg;
6135
6136         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6137         if (IS_CHERRYVIEW(dev)) {
6138                 if (port == PORT_D)
6139                         intel_encoder->crtc_mask = 1 << 2;
6140                 else
6141                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6142         } else {
6143                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6144         }
6145         intel_encoder->cloneable = 0;
6146
6147         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6148         dev_priv->hotplug.irq_port[port] = intel_dig_port;
6149
6150         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6151                 drm_encoder_cleanup(encoder);
6152                 kfree(intel_dig_port);
6153                 kfree(intel_connector);
6154         }
6155 }
6156
/* NOTE: compiled out (#if 0) in this port — MST suspend is not wired up. */
#if 0
/*
 * intel_dp_mst_suspend - suspend MST topology managers on all DP ports
 *
 * Walks every hotplug irq port and, for DP encoders with MST enabled,
 * suspends the MST topology manager.
 */
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}
#endif
6178
/*
 * intel_dp_mst_resume - resume MST topology managers on all DP ports
 *
 * Walks every hotplug irq port looking for DP encoders. The actual
 * topology-manager resume is compiled out (#if 0) in this port, so the
 * loop currently has no effect beyond the type checks.
 */
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
#if 0
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
#endif
		}
	}
}