Merge branch 'vendor/OPENSSL'
[dragonfly.git] / sys / dev / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <drm/drmP.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39
/* NOTE(review): units presumed to be ms (i.e. 10 seconds) -- confirm at use site */
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)

/* DragonFly tunable: set drm.i915.disable_aux_irq=1 to force polled AUX
 * transfers instead of interrupt-driven completion. */
static int disable_aux_irq = 0;
TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50
/* Maps a DP link bandwidth code to the DPLL divider settings that
 * produce the matching link clock. */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* code */
        struct dpll dpll;       /* divider values for that rate */
};
55
/* DPLL settings per DP link rate for gen4 parts. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
62
/* DPLL settings per DP link rate for PCH-split parts. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
69
/* DPLL settings per DP link rate for Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
76
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
94
/* Fixed link rates in the kHz units used throughout this file
 * (162000 corresponds to the 1.62 Gbps link; see the unit discussion
 * below).  skl_rates adds the intermediate rates SKL supports. */
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
98
99 /**
100  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
101  * @intel_dp: DP struct
102  *
103  * If a CPU or PCH DP output is attached to an eDP panel, this function
104  * will return true, and false otherwise.
105  */
106 static bool is_edp(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
111 }
112
113 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
114 {
115         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116
117         return intel_dig_port->base.base.dev;
118 }
119
120 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
121 {
122         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
123 }
124
/* Forward declarations for routines defined later in this file. */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum i915_pipe pipe);
131
132 static int
133 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
134 {
135         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
136
137         switch (max_link_bw) {
138         case DP_LINK_BW_1_62:
139         case DP_LINK_BW_2_7:
140         case DP_LINK_BW_5_4:
141                 break;
142         default:
143                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
144                      max_link_bw);
145                 max_link_bw = DP_LINK_BW_1_62;
146                 break;
147         }
148         return max_link_bw;
149 }
150
151 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
152 {
153         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
154         struct drm_device *dev = intel_dig_port->base.base.dev;
155         u8 source_max, sink_max;
156
157         source_max = 4;
158         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
159             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
160                 source_max = 2;
161
162         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
163
164         return min(source_max, sink_max);
165 }
166
167 /*
168  * The units on the numbers in the next two are... bizarre.  Examples will
169  * make it clearer; this one parallels an example in the eDP spec.
170  *
171  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
172  *
173  *     270000 * 1 * 8 / 10 == 216000
174  *
175  * The actual data capacity of that configuration is 2.16Gbit/s, so the
176  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
177  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
178  * 119000.  At 18bpp that's 2142000 kilobits per second.
179  *
180  * Thus the strange-looking division by 10 in intel_dp_link_required, to
181  * get the result in decakilobits instead of kilobits.
182  */
183
/*
 * Bandwidth required by a mode, in decakilobits/s: @pixel_clock is in
 * kHz, @bpp is bits per pixel; divide by 10 rounding up, per the unit
 * discussion above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        /* round up to whole decakilobits */
        return (kilobits + 9) / 10;
}
189
/*
 * Usable data rate of a link, in decakilobits/s: 8/10 of the raw symbol
 * rate to account for the 8b/10b channel coding.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int raw = max_link_clock * max_lanes;

        return (raw * 8) / 10;
}
195
196 static enum drm_mode_status
197 intel_dp_mode_valid(struct drm_connector *connector,
198                     struct drm_display_mode *mode)
199 {
200         struct intel_dp *intel_dp = intel_attached_dp(connector);
201         struct intel_connector *intel_connector = to_intel_connector(connector);
202         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
203         int target_clock = mode->clock;
204         int max_rate, mode_rate, max_lanes, max_link_clock;
205
206         if (is_edp(intel_dp) && fixed_mode) {
207                 if (mode->hdisplay > fixed_mode->hdisplay)
208                         return MODE_PANEL;
209
210                 if (mode->vdisplay > fixed_mode->vdisplay)
211                         return MODE_PANEL;
212
213                 target_clock = fixed_mode->clock;
214         }
215
216         max_link_clock = intel_dp_max_link_rate(intel_dp);
217         max_lanes = intel_dp_max_lane_count(intel_dp);
218
219         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
220         mode_rate = intel_dp_link_required(target_clock, 18);
221
222         if (mode_rate > max_rate)
223                 return MODE_CLOCK_HIGH;
224
225         if (mode->clock < 10000)
226                 return MODE_CLOCK_LOW;
227
228         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
229                 return MODE_H_ILLEGAL;
230
231         return MODE_OK;
232 }
233
/*
 * Pack up to the first four bytes of @src into a 32-bit word with byte 0
 * in the most significant position, the layout the AUX data registers
 * expect.  Any bytes beyond four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        int i, n = src_bytes > 4 ? 4 : src_bytes;
        uint32_t v = 0;

        for (i = 0; i < n; i++)
                v |= ((uint32_t) src[i]) << ((3 - i) * 8);

        return v;
}
245
/*
 * Unpack a 32-bit AUX data word into up to four bytes of @dst, most
 * significant byte first (the inverse of intel_dp_pack_aux()).
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i, n = dst_bytes > 4 ? 4 : dst_bytes;

        for (i = 0; i < n; i++)
                dst[i] = src >> ((3 - i) * 8);
}
254
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        /* Decode the FSB field of CLKCFG into MHz (FSB/4). */
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                /* unknown encoding: fall back to the 533 FSB value */
                return 133;
        }
}
288
/* Forward declarations: panel power sequencer setup, defined later. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp);
295
296 static void pps_lock(struct intel_dp *intel_dp)
297 {
298         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
299         struct intel_encoder *encoder = &intel_dig_port->base;
300         struct drm_device *dev = encoder->base.dev;
301         struct drm_i915_private *dev_priv = dev->dev_private;
302         enum intel_display_power_domain power_domain;
303
304         /*
305          * See vlv_power_sequencer_reset() why we need
306          * a power domain reference here.
307          */
308         power_domain = intel_display_port_power_domain(encoder);
309         intel_display_power_get(dev_priv, power_domain);
310
311         mutex_lock(&dev_priv->pps_mutex);
312 }
313
314 static void pps_unlock(struct intel_dp *intel_dp)
315 {
316         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
317         struct intel_encoder *encoder = &intel_dig_port->base;
318         struct drm_device *dev = encoder->base.dev;
319         struct drm_i915_private *dev_priv = dev->dev_private;
320         enum intel_display_power_domain power_domain;
321
322         mutex_unlock(&dev_priv->pps_mutex);
323
324         power_domain = intel_display_port_power_domain(encoder);
325         intel_display_power_put(dev_priv, power_domain);
326 }
327
328 static void
329 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
330 {
331         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
332         struct drm_device *dev = intel_dig_port->base.base.dev;
333         struct drm_i915_private *dev_priv = dev->dev_private;
334         enum i915_pipe pipe = intel_dp->pps_pipe;
335         bool pll_enabled;
336         uint32_t DP;
337
338         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
339                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
340                  pipe_name(pipe), port_name(intel_dig_port->port)))
341                 return;
342
343         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
344                       pipe_name(pipe), port_name(intel_dig_port->port));
345
346         /* Preserve the BIOS-computed detected bit. This is
347          * supposed to be read-only.
348          */
349         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
350         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
351         DP |= DP_PORT_WIDTH(1);
352         DP |= DP_LINK_TRAIN_PAT_1;
353
354         if (IS_CHERRYVIEW(dev))
355                 DP |= DP_PIPE_SELECT_CHV(pipe);
356         else if (pipe == PIPE_B)
357                 DP |= DP_PIPEB_SELECT;
358
359         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
360
361         /*
362          * The DPLL for the pipe must be enabled for this to work.
363          * So enable temporarily it if it's not already enabled.
364          */
365         if (!pll_enabled)
366                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
367                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
368
369         /*
370          * Similar magic as in intel_dp_enable_port().
371          * We _must_ do this port enable + disable trick
372          * to make this power seqeuencer lock onto the port.
373          * Otherwise even VDD force bit won't work.
374          */
375         I915_WRITE(intel_dp->output_reg, DP);
376         POSTING_READ(intel_dp->output_reg);
377
378         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
379         POSTING_READ(intel_dp->output_reg);
380
381         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
382         POSTING_READ(intel_dp->output_reg);
383
384         if (!pll_enabled)
385                 vlv_force_pll_off(dev, pipe);
386 }
387
/*
 * Return the pipe whose panel power sequencer is assigned to this eDP
 * port, picking and initializing one on first use: choose a pipe not
 * already claimed by another eDP port, steal it from whatever was using
 * it, program its PPS registers, and kick it so it locks onto this port.
 * Caller must hold pps_mutex.
 */
static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum i915_pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        /* Take the sequencer away from any port currently using it. */
        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
451
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum i915_pipe pipe);
454
455 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
456                                enum i915_pipe pipe)
457 {
458         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
459 }
460
461 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
462                                 enum i915_pipe pipe)
463 {
464         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
465 }
466
/* Wildcard predicate: accepts any pipe (last-resort fallback). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum i915_pipe pipe)
{
        return true;
}
472
473 static enum i915_pipe
474 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
475                      enum port port,
476                      vlv_pipe_check pipe_check)
477 {
478         enum i915_pipe pipe;
479
480         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
481                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
482                         PANEL_PORT_SELECT_MASK;
483
484                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
485                         continue;
486
487                 if (!pipe_check(dev_priv, pipe))
488                         continue;
489
490                 return pipe;
491         }
492
493         return INVALID_PIPE;
494 }
495
/*
 * At init time, work out which pipe's power sequencer (if any) the BIOS
 * left driving this eDP port: prefer one with the panel powered on, then
 * one with VDD forced, then any with the right port selected.  If none
 * matches, leave pps_pipe invalid and let vlv_power_sequencer_pipe()
 * assign one lazily.  Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
532
/*
 * Invalidate all eDP power sequencer assignments so that they are
 * re-picked by vlv_power_sequencer_pipe() on next use.
 * NOTE(review): presumably called when PPS hardware state may have been
 * lost -- confirm at call sites (outside this chunk).
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
561
562 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
563 {
564         struct drm_device *dev = intel_dp_to_dev(intel_dp);
565
566         if (HAS_PCH_SPLIT(dev))
567                 return PCH_PP_CONTROL;
568         else
569                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
570 }
571
572 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
573 {
574         struct drm_device *dev = intel_dp_to_dev(intel_dp);
575
576         if (HAS_PCH_SPLIT(dev))
577                 return PCH_PP_STATUS;
578         else
579                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
580 }
581
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
#if 0
/* NOTE(review): compiled out (#if 0) in this port -- presumably the
 * reboot notifier is not registered here; confirm before re-enabling. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
#endif
619
620 static bool edp_have_panel_power(struct intel_dp *intel_dp)
621 {
622         struct drm_device *dev = intel_dp_to_dev(intel_dp);
623         struct drm_i915_private *dev_priv = dev->dev_private;
624
625         lockdep_assert_held(&dev_priv->pps_mutex);
626
627         if (IS_VALLEYVIEW(dev) &&
628             intel_dp->pps_pipe == INVALID_PIPE)
629                 return false;
630
631         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
632 }
633
634 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
635 {
636         struct drm_device *dev = intel_dp_to_dev(intel_dp);
637         struct drm_i915_private *dev_priv = dev->dev_private;
638
639         lockdep_assert_held(&dev_priv->pps_mutex);
640
641         if (IS_VALLEYVIEW(dev) &&
642             intel_dp->pps_pipe == INVALID_PIPE)
643                 return false;
644
645         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
646 }
647
648 static void
649 intel_dp_check_edp(struct intel_dp *intel_dp)
650 {
651         struct drm_device *dev = intel_dp_to_dev(intel_dp);
652         struct drm_i915_private *dev_priv = dev->dev_private;
653
654         if (!is_edp(intel_dp))
655                 return;
656
657         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
658                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
660                               I915_READ(_pp_stat_reg(intel_dp)),
661                               I915_READ(_pp_ctrl_reg(intel_dp)));
662         }
663 }
664
/*
 * Wait for the in-flight AUX transfer to finish (SEND_BUSY to clear).
 * With AUX interrupts enabled, sleep on gmbus_wait_queue for up to 10ms;
 * otherwise poll atomically for 10ms.  Returns the last AUX_CH_CTL value
 * read, whether or not the wait timed out.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the control register and latches the value into 'status'
 * every time the wait condition is evaluated. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
688
689 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
690 {
691         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692         struct drm_device *dev = intel_dig_port->base.base.dev;
693
694         /*
695          * The clock divider is based off the hrawclk, and would like to run at
696          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
697          */
698         return index ? 0 : intel_hrawclk(dev) / 2;
699 }
700
701 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702 {
703         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704         struct drm_device *dev = intel_dig_port->base.base.dev;
705         struct drm_i915_private *dev_priv = dev->dev_private;
706
707         if (index)
708                 return 0;
709
710         if (intel_dig_port->port == PORT_A) {
711                 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
712         } else {
713                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714         }
715 }
716
717 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718 {
719         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720         struct drm_device *dev = intel_dig_port->base.base.dev;
721         struct drm_i915_private *dev_priv = dev->dev_private;
722
723         if (intel_dig_port->port == PORT_A) {
724                 if (index)
725                         return 0;
726                 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
727         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728                 /* Workaround for non-ULT HSW */
729                 switch (index) {
730                 case 0: return 63;
731                 case 1: return 72;
732                 default: return 0;
733                 }
734         } else  {
735                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
736         }
737 }
738
/* VLV uses a fixed AUX clock divider of 100; only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 100;
}
743
/*
 * SKL doesn't need us to program the AUX clock divider (hardware derives
 * the clock from CDCLK automatically); return a dummy divider of 1 so
 * this vfunc still plugs into the shared AUX code.  Only index 0 is
 * valid.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 1;
}
753
754 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
755                                       bool has_aux_irq,
756                                       int send_bytes,
757                                       uint32_t aux_clock_divider)
758 {
759         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760         struct drm_device *dev = intel_dig_port->base.base.dev;
761         uint32_t precharge, timeout;
762
763         if (IS_GEN6(dev))
764                 precharge = 3;
765         else
766                 precharge = 5;
767
768         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770         else
771                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772
773         return DP_AUX_CH_CTL_SEND_BUSY |
774                DP_AUX_CH_CTL_DONE |
775                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
776                DP_AUX_CH_CTL_TIME_OUT_ERROR |
777                timeout |
778                DP_AUX_CH_CTL_RECEIVE_ERROR |
779                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
781                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
782 }
783
784 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785                                       bool has_aux_irq,
786                                       int send_bytes,
787                                       uint32_t unused)
788 {
789         return DP_AUX_CH_CTL_SEND_BUSY |
790                DP_AUX_CH_CTL_DONE |
791                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792                DP_AUX_CH_CTL_TIME_OUT_ERROR |
793                DP_AUX_CH_CTL_TIME_OUT_1600us |
794                DP_AUX_CH_CTL_RECEIVE_ERROR |
795                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797 }
798
/*
 * Perform one raw AUX channel transaction against the sink.
 *
 * send/send_bytes: fully packed request (header + payload) to transmit.
 * recv/recv_size:  buffer for the reply; hardware has only 5 data
 *                  registers, so both directions are capped at 20 bytes.
 *
 * Returns the number of reply bytes on success, or a negative errno:
 *  -EBUSY     channel never went idle, or never signalled DONE
 *  -E2BIG     message larger than the 20-byte hardware limit
 *  -EIO       receive error reported by the hardware
 *  -ETIMEDOUT sink did not answer (typically: nothing connected)
 *
 * Takes the PPS lock for the whole transaction and forces eDP VDD on;
 * VDD is only dropped again here if we were the ones to enable it.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* Data registers immediately follow the control register. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: retry the whole transaction at each available AUX
	 * clock divider until the platform hook returns 0 (no more). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	/* Hardware may report more than the caller asked for; clamp. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	/* Only drop VDD if we enabled it at the top of this call. */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
946
947 #define BARE_ADDRESS_SIZE       3
948 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: serialize a drm_dp_aux_msg into the raw
 * wire format expected by intel_dp_aux_ch() and decode the reply.
 *
 * Returns the payload size transferred on success (0 is possible for an
 * address-only transaction), or a negative errno from the underlying
 * channel. msg->reply is filled with the sink's reply field; callers
 * must check it before trusting the payload.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* 4-byte AUX header: request[7:4]|addr[19:16], addr[15:8],
	 * addr[7:0], length-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-size message is an address-only transaction
		 * (no length byte, no payload). */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply is 1 status byte followed by the data. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1017
/*
 * Legacy DragonFly i2c-over-AUX bit transfer, used by the iic(4) DP-AUX
 * bus registered in intel_dp_aux_init(). Moves at most one byte per
 * call (mode selects read/write/address-only).
 *
 * Returns 0 on success (with *read_byte filled for MODE_I2C_READ), or a
 * negative errno; -EREMOTEIO covers NACKs and malformed replies.
 */
static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) is set unless this is the STOP. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Message layout per mode: writes carry a length byte plus one
	 * data byte; reads carry just the length byte; anything else is
	 * an address-only (start/stop) transaction. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		/* First check the native (AUX-level) reply field. */
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		/* Native ACK: now check the I2C-level reply field. */
		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = 0;	/* reply_bytes - 1 */
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}
1137
/*
 * Set up the AUX channel for a DP port: pick the AUX_CTL register,
 * install the drm_dp_aux transfer hook, and register the DragonFly
 * iic(4) DP-AUX bus used for DDC/EDID access.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* PCH AUX registers; note these only survive on HSW/BDW (the
	 * non-HSW/BDW branch below overwrites aux_ch_ctl_reg). */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	/* DragonFly-specific: register the iic bus driven by
	 * intel_dp_i2c_aux_ch() and hook it up as aux.ddc. */
	ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->aux.ddc);
	WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
	     ret, port_name(port));

}
1192
/*
 * Connector unregister hook for DP: no DP-specific teardown needed,
 * just forward to the generic intel_connector_unregister() helper.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	intel_connector_unregister(intel_connector);
}
1198
#if 0
/*
 * NOTE(review): dead code, compiled out. Superseded by the
 * iic_dp_aux_add_bus() call in intel_dp_aux_init(); kept only as a
 * reference against the Linux i2c_dp_aux path.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
#if 0
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	if (ret < 0)
		return ret;

	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
				&intel_dp->adapter.dev.kobj,
				intel_dp->adapter.dev.kobj.name);
#endif
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);

	return ret;
}
#endif
1231
1232 static void
1233 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1234 {
1235         u32 ctrl1;
1236
1237         memset(&pipe_config->dpll_hw_state, 0,
1238                sizeof(pipe_config->dpll_hw_state));
1239
1240         pipe_config->ddi_pll_sel = SKL_DPLL0;
1241         pipe_config->dpll_hw_state.cfgcr1 = 0;
1242         pipe_config->dpll_hw_state.cfgcr2 = 0;
1243
1244         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1245         switch (link_clock / 2) {
1246         case 81000:
1247                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1248                                               SKL_DPLL0);
1249                 break;
1250         case 135000:
1251                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1252                                               SKL_DPLL0);
1253                 break;
1254         case 270000:
1255                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1256                                               SKL_DPLL0);
1257                 break;
1258         case 162000:
1259                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1260                                               SKL_DPLL0);
1261                 break;
1262         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1263         results in CDCLK change. Need to handle the change of CDCLK by
1264         disabling pipes and re-enabling them */
1265         case 108000:
1266                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1267                                               SKL_DPLL0);
1268                 break;
1269         case 216000:
1270                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1271                                               SKL_DPLL0);
1272                 break;
1273
1274         }
1275         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1276 }
1277
1278 static void
1279 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1280 {
1281         memset(&pipe_config->dpll_hw_state, 0,
1282                sizeof(pipe_config->dpll_hw_state));
1283
1284         switch (link_bw) {
1285         case DP_LINK_BW_1_62:
1286                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1287                 break;
1288         case DP_LINK_BW_2_7:
1289                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1290                 break;
1291         case DP_LINK_BW_5_4:
1292                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1293                 break;
1294         }
1295 }
1296
1297 static int
1298 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1299 {
1300         if (intel_dp->num_sink_rates) {
1301                 *sink_rates = intel_dp->sink_rates;
1302                 return intel_dp->num_sink_rates;
1303         }
1304
1305         *sink_rates = default_rates;
1306
1307         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1308 }
1309
1310 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1311 {
1312         /* WaDisableHBR2:skl */
1313         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1314                 return false;
1315
1316         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1317             (INTEL_INFO(dev)->gen >= 9))
1318                 return true;
1319         else
1320                 return false;
1321 }
1322
1323 static int
1324 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1325 {
1326         if (IS_SKYLAKE(dev)) {
1327                 *source_rates = skl_rates;
1328                 return ARRAY_SIZE(skl_rates);
1329         }
1330
1331         *source_rates = default_rates;
1332
1333         /* This depends on the fact that 5.4 is last value in the array */
1334         if (intel_dp_source_supports_hbr2(dev))
1335                 return (DP_LINK_BW_5_4 >> 3) + 1;
1336         else
1337                 return (DP_LINK_BW_2_7 >> 3) + 1;
1338 }
1339
1340 static void
1341 intel_dp_set_clock(struct intel_encoder *encoder,
1342                    struct intel_crtc_state *pipe_config, int link_bw)
1343 {
1344         struct drm_device *dev = encoder->base.dev;
1345         const struct dp_link_dpll *divisor = NULL;
1346         int i, count = 0;
1347
1348         if (IS_G4X(dev)) {
1349                 divisor = gen4_dpll;
1350                 count = ARRAY_SIZE(gen4_dpll);
1351         } else if (HAS_PCH_SPLIT(dev)) {
1352                 divisor = pch_dpll;
1353                 count = ARRAY_SIZE(pch_dpll);
1354         } else if (IS_CHERRYVIEW(dev)) {
1355                 divisor = chv_dpll;
1356                 count = ARRAY_SIZE(chv_dpll);
1357         } else if (IS_VALLEYVIEW(dev)) {
1358                 divisor = vlv_dpll;
1359                 count = ARRAY_SIZE(vlv_dpll);
1360         }
1361
1362         if (divisor && count) {
1363                 for (i = 0; i < count; i++) {
1364                         if (link_bw == divisor[i].link_bw) {
1365                                 pipe_config->dpll = divisor[i].dpll;
1366                                 pipe_config->clock_set = true;
1367                                 break;
1368                         }
1369                 }
1370         }
1371 }
1372
1373 static int intersect_rates(const int *source_rates, int source_len,
1374                            const int *sink_rates, int sink_len,
1375                            int *common_rates)
1376 {
1377         int i = 0, j = 0, k = 0;
1378
1379         while (i < source_len && j < sink_len) {
1380                 if (source_rates[i] == sink_rates[j]) {
1381                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1382                                 return k;
1383                         common_rates[k] = source_rates[i];
1384                         ++k;
1385                         ++i;
1386                         ++j;
1387                 } else if (source_rates[i] < sink_rates[j]) {
1388                         ++i;
1389                 } else {
1390                         ++j;
1391                 }
1392         }
1393         return k;
1394 }
1395
/*
 * Fill common_rates with the link rates supported by both source and
 * sink (ascending); returns how many were found.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_tbl;
	const int *sink_tbl;
	int src_n, sink_n;

	sink_n = intel_dp_sink_rates(intel_dp, &sink_tbl);
	src_n = intel_dp_source_rates(dev, &src_tbl);

	return intersect_rates(src_tbl, src_n, sink_tbl, sink_n,
			       common_rates);
}
1410
1411 static void snprintf_int_array(char *str, size_t len,
1412                                const int *array, int nelem)
1413 {
1414         int i;
1415
1416         str[0] = '\0';
1417
1418         for (i = 0; i < nelem; i++) {
1419                 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1420                 if (r >= len)
1421                         return;
1422                 str += r;
1423                 len -= r;
1424         }
1425 }
1426
1427 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1428 {
1429         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1430         const int *source_rates, *sink_rates;
1431         int source_len, sink_len, common_len;
1432         int common_rates[DP_MAX_SUPPORTED_RATES];
1433         char str[128]; /* FIXME: too big for stack? */
1434
1435         if ((drm_debug & DRM_UT_KMS) == 0)
1436                 return;
1437
1438         source_len = intel_dp_source_rates(dev, &source_rates);
1439         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1440         DRM_DEBUG_KMS("source rates: %s\n", str);
1441
1442         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1443         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1444         DRM_DEBUG_KMS("sink rates: %s\n", str);
1445
1446         common_len = intel_dp_common_rates(intel_dp, common_rates);
1447         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1448         DRM_DEBUG_KMS("common rates: %s\n", str);
1449 }
1450
1451 static int rate_to_index(int find, const int *rates)
1452 {
1453         int i = 0;
1454
1455         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1456                 if (find == rates[i])
1457                         break;
1458
1459         return i;
1460 }
1461
1462 int
1463 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1464 {
1465         int rates[DP_MAX_SUPPORTED_RATES] = {};
1466         int len;
1467
1468         len = intel_dp_common_rates(intel_dp, rates);
1469         if (WARN_ON(len <= 0))
1470                 return 162000;
1471
1472         return rates[rate_to_index(0, rates) - 1];
1473 }
1474
/* Map a link rate to its index in the sink's DPCD rate table, for
 * programming DP_LINK_RATE_SET. Assumes the rate is present in
 * intel_dp->sink_rates -- callers pick it from the common table. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1479
1480 bool
1481 intel_dp_compute_config(struct intel_encoder *encoder,
1482                         struct intel_crtc_state *pipe_config)
1483 {
1484         struct drm_device *dev = encoder->base.dev;
1485         struct drm_i915_private *dev_priv = dev->dev_private;
1486         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1487         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1488         enum port port = dp_to_dig_port(intel_dp)->port;
1489         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1490         struct intel_connector *intel_connector = intel_dp->attached_connector;
1491         int lane_count, clock;
1492         int min_lane_count = 1;
1493         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1494         /* Conveniently, the link BW constants become indices with a shift...*/
1495         int min_clock = 0;
1496         int max_clock;
1497         int bpp, mode_rate;
1498         int link_avail, link_clock;
1499         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1500         int common_len;
1501
1502         common_len = intel_dp_common_rates(intel_dp, common_rates);
1503
1504         /* No common link rates between source and sink */
1505         WARN_ON(common_len <= 0);
1506
1507         max_clock = common_len - 1;
1508
1509         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1510                 pipe_config->has_pch_encoder = true;
1511
1512         pipe_config->has_dp_encoder = true;
1513         pipe_config->has_drrs = false;
1514         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1515
1516         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1517                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1518                                        adjusted_mode);
1519
1520                 if (INTEL_INFO(dev)->gen >= 9) {
1521                         int ret;
1522                         ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1523                         if (ret)
1524                                 return ret;
1525                 }
1526
1527                 if (!HAS_PCH_SPLIT(dev))
1528                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1529                                                  intel_connector->panel.fitting_mode);
1530                 else
1531                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1532                                                 intel_connector->panel.fitting_mode);
1533         }
1534
1535         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1536                 return false;
1537
1538         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1539                       "max bw %d pixel clock %iKHz\n",
1540                       max_lane_count, common_rates[max_clock],
1541                       adjusted_mode->crtc_clock);
1542
1543         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1544          * bpc in between. */
1545         bpp = pipe_config->pipe_bpp;
1546         if (is_edp(intel_dp)) {
1547                 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1548                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1549                                       dev_priv->vbt.edp_bpp);
1550                         bpp = dev_priv->vbt.edp_bpp;
1551                 }
1552
1553                 /*
1554                  * Use the maximum clock and number of lanes the eDP panel
1555                  * advertizes being capable of. The panels are generally
1556                  * designed to support only a single clock and lane
1557                  * configuration, and typically these values correspond to the
1558                  * native resolution of the panel.
1559                  */
1560                 min_lane_count = max_lane_count;
1561                 min_clock = max_clock;
1562         }
1563
1564         for (; bpp >= 6*3; bpp -= 2*3) {
1565                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1566                                                    bpp);
1567
1568                 for (clock = min_clock; clock <= max_clock; clock++) {
1569                         for (lane_count = min_lane_count;
1570                                 lane_count <= max_lane_count;
1571                                 lane_count <<= 1) {
1572
1573                                 link_clock = common_rates[clock];
1574                                 link_avail = intel_dp_max_data_rate(link_clock,
1575                                                                     lane_count);
1576
1577                                 if (mode_rate <= link_avail) {
1578                                         goto found;
1579                                 }
1580                         }
1581                 }
1582         }
1583
1584         return false;
1585
1586 found:
1587         if (intel_dp->color_range_auto) {
1588                 /*
1589                  * See:
1590                  * CEA-861-E - 5.1 Default Encoding Parameters
1591                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1592                  */
1593                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1594                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1595                 else
1596                         intel_dp->color_range = 0;
1597         }
1598
1599         if (intel_dp->color_range)
1600                 pipe_config->limited_color_range = true;
1601
1602         intel_dp->lane_count = lane_count;
1603
1604         if (intel_dp->num_sink_rates) {
1605                 intel_dp->link_bw = 0;
1606                 intel_dp->rate_select =
1607                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1608         } else {
1609                 intel_dp->link_bw =
1610                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1611                 intel_dp->rate_select = 0;
1612         }
1613
1614         pipe_config->pipe_bpp = bpp;
1615         pipe_config->port_clock = common_rates[clock];
1616
1617         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1618                       intel_dp->link_bw, intel_dp->lane_count,
1619                       pipe_config->port_clock, bpp);
1620         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1621                       mode_rate, link_avail);
1622
1623         intel_link_compute_m_n(bpp, lane_count,
1624                                adjusted_mode->crtc_clock,
1625                                pipe_config->port_clock,
1626                                &pipe_config->dp_m_n);
1627
1628         if (intel_connector->panel.downclock_mode != NULL &&
1629                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1630                         pipe_config->has_drrs = true;
1631                         intel_link_compute_m_n(bpp, lane_count,
1632                                 intel_connector->panel.downclock_mode->clock,
1633                                 pipe_config->port_clock,
1634                                 &pipe_config->dp_m2_n2);
1635         }
1636
1637         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1638                 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1639         else if (IS_BROXTON(dev))
1640                 /* handled in ddi */;
1641         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1642                 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1643         else
1644                 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1645
1646         return true;
1647 }
1648
/*
 * Program the CPU eDP PLL frequency select bits in DP_A to match the
 * configured port clock.  Only two link rates exist here: 162000 kHz
 * (the "160MHz" bit) and 270000 kHz.  The selection is mirrored into
 * the cached intel_dp->DP value as well as the live register.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
                      crtc->config->port_clock);
        /* Read-modify-write: clear the old frequency field first. */
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config->port_clock == 162000) {
                /* For a long time we've carried around a ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        /* NOTE(review): 500us delay presumably lets the PLL frequency
         * change settle before the port is enabled — confirm vs. PRM. */
        udelay(500);
}
1679
/*
 * Assemble the DP port register value (cached in intel_dp->DP) for the
 * upcoming modeset from the adjusted mode, lane count and sink caps.
 * Nothing is written to the port register itself here; the one hardware
 * write is the CPT TRANS_DP_CTL enhanced-framing bit.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev) && port == PORT_A) {
                /* IVB CPU eDP: sync polarity and pipe select in the port reg. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* Pipe select field lives at bit 29 on this variant. */
                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                /* CPT moved enhanced framing into TRANS_DP_CTL; update it now. */
                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1761
/*
 * Panel power sequencer status mask/value pairs consumed by the
 * wait_panel_*() helpers below: "on" (powered up, sequencer idle),
 * "off" (powered down, sequencer idle) and "cycle" (power-cycle
 * delay no longer active, sequencer off+idle).
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1770
1771 static void wait_panel_status(struct intel_dp *intel_dp,
1772                                        u32 mask,
1773                                        u32 value)
1774 {
1775         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1776         struct drm_i915_private *dev_priv = dev->dev_private;
1777         u32 pp_stat_reg, pp_ctrl_reg;
1778
1779         lockdep_assert_held(&dev_priv->pps_mutex);
1780
1781         pp_stat_reg = _pp_stat_reg(intel_dp);
1782         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1783
1784         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1785                         mask, value,
1786                         I915_READ(pp_stat_reg),
1787                         I915_READ(pp_ctrl_reg));
1788
1789         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1790                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1791                                 I915_READ(pp_stat_reg),
1792                                 I915_READ(pp_ctrl_reg));
1793         }
1794
1795         DRM_DEBUG_KMS("Wait complete\n");
1796 }
1797
/* Block until the sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1803
/* Block until the sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1809
/*
 * Honour the panel's mandated power-cycle delay: first burn off any
 * remaining wall-clock time since the last power-off, then wait for
 * the sequencer itself to report the cycle complete.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1821
/* Wait out the panel's power-on -> backlight-on delay (wall clock). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1827
/* Wait out the panel's backlight-off delay (wall clock). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1833
1834 /* Read the current pp_control value, unlocking the register if it
1835  * is locked
1836  */
1837
1838 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1839 {
1840         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1841         struct drm_i915_private *dev_priv = dev->dev_private;
1842         u32 control;
1843
1844         lockdep_assert_held(&dev_priv->pps_mutex);
1845
1846         control = I915_READ(_pp_ctrl_reg(intel_dp));
1847         control &= ~PANEL_UNLOCK_MASK;
1848         control |= PANEL_UNLOCK_REGS;
1849         return control;
1850 }
1851
/*
 * Force the panel VDD rail on so AUX/DDC traffic works while the panel
 * itself may still be off.
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true when VDD was not already requested, i.e. this call is the
 * one that needs a matching disable.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* Sample before we set want_panel_vdd below. */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        /* A deferred VDD-off may be pending; cancel it, we want VDD up. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* Hardware already has VDD forced on; nothing to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Grab the power domain reference released in vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
1909
1910 /*
1911  * Must be paired with intel_edp_panel_vdd_off() or
1912  * intel_edp_panel_off().
1913  * Nested calls to these functions are not allowed since
1914  * we drop the lock. Caller must use some higher level
1915  * locking to prevent nested calls from other threads.
1916  */
1917 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1918 {
1919         bool vdd;
1920
1921         if (!is_edp(intel_dp))
1922                 return;
1923
1924         pps_lock(intel_dp);
1925         vdd = edp_panel_vdd_on(intel_dp);
1926         pps_unlock(intel_dp);
1927
1928         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1929              port_name(dp_to_dig_port(intel_dp)->port));
1930 }
1931
/*
 * Immediately drop the VDD force bit and release the matching power
 * domain reference taken in edp_panel_vdd_on().  Caller must hold
 * pps_mutex and must have cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if hardware isn't forcing VDD. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* Dropping VDD with the panel off counts as a power cycle start. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        /* Release the reference taken in edp_panel_vdd_on(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1972
1973 static void edp_panel_vdd_work(struct work_struct *__work)
1974 {
1975         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1976                                                  struct intel_dp, panel_vdd_work);
1977
1978         pps_lock(intel_dp);
1979         if (!intel_dp->want_panel_vdd)
1980                 edp_panel_vdd_off_sync(intel_dp);
1981         pps_unlock(intel_dp);
1982 }
1983
1984 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1985 {
1986         unsigned long delay;
1987
1988         /*
1989          * Queue the timer to fire a long time from now (relative to the power
1990          * down delay) to keep the panel power up across a sequence of
1991          * operations.
1992          */
1993         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1994         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1995 }
1996
1997 /*
1998  * Must be paired with edp_panel_vdd_on().
1999  * Must hold pps_mutex around the whole on/off sequence.
2000  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2001  */
2002 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2003 {
2004         struct drm_i915_private *dev_priv =
2005                 intel_dp_to_dev(intel_dp)->dev_private;
2006
2007         lockdep_assert_held(&dev_priv->pps_mutex);
2008
2009         if (!is_edp(intel_dp))
2010                 return;
2011
2012         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2013              port_name(dp_to_dig_port(intel_dp)->port));
2014
2015         intel_dp->want_panel_vdd = false;
2016
2017         if (sync)
2018                 edp_panel_vdd_off_sync(intel_dp);
2019         else
2020                 edp_panel_vdd_schedule_off(intel_dp);
2021 }
2022
/*
 * Turn the eDP panel power on via the panel power sequencer, including
 * the GEN5 (Ironlake) reset workaround.  Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the mandated off time before powering back up. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp used later to satisfy the backlight-on delay. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
2070
/* Public, locking wrapper around edp_panel_on(); no-op on non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2080
2081
/*
 * Turn the eDP panel power off.  Requires an active VDD request (the
 * caller's edp_panel_vdd_on()), whose power domain reference is
 * released here after the panel is off.  Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        /* VDD goes down together with panel power in this single write. */
        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start of the mandated power-cycle off period. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
2123
/* Public, locking wrapper around edp_panel_off(); no-op on non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2133
2134 /* Enable backlight in the panel power control. */
2135 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2136 {
2137         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2138         struct drm_device *dev = intel_dig_port->base.base.dev;
2139         struct drm_i915_private *dev_priv = dev->dev_private;
2140         u32 pp;
2141         u32 pp_ctrl_reg;
2142
2143         /*
2144          * If we enable the backlight right away following a panel power
2145          * on, we may see slight flicker as the panel syncs with the eDP
2146          * link.  So delay a bit to make sure the image is solid before
2147          * allowing it to appear.
2148          */
2149         wait_backlight_on(intel_dp);
2150
2151         pps_lock(intel_dp);
2152
2153         pp = ironlake_get_pp_control(intel_dp);
2154         pp |= EDP_BLC_ENABLE;
2155
2156         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2157
2158         I915_WRITE(pp_ctrl_reg, pp);
2159         POSTING_READ(pp_ctrl_reg);
2160
2161         pps_unlock(intel_dp);
2162 }
2163
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the sequencer's backlight-enable bit. */
        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
2175
2176 /* Disable backlight in the panel power control. */
2177 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2178 {
2179         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2180         struct drm_i915_private *dev_priv = dev->dev_private;
2181         u32 pp;
2182         u32 pp_ctrl_reg;
2183
2184         if (!is_edp(intel_dp))
2185                 return;
2186
2187         pps_lock(intel_dp);
2188
2189         pp = ironlake_get_pp_control(intel_dp);
2190         pp &= ~EDP_BLC_ENABLE;
2191
2192         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2193
2194         I915_WRITE(pp_ctrl_reg, pp);
2195         POSTING_READ(pp_ctrl_reg);
2196
2197         pps_unlock(intel_dp);
2198
2199         intel_dp->last_backlight_off = jiffies;
2200         edp_wait_backlight_off(intel_dp);
2201 }
2202
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* Sequencer's backlight bit first, then the PWM (reverse of on). */
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2214
2215 /*
2216  * Hook for controlling the panel power control backlight through the bl_power
2217  * sysfs attribute. Take care to handle multiple calls.
2218  */
2219 static void intel_edp_backlight_power(struct intel_connector *connector,
2220                                       bool enable)
2221 {
2222         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2223         bool is_enabled;
2224
2225         pps_lock(intel_dp);
2226         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2227         pps_unlock(intel_dp);
2228
2229         if (is_enabled == enable)
2230                 return;
2231
2232         DRM_DEBUG_KMS("panel power control backlight %s\n",
2233                       enable ? "enable" : "disable");
2234
2235         if (enable)
2236                 _intel_edp_backlight_on(intel_dp);
2237         else
2238                 _intel_edp_backlight_off(intel_dp);
2239 }
2240
/*
 * Enable the CPU eDP PLL in DP_A.  Requires the pipe to be disabled
 * and the PLL/port to be off; warns otherwise.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* NOTE(review): 200us presumably covers PLL lock time — confirm. */
        udelay(200);
}
2266
/*
 * Disable the CPU eDP PLL in DP_A.  Requires the pipe to be disabled,
 * the PLL to be on, and the port to be off; warns otherwise.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}
2291
2292 /* If the sink supports it, try to set the power state appropriately */
2293 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2294 {
2295         int ret, i;
2296
2297         /* Should have a valid DPCD by this point */
2298         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2299                 return;
2300
2301         if (mode != DRM_MODE_DPMS_ON) {
2302                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2303                                          DP_SET_POWER_D3);
2304         } else {
2305                 /*
2306                  * When turning on, we need to retry for 1ms to give the sink
2307                  * time to wake up.
2308                  */
2309                 for (i = 0; i < 3; i++) {
2310                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2311                                                  DP_SET_POWER_D0);
2312                         if (ret == 1)
2313                                 break;
2314                         msleep(1);
2315                 }
2316         }
2317
2318         if (ret != 1)
2319                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2320                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2321 }
2322
/*
 * Read back whether the DP port is enabled in hardware and, if so,
 * which pipe drives it (via *pipe).  Returns false when the power
 * domain is off, the port is disabled, or (CPT) no pipe maps to it.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum i915_pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        /* Register reads are meaningless with the power domain off. */
        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* Pipe select encoding differs per platform/port; decode it. */
        if (IS_GEN7(dev) && port == PORT_A) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                enum i915_pipe p;

                /* CPT: the pipe->port routing lives in TRANS_DP_CTL. */
                for_each_pipe(dev_priv, p) {
                        u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
                        if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
                                *pipe = p;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else {
                *pipe = PORT_TO_PIPE(tmp);
        }

        return true;
}
2365
/*
 * Reconstruct the pipe configuration (sync flags, color range, link
 * M/N, port clock, dot clock) from the hardware state of an enabled
 * DP port.  Used for state readout/cross-checking.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* Sync polarities live in TRANS_DP_CTL on CPT, in the port reg
         * everywhere else. */
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* CPU eDP (port A): derive port clock from the DP_A PLL freq bits. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2449
/*
 * Disable the DP port: shut down audio and PSR, power the panel down
 * (holding vdd across the sequence, as the power sequencer requires),
 * and on pre-gen5 hardware take the link down before the pipe goes off.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* PSR teardown only here for non-DDI hardware; DDI platforms are
	 * presumably handled elsewhere -- confirm against the DDI code. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2473
/*
 * ILK post-disable hook: take the link down, and for the CPU eDP port
 * (port A) also switch off its dedicated eDP PLL.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2483
/* VLV post-disable hook: nothing platform-specific, just drop the link. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2490
/*
 * CHV post-disable hook: take the link down, then put the PHY data lanes
 * back into reset via the DPIO sideband (enable soft-reset propagation,
 * then clear the lane reset bits to assert reset -- the inverse of the
 * deassert sequence in chv_pre_enable_dp()).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	/* sb_lock serializes all DPIO sideband accesses */
	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* clearing these bits asserts the TX lane resets */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2526
/*
 * Encode the requested link training pattern (and scrambling state) into
 * the platform's register layout: DDI uses the separate DP_TP_CTL register
 * (written to hardware here); gen7 port A and CPT PCH ports use the _CPT
 * link-train bits in *DP; everything else uses the legacy bits in *DP.
 * For the non-DDI cases the caller writes *DP to the port register itself
 * (see intel_dp_enable_port()). Training pattern 3 is only available on
 * DDI and CHV; other hardware falls back to pattern 2 with an error.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* no TPS3 on this hardware; fall back to TPS2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* no TPS3 on this hardware; fall back to TPS2 */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2610
/*
 * Enable the DP port via its port register, programmed with training
 * pattern 1 as required for link bring-up. Note the deliberate two-step
 * write sequence below -- do not merge the writes.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2634
/*
 * Common DP enable path: under the pps lock, bind a power sequencer
 * (VLV), enable the port and power the panel on; then wake the sink,
 * run full link training, and finally enable audio if the mode has it.
 * WARNs and bails out if the port is already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	unsigned int lane_mask = 0x0;	/* passed to vlv_wait_port_ready() below */

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV needs a power sequencer assigned to this pipe/port first */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* panel power on, with vdd held across the sequence */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2675
/* g4x enable hook: run the common enable path, then turn the backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
2683
/*
 * VLV enable hook: the port itself is brought up earlier from
 * vlv_pre_enable_dp() (which calls intel_enable_dp()); here only the
 * backlight and PSR remain to be switched on.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
2691
/*
 * g4x pre-enable: latch the port register configuration and, for the CPU
 * eDP port (port A, which only ilk+ has), set up and enable the eDP PLL
 * before the pipe starts.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2705
/*
 * Logically disconnect this eDP port from the power sequencer it currently
 * owns: sync vdd off, clear the sequencer's port select register, and mark
 * intel_dp->pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum i915_pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* make sure vdd is really off before releasing the sequencer */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2731
/*
 * Walk all eDP encoders and detach any one that currently owns the power
 * sequencer of @pipe, so the caller can take it over. Must be called with
 * pps_mutex held; only pipes A and B are valid here (per the WARN_ON).
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP encoders use a panel power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* stealing from an active port should never happen */
		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2768
/*
 * Bind the power sequencer of this encoder's pipe to the eDP port:
 * detach whatever sequencer the port used before, steal the target
 * sequencer from any other port holding it, then (re)initialize the
 * sequencer state and registers. No-op for non-eDP ports or when the
 * right sequencer is already bound. Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* already using the right sequencer? */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2809
/*
 * VLV pre-enable: program the PCS registers for this channel over the
 * DPIO sideband, then run the common intel_enable_dp() sequence (on VLV
 * the port is enabled from pre_enable, not from the enable hook).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the result of this read is immediately discarded by
	 * the "val = 0" below; presumably only the sideband read cycle
	 * itself matters (or it is vestigial) -- kept as-is, confirm before
	 * removing.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2838
/*
 * VLV pre-PLL-enable: latch the port register configuration, then reset
 * the Tx lanes to their default state and apply the inter-pair skew
 * fixup, all via the DPIO sideband.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2868
/*
 * CHV pre-enable: bring the PHY data lanes out of reset, program per-lane
 * upar and latency settings, and set the clock-rate-dependent lane
 * stagger -- all over the DPIO sideband -- then run the common
 * intel_enable_dp() sequence.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* setting these bits releases the TX lanes from reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < 4; i++) {
		/* Set the upar bit (cleared only for lane 1) */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming, scaled by the port clock */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2956
/*
 * CHV pre-PLL-enable: latch the port register configuration, then program
 * the PHY's left/right clock buffer distribution and clock channel usage
 * for this pipe/channel combination via the DPIO sideband.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3022
3023 /*
3024  * Native read with retry for link status and receiver capability reads for
3025  * cases where the sink may still be asleep.
3026  *
3027  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3028  * supposed to retry 3 times per the spec.
3029  */
3030 static ssize_t
3031 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3032                         void *buffer, size_t size)
3033 {
3034         ssize_t ret;
3035         int i;
3036
3037         /*
3038          * Sometime we just get the same incorrect byte repeated
3039          * over the entire buffer. Doing just one throw away read
3040          * initially seems to "solve" it.
3041          */
3042         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3043
3044         for (i = 0; i < 3; i++) {
3045                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3046                 if (ret == size)
3047                         return ret;
3048                 msleep(1);
3049         }
3050
3051         return ret;
3052 }
3053
3054 /*
3055  * Fetch AUX CH registers 0x202 - 0x207 which contain
3056  * link status information
3057  */
3058 static bool
3059 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3060 {
3061         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3062                                        DP_LANE0_1_STATUS,
3063                                        link_status,
3064                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3065 }
3066
/* These are source-specific values. */
/*
 * Maximum voltage swing level the source side supports, chosen per
 * platform (and per port, where the CPU eDP port A differs).
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9 eDP with low-vswing VBT opt-in allows one more level */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3090
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform. Higher swing levels generally leave less
 * pre-emphasis headroom.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3158
/*
 * Program the ValleyView DPIO PHY with the voltage swing / pre-emphasis
 * requested in train_set[0].
 *
 * The demph/uniqtranscale values are opaque hardware tuning constants
 * indexed first by pre-emphasis level, then by voltage swing; do not try
 * to derive or "simplify" them.  Unsupported combinations return 0
 * without touching the PHY.
 *
 * Always returns 0: on VLV the signal levels live entirely in the PHY,
 * so there are no bits for the caller to merge into the DP port register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        /* Look up the register values for the requested levels. */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /*
         * Write the PHY registers over the sideband interface.  The
         * TX_DW5 writes bracket the update (0 first, 0x80000000 last);
         * NOTE(review): the final write presumably latches/arms the new
         * values — confirm against the VLV DPIO documentation before
         * reordering anything here.
         */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->sb_lock);

        return 0;
}
3258
/*
 * Program the CherryView DPIO PHY with the voltage swing / pre-emphasis
 * requested in train_set[0].
 *
 * Unlike VLV, CHV exposes deemphasis and margin as small integer knobs
 * that are applied per TX lane, with an explicit "swing calculation"
 * start/stop sequence around the update.  The deemph/margin numbers are
 * hardware tuning constants; unsupported level combinations return 0
 * without touching the PHY.
 *
 * Always returns 0: the signal levels live entirely in the PHY, so the
 * caller has nothing to merge into the DP port register.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
        u32 deemph_reg_value, margin_reg_value, val;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum i915_pipe pipe = intel_crtc->pipe;
        int i;

        /* Look up the tuning values for the requested levels. */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 128;
                        margin_reg_value = 52;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 128;
                        margin_reg_value = 77;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 128;
                        margin_reg_value = 102;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        deemph_reg_value = 128;
                        margin_reg_value = 154;
                        /* FIXME extra to set for 1200 */
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 85;
                        margin_reg_value = 78;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 85;
                        margin_reg_value = 116;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 85;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 64;
                        margin_reg_value = 104;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 64;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 43;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        mutex_lock(&dev_priv->sb_lock);

        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
        val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
        val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
        val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
        val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

        /* Zero the TX margin fields on both PCS groups. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
        val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
        val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
        val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
        val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

        /* Program swing deemph */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
                val &= ~DPIO_SWING_DEEMPH9P5_MASK;
                val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
        }

        /* Program swing margin */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
                val &= ~DPIO_SWING_MARGIN000_MASK;
                val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }

        /* Disable unique transition scale */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
                val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
        }

        /*
         * Only the max-swing / no-pre-emphasis combination needs the
         * unique transition scale re-enabled with the 0x9a coefficient.
         */
        if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
                        == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
                ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
                        == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

                /*
                 * The document said it needs to set bit 27 for ch0 and bit 26
                 * for ch1. Might be a typo in the doc.
                 * For now, for this unique transition scale selection, set bit
                 * 27 for ch0 and ch1.
                 */
                for (i = 0; i < 4; i++) {
                        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
                        val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
                        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
                }

                for (i = 0; i < 4; i++) {
                        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
                        val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
                        val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
                        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
                }
        }

        /* Start swing calculation */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

        /* LRC Bypass */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
        val |= DPIO_LRC_BYPASS;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

        mutex_unlock(&dev_priv->sb_lock);

        return 0;
}
3432
3433 static void
3434 intel_get_adjust_train(struct intel_dp *intel_dp,
3435                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3436 {
3437         uint8_t v = 0;
3438         uint8_t p = 0;
3439         int lane;
3440         uint8_t voltage_max;
3441         uint8_t preemph_max;
3442
3443         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3444                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3445                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3446
3447                 if (this_v > v)
3448                         v = this_v;
3449                 if (this_p > p)
3450                         p = this_p;
3451         }
3452
3453         voltage_max = intel_dp_voltage_max(intel_dp);
3454         if (v >= voltage_max)
3455                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3456
3457         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3458         if (p >= preemph_max)
3459                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3460
3461         for (lane = 0; lane < 4; lane++)
3462                 intel_dp->train_set[lane] = v | p;
3463 }
3464
3465 static uint32_t
3466 gen4_signal_levels(uint8_t train_set)
3467 {
3468         uint32_t        signal_levels = 0;
3469
3470         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3471         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3472         default:
3473                 signal_levels |= DP_VOLTAGE_0_4;
3474                 break;
3475         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3476                 signal_levels |= DP_VOLTAGE_0_6;
3477                 break;
3478         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3479                 signal_levels |= DP_VOLTAGE_0_8;
3480                 break;
3481         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3482                 signal_levels |= DP_VOLTAGE_1_2;
3483                 break;
3484         }
3485         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3486         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3487         default:
3488                 signal_levels |= DP_PRE_EMPHASIS_0;
3489                 break;
3490         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3491                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3492                 break;
3493         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3494                 signal_levels |= DP_PRE_EMPHASIS_6;
3495                 break;
3496         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3497                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3498                 break;
3499         }
3500         return signal_levels;
3501 }
3502
3503 /* Gen6's DP voltage swing and pre-emphasis control */
3504 static uint32_t
3505 gen6_edp_signal_levels(uint8_t train_set)
3506 {
3507         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3508                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3509         switch (signal_levels) {
3510         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3511         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3513         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3514                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3515         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3516         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3517                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3518         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3519         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3520                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3521         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3522         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3523                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3524         default:
3525                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3526                               "0x%x\n", signal_levels);
3527                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3528         }
3529 }
3530
3531 /* Gen7's DP voltage swing and pre-emphasis control */
3532 static uint32_t
3533 gen7_edp_signal_levels(uint8_t train_set)
3534 {
3535         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3536                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3537         switch (signal_levels) {
3538         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3539                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3540         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3541                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3542         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3543                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3544
3545         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3546                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3547         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3548                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3549
3550         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3551                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3552         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3553                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3554
3555         default:
3556                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3557                               "0x%x\n", signal_levels);
3558                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3559         }
3560 }
3561
3562 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3563 static uint32_t
3564 hsw_signal_levels(uint8_t train_set)
3565 {
3566         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3567                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3568         switch (signal_levels) {
3569         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3570                 return DDI_BUF_TRANS_SELECT(0);
3571         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3572                 return DDI_BUF_TRANS_SELECT(1);
3573         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3574                 return DDI_BUF_TRANS_SELECT(2);
3575         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3576                 return DDI_BUF_TRANS_SELECT(3);
3577
3578         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3579                 return DDI_BUF_TRANS_SELECT(4);
3580         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3581                 return DDI_BUF_TRANS_SELECT(5);
3582         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3583                 return DDI_BUF_TRANS_SELECT(6);
3584
3585         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3586                 return DDI_BUF_TRANS_SELECT(7);
3587         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3588                 return DDI_BUF_TRANS_SELECT(8);
3589
3590         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3591                 return DDI_BUF_TRANS_SELECT(9);
3592         default:
3593                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3594                               "0x%x\n", signal_levels);
3595                 return DDI_BUF_TRANS_SELECT(0);
3596         }
3597 }
3598
3599 static void bxt_signal_levels(struct intel_dp *intel_dp)
3600 {
3601         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3602         enum port port = dport->port;
3603         struct drm_device *dev = dport->base.base.dev;
3604         struct intel_encoder *encoder = &dport->base;
3605         uint8_t train_set = intel_dp->train_set[0];
3606         uint32_t level = 0;
3607
3608         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3609                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3610         switch (signal_levels) {
3611         default:
3612                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3613         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3614                 level = 0;
3615                 break;
3616         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3617                 level = 1;
3618                 break;
3619         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3620                 level = 2;
3621                 break;
3622         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3623                 level = 3;
3624                 break;
3625         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3626                 level = 4;
3627                 break;
3628         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3629                 level = 5;
3630                 break;
3631         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3632                 level = 6;
3633                 break;
3634         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3635                 level = 7;
3636                 break;
3637         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3638                 level = 8;
3639                 break;
3640         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3641                 level = 9;
3642                 break;
3643         }
3644
3645         bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3646 }
3647
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Dispatches to the platform-specific signal-level routine for the
 * vswing/pre-emphasis in train_set[0] and merges any port-register
 * bits it produces into *DP under `mask`.  Platforms that program the
 * levels elsewhere (BXT DDI table, CHV/VLV PHY) return mask == 0 so
 * *DP is left unchanged.
 *
 * NOTE(review): the order of the checks is load-bearing — IS_BROXTON
 * must precede HAS_DDI, and IS_CHERRYVIEW must precede IS_VALLEYVIEW,
 * so the more specific platform wins.  Do not reorder.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];

        if (IS_BROXTON(dev)) {
                signal_levels = 0;
                bxt_signal_levels(intel_dp);
                mask = 0;
        } else if (HAS_DDI(dev)) {
                signal_levels = hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
                signal_levels = chv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_VALLEYVIEW(dev)) {
                signal_levels = vlv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_GEN7(dev) && port == PORT_A) {
                signal_levels = gen7_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev) && port == PORT_A) {
                signal_levels = gen6_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
                signal_levels = gen4_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }

        /* Only meaningful when the levels actually live in *DP. */
        if (mask)
                DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

        DRM_DEBUG_KMS("Using vswing level %d\n",
                train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
        DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
                (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
                        DP_TRAIN_PRE_EMPHASIS_SHIFT);

        *DP = (*DP & ~mask) | signal_levels;
}
3693
3694 static bool
3695 intel_dp_set_link_train(struct intel_dp *intel_dp,
3696                         uint32_t *DP,
3697                         uint8_t dp_train_pat)
3698 {
3699         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3700         struct drm_device *dev = intel_dig_port->base.base.dev;
3701         struct drm_i915_private *dev_priv = dev->dev_private;
3702         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3703         int ret, len;
3704
3705         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3706
3707         I915_WRITE(intel_dp->output_reg, *DP);
3708         POSTING_READ(intel_dp->output_reg);
3709
3710         buf[0] = dp_train_pat;
3711         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3712             DP_TRAINING_PATTERN_DISABLE) {
3713                 /* don't write DP_TRAINING_LANEx_SET on disable */
3714                 len = 1;
3715         } else {
3716                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3717                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3718                 len = intel_dp->lane_count + 1;
3719         }
3720
3721         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3722                                 buf, len);
3723
3724         return ret == len;
3725 }
3726
3727 static bool
3728 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3729                         uint8_t dp_train_pat)
3730 {
3731         if (!intel_dp->train_set_valid)
3732                 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3733         intel_dp_set_signal_levels(intel_dp, DP);
3734         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3735 }
3736
3737 static bool
3738 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3739                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3740 {
3741         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3742         struct drm_device *dev = intel_dig_port->base.base.dev;
3743         struct drm_i915_private *dev_priv = dev->dev_private;
3744         int ret;
3745
3746         intel_get_adjust_train(intel_dp, link_status);
3747         intel_dp_set_signal_levels(intel_dp, DP);
3748
3749         I915_WRITE(intel_dp->output_reg, *DP);
3750         POSTING_READ(intel_dp->output_reg);
3751
3752         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3753                                 intel_dp->train_set, intel_dp->lane_count);
3754
3755         return ret == intel_dp->lane_count;
3756 }
3757
3758 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3759 {
3760         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3761         struct drm_device *dev = intel_dig_port->base.base.dev;
3762         struct drm_i915_private *dev_priv = dev->dev_private;
3763         enum port port = intel_dig_port->port;
3764         uint32_t val;
3765
3766         if (!HAS_DDI(dev))
3767                 return;
3768
3769         val = I915_READ(DP_TP_CTL(port));
3770         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3771         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3772         I915_WRITE(DP_TP_CTL(port), val);
3773
3774         /*
3775          * On PORT_A we can have only eDP in SST mode. There the only reason
3776          * we need to set idle transmission mode is to work around a HW issue
3777          * where we enable the pipe while not in idle link-training mode.
3778          * In this case there is requirement to wait for a minimum number of
3779          * idle patterns to be sent.
3780          */
3781         if (port == PORT_A)
3782                 return;
3783
3784         if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3785                      1))
3786                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3787 }
3788
3789 /* Enable corresponding port and start training pattern 1 */
3790 void
3791 intel_dp_start_link_train(struct intel_dp *intel_dp)
3792 {
3793         struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3794         struct drm_device *dev = encoder->dev;
3795         int i;
3796         uint8_t voltage;
3797         int voltage_tries, loop_tries;
3798         uint32_t DP = intel_dp->DP;
3799         uint8_t link_config[2];
3800
3801         if (HAS_DDI(dev))
3802                 intel_ddi_prepare_link_retrain(encoder);
3803
3804         /* Write the link configuration data */
3805         link_config[0] = intel_dp->link_bw;
3806         link_config[1] = intel_dp->lane_count;
3807         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3808                 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3809         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3810         if (intel_dp->num_sink_rates)
3811                 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3812                                 &intel_dp->rate_select, 1);
3813
3814         link_config[0] = 0;
3815         link_config[1] = DP_SET_ANSI_8B10B;
3816         drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3817
3818         DP |= DP_PORT_EN;
3819
3820         /* clock recovery */
3821         if (!intel_dp_reset_link_train(intel_dp, &DP,
3822                                        DP_TRAINING_PATTERN_1 |
3823                                        DP_LINK_SCRAMBLING_DISABLE)) {
3824                 DRM_ERROR("failed to enable link training\n");
3825                 return;
3826         }
3827
3828         voltage = 0xff;
3829         voltage_tries = 0;
3830         loop_tries = 0;
3831         for (;;) {
3832                 uint8_t link_status[DP_LINK_STATUS_SIZE];
3833
3834                 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3835                 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3836                         DRM_ERROR("failed to get link status\n");
3837                         break;
3838                 }
3839
3840                 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3841                         DRM_DEBUG_KMS("clock recovery OK\n");
3842                         break;
3843                 }
3844
3845                 /*
3846                  * if we used previously trained voltage and pre-emphasis values
3847                  * and we don't get clock recovery, reset link training values
3848                  */
3849                 if (intel_dp->train_set_valid) {
3850                         DRM_DEBUG_KMS("clock recovery not ok, reset");
3851                         /* clear the flag as we are not reusing train set */
3852                         intel_dp->train_set_valid = false;
3853                         if (!intel_dp_reset_link_train(intel_dp, &DP,
3854                                                        DP_TRAINING_PATTERN_1 |
3855                                                        DP_LINK_SCRAMBLING_DISABLE)) {
3856                                 DRM_ERROR("failed to enable link training\n");
3857                                 return;
3858                         }
3859                         continue;
3860                 }
3861
3862                 /* Check to see if we've tried the max voltage */
3863                 for (i = 0; i < intel_dp->lane_count; i++)
3864                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3865                                 break;
3866                 if (i == intel_dp->lane_count) {
3867                         ++loop_tries;
3868                         if (loop_tries == 5) {
3869                                 DRM_ERROR("too many full retries, give up\n");
3870                                 break;
3871                         }
3872                         intel_dp_reset_link_train(intel_dp, &DP,
3873                                                   DP_TRAINING_PATTERN_1 |
3874                                                   DP_LINK_SCRAMBLING_DISABLE);
3875                         voltage_tries = 0;
3876                         continue;
3877                 }
3878
3879                 /* Check to see if we've tried the same voltage 5 times */
3880                 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3881                         ++voltage_tries;
3882                         if (voltage_tries == 5) {
3883                                 DRM_ERROR("too many voltage retries, give up\n");
3884                                 break;
3885                         }
3886                 } else
3887                         voltage_tries = 0;
3888                 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3889
3890                 /* Update training set as requested by target */
3891                 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3892                         DRM_ERROR("failed to update link training\n");
3893                         break;
3894                 }
3895         }
3896
3897         intel_dp->DP = DP;
3898 }
3899
/*
 * Second phase of DP link training: channel equalization.
 *
 * Assumes clock recovery (intel_dp_start_link_train) has already run.
 * Drives training pattern 2 (or 3 where supported) until the sink
 * reports channel EQ, restarting full clock recovery when the link
 * degrades.  The final port register value is cached in intel_dp->DP,
 * and train_set_valid is set only on success so a later training pass
 * may reuse the trained drive levels.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after 5 full clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		/* Honor the sink's mandated delay before polling status. */
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3982
/* End link training: switch the sink back to normal (non-training) operation. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3988
/*
 * Power the DP port down.
 *
 * Steps the port through the idle training pattern before clearing
 * DP_PORT_EN, then sleeps for panel_power_down_delay.  Not for DDI
 * platforms (WARN_ON below), and a no-op if the port is already off.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* CPT (and gen7 port A) use a different link-train field layout. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and audio along with it). */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
4044
/*
 * Read the sink's DPCD receiver-capability block and derive cached state.
 *
 * Returns false when the AUX transfer fails or DPCD_REV reads zero
 * (no DPCD present).  On success it additionally:
 *  - probes PSR/PSR2 support for eDP sinks (dev_priv->psr.*)
 *  - sets intel_dp->use_tps3 (training pattern 3 capability)
 *  - fills intel_dp->sink_rates[]/num_sink_rates for eDP 1.4+ sinks
 *  - fetches intel_dp->downstream_ports[] for DP 1.1+ branch devices
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 (gen9+) additionally requires AUX frame sync. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
	 * have support for TP3 hence that check is used along with dpcd check
	 * to ensure TP3 can be enabled.
	 * SKL < B0: due to its WaDisableHBR2 workaround it is the only
	 * exception where TP3 is supported but still not enabled.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    intel_dp_source_supports_hbr2(dev)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* A zero entry terminates the rate table. */
			if (val == 0)
				break;

			/* DPCD link rates are in 200 kHz units while drm
			 * stores clocks in 10 kHz units, hence * 200 / 10. */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4144
4145 static void
4146 intel_dp_probe_oui(struct intel_dp *intel_dp)
4147 {
4148         u8 buf[3];
4149
4150         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4151                 return;
4152
4153         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4154                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4155                               buf[0], buf[1], buf[2]);
4156
4157         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4158                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4159                               buf[0], buf[1], buf[2]);
4160 }
4161
4162 static bool
4163 intel_dp_probe_mst(struct intel_dp *intel_dp)
4164 {
4165         u8 buf[1];
4166
4167         if (!intel_dp->can_mst)
4168                 return false;
4169
4170         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4171                 return false;
4172
4173         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4174                 if (buf[0] & DP_MST_CAP) {
4175                         DRM_DEBUG_KMS("Sink is MST capable\n");
4176                         intel_dp->is_mst = true;
4177                 } else {
4178                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4179                         intel_dp->is_mst = false;
4180                 }
4181         }
4182
4183 #if 0
4184         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4185         return intel_dp->is_mst;
4186 #else
4187         return false;
4188 #endif
4189 }
4190
/*
 * Read the sink-computed frame CRC (DP compliance/debug aid).
 *
 * Starts the sink's TEST_SINK CRC engine, waits up to six vblanks for
 * the CRC count to advance, copies the six CRC bytes into @crc and
 * stops the engine again.  IPS is disabled across the measurement and
 * re-enabled on every exit path.  Returns 0 on success or a negative
 * errno (-EIO on AUX failure, -ENOTTY if the sink lacks CRC support,
 * -ETIMEDOUT if no new CRC appears).
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;
	int ret = 0;

	hsw_disable_ips(intel_crtc);

	/* The sink must advertise CRC test support. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
		ret = -ENOTTY;
		goto out;
	}

	/* Set DP_TEST_SINK_START without clobbering the other TEST_SINK bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Remember the current CRC count so we can detect a fresh CRC. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Poll once per vblank until the CRC count changes (max 6 tries). */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Six CRC bytes: R/Cr, G/Y, B/Cb (two bytes each). */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Stop the CRC engine again, preserving the other TEST_SINK bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}
out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4265
4266 static bool
4267 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4268 {
4269         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4270                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4271                                        sink_irq_vector, 1) == 1;
4272 }
4273
4274 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4275 {
4276         uint8_t test_result = DP_TEST_ACK;
4277         return test_result;
4278 }
4279
4280 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4281 {
4282         uint8_t test_result = DP_TEST_NAK;
4283         return test_result;
4284 }
4285
4286 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4287 {
4288         uint8_t test_result = DP_TEST_NAK;
4289         struct intel_connector *intel_connector = intel_dp->attached_connector;
4290         struct drm_connector *connector = &intel_connector->base;
4291
4292         if (intel_connector->detect_edid == NULL ||
4293             connector->edid_corrupt ||
4294             intel_dp->aux.i2c_defer_count > 6) {
4295                 /* Check EDID read for NACKs, DEFERs and corruption
4296                  * (DP CTS 1.2 Core r1.1)
4297                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4298                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4299                  *    4.2.2.6 : EDID corruption detected
4300                  * Use failsafe mode for all cases
4301                  */
4302                 if (intel_dp->aux.i2c_nack_count > 0 ||
4303                         intel_dp->aux.i2c_defer_count > 0)
4304                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4305                                       intel_dp->aux.i2c_nack_count,
4306                                       intel_dp->aux.i2c_defer_count);
4307                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4308         } else {
4309                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4310                                         DP_TEST_EDID_CHECKSUM,
4311                                         &intel_connector->detect_edid->checksum,
4312                                         1))
4313                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4314
4315                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4316                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4317         }
4318
4319         /* Set test active flag here so userspace doesn't interrupt things */
4320         intel_dp->compliance_test_active = 1;
4321
4322         return test_result;
4323 }
4324
4325 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4326 {
4327         uint8_t test_result = DP_TEST_NAK;
4328         return test_result;
4329 }
4330
4331 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4332 {
4333         uint8_t response = DP_TEST_NAK;
4334         uint8_t rxdata = 0;
4335         int status = 0;
4336
4337         intel_dp->compliance_test_active = 0;
4338         intel_dp->compliance_test_type = 0;
4339         intel_dp->compliance_test_data = 0;
4340
4341         intel_dp->aux.i2c_nack_count = 0;
4342         intel_dp->aux.i2c_defer_count = 0;
4343
4344         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4345         if (status <= 0) {
4346                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4347                 goto update_status;
4348         }
4349
4350         switch (rxdata) {
4351         case DP_TEST_LINK_TRAINING:
4352                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4353                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4354                 response = intel_dp_autotest_link_training(intel_dp);
4355                 break;
4356         case DP_TEST_LINK_VIDEO_PATTERN:
4357                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4358                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4359                 response = intel_dp_autotest_video_pattern(intel_dp);
4360                 break;
4361         case DP_TEST_LINK_EDID_READ:
4362                 DRM_DEBUG_KMS("EDID test requested\n");
4363                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4364                 response = intel_dp_autotest_edid(intel_dp);
4365                 break;
4366         case DP_TEST_LINK_PHY_TEST_PATTERN:
4367                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4368                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4369                 response = intel_dp_autotest_phy_pattern(intel_dp);
4370                 break;
4371         default:
4372                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4373                 break;
4374         }
4375
4376 update_status:
4377         status = drm_dp_dpcd_write(&intel_dp->aux,
4378                                    DP_TEST_RESPONSE,
4379                                    &response, 1);
4380         if (status <= 0)
4381                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4382 }
4383
#if 0
/*
 * Handle MST (multi-stream) sink events signalled via a short HPD pulse.
 * Currently compiled out: MST is not enabled on this platform (see
 * intel_dp_probe_mst, which always reports false).
 *
 * Reads the ESI vector, retrains the link if channel EQ dropped, lets
 * the MST topology manager process the IRQ, acks the serviced ESI
 * bytes and loops while more events are pending.  On ESI read failure
 * the sink is assumed gone: MST mode is torn down and a hotplug event
 * is sent.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bytes; up to 3 write attempts. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
#endif
4442
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called on a short HPD pulse while the output is supposed to be
 * active: services pending sink IRQs and retrains the link if channel
 * equalization has been lost.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex. */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to do unless the encoder is active on a live crtc. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if channel equalization has been lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4502
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD, including branch-device
 * downstream ports (SINK_COUNT / DDC probing fallbacks).
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	/* No readable DPCD means nothing is attached. */
	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* Non-zero SINK_COUNT means a sink hangs off the branch. */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DP 1.1+: per-port downstream type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DP 1.0: only the coarse downstream type bits exist. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4552
4553 static enum drm_connector_status
4554 edp_detect(struct intel_dp *intel_dp)
4555 {
4556         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4557         enum drm_connector_status status;
4558
4559         status = intel_panel_detect(dev);
4560         if (status == connector_status_unknown)
4561                 status = connector_status_connected;
4562
4563         return status;
4564 }
4565
4566 static enum drm_connector_status
4567 ironlake_dp_detect(struct intel_dp *intel_dp)
4568 {
4569         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4570         struct drm_i915_private *dev_priv = dev->dev_private;
4571         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4572
4573         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4574                 return connector_status_disconnected;
4575
4576         return intel_dp_detect_dpcd(intel_dp);
4577 }
4578
4579 static int g4x_digital_port_connected(struct drm_device *dev,
4580                                        struct intel_digital_port *intel_dig_port)
4581 {
4582         struct drm_i915_private *dev_priv = dev->dev_private;
4583         uint32_t bit;
4584
4585         if (IS_VALLEYVIEW(dev)) {
4586                 switch (intel_dig_port->port) {
4587                 case PORT_B:
4588                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4589                         break;
4590                 case PORT_C:
4591                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4592                         break;
4593                 case PORT_D:
4594                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4595                         break;
4596                 default:
4597                         return -EINVAL;
4598                 }
4599         } else {
4600                 switch (intel_dig_port->port) {
4601                 case PORT_B:
4602                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4603                         break;
4604                 case PORT_C:
4605                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4606                         break;
4607                 case PORT_D:
4608                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4609                         break;
4610                 default:
4611                         return -EINVAL;
4612                 }
4613         }
4614
4615         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4616                 return 0;
4617         return 1;
4618 }
4619
4620 static enum drm_connector_status
4621 g4x_dp_detect(struct intel_dp *intel_dp)
4622 {
4623         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4624         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4625         int ret;
4626
4627         /* Can't disconnect eDP, but you can close the lid... */
4628         if (is_edp(intel_dp)) {
4629                 enum drm_connector_status status;
4630
4631                 status = intel_panel_detect(dev);
4632                 if (status == connector_status_unknown)
4633                         status = connector_status_connected;
4634                 return status;
4635         }
4636
4637         ret = g4x_digital_port_connected(dev, intel_dig_port);
4638         if (ret == -EINVAL)
4639                 return connector_status_unknown;
4640         else if (ret == 0)
4641                 return connector_status_disconnected;
4642
4643         return intel_dp_detect_dpcd(intel_dp);
4644 }
4645
4646 static struct edid *
4647 intel_dp_get_edid(struct intel_dp *intel_dp)
4648 {
4649         struct intel_connector *intel_connector = intel_dp->attached_connector;
4650
4651         /* use cached edid if we have one */
4652         if (intel_connector->edid) {
4653                 /* invalid edid */
4654                 if (IS_ERR(intel_connector->edid))
4655                         return NULL;
4656
4657                 return drm_edid_duplicate(intel_connector->edid);
4658         } else
4659                 return drm_get_edid(&intel_connector->base,
4660                                     intel_dp->aux.ddc);
4661 }
4662
4663 static void
4664 intel_dp_set_edid(struct intel_dp *intel_dp)
4665 {
4666         struct intel_connector *intel_connector = intel_dp->attached_connector;
4667         struct edid *edid;
4668
4669         edid = intel_dp_get_edid(intel_dp);
4670         intel_connector->detect_edid = edid;
4671
4672         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4673                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4674         else
4675                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4676 }
4677
4678 static void
4679 intel_dp_unset_edid(struct intel_dp *intel_dp)
4680 {
4681         struct intel_connector *intel_connector = intel_dp->attached_connector;
4682
4683         kfree(intel_connector->detect_edid);
4684         intel_connector->detect_edid = NULL;
4685
4686         intel_dp->has_audio = false;
4687 }
4688
4689 static enum intel_display_power_domain
4690 intel_dp_power_get(struct intel_dp *dp)
4691 {
4692         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4693         enum intel_display_power_domain power_domain;
4694
4695         power_domain = intel_display_port_power_domain(encoder);
4696         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4697
4698         return power_domain;
4699 }
4700
4701 static void
4702 intel_dp_power_put(struct intel_dp *dp,
4703                    enum intel_display_power_domain power_domain)
4704 {
4705         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4706         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4707 }
4708
/*
 * Connector ->detect hook for DP/eDP. Determines whether a sink is
 * present (platform-specific probe), caches the sink's EDID, and
 * services any pending sink IRQs (automated test requests, CP irq).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate the EDID cached by the previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transfers and register reads below need the port powered. */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4780
4781 static void
4782 intel_dp_force(struct drm_connector *connector)
4783 {
4784         struct intel_dp *intel_dp = intel_attached_dp(connector);
4785         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4786         enum intel_display_power_domain power_domain;
4787
4788         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4789                       connector->base.id, connector->name);
4790         intel_dp_unset_edid(intel_dp);
4791
4792         if (connector->status != connector_status_connected)
4793                 return;
4794
4795         power_domain = intel_dp_power_get(intel_dp);
4796
4797         intel_dp_set_edid(intel_dp);
4798
4799         intel_dp_power_put(intel_dp, power_domain);
4800
4801         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4802                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4803 }
4804
4805 static int intel_dp_get_modes(struct drm_connector *connector)
4806 {
4807         struct intel_connector *intel_connector = to_intel_connector(connector);
4808         struct edid *edid;
4809
4810         edid = intel_connector->detect_edid;
4811         if (edid) {
4812                 int ret = intel_connector_update_modes(connector, edid);
4813                 if (ret)
4814                         return ret;
4815         }
4816
4817         /* if eDP has no EDID, fall back to fixed mode */
4818         if (is_edp(intel_attached_dp(connector)) &&
4819             intel_connector->panel.fixed_mode) {
4820                 struct drm_display_mode *mode;
4821
4822                 mode = drm_mode_duplicate(connector->dev,
4823                                           intel_connector->panel.fixed_mode);
4824                 if (mode) {
4825                         drm_mode_probed_add(connector, mode);
4826                         return 1;
4827                 }
4828         }
4829
4830         return 0;
4831 }
4832
4833 static bool
4834 intel_dp_detect_audio(struct drm_connector *connector)
4835 {
4836         bool has_audio = false;
4837         struct edid *edid;
4838
4839         edid = to_intel_connector(connector)->detect_edid;
4840         if (edid)
4841                 has_audio = drm_detect_monitor_audio(edid);
4842
4843         return has_audio;
4844 }
4845
/*
 * Connector ->set_property hook. Handles the force-audio and
 * broadcast-RGB properties, plus the scaling-mode property on eDP.
 * Any accepted change that affects output triggers a mode restore on
 * the active crtc; no-op changes return early without a modeset.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object before acting on it. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio support from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the property change by restoring the current mode. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4933
/*
 * Connector ->destroy hook: free cached EDIDs, tear down eDP panel
 * state, then release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* EDID cached by the last detect cycle; kfree(NULL) is a no-op. */
	kfree(intel_connector->detect_edid);

	/* The persistent EDID may be an ERR_PTR marker for an invalid
	 * EDID; only free genuine allocations. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4952
/*
 * Encoder ->destroy hook: unregister the AUX channel, tear down MST
 * state, and make sure eDP panel VDD is really off before freeing the
 * digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		/* Cancel the pending delayed vdd-off before syncing. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

#if 0
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
#endif
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4980
/*
 * Encoder suspend hook: force eDP panel VDD off so it is not left
 * enabled across suspend. No-op for external DP.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4997
/*
 * If the BIOS left panel VDD enabled, take the matching power-domain
 * reference and schedule the normal delayed VDD off so the software
 * state tracking becomes consistent. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5022
/*
 * Encoder ->reset hook (boot/resume): re-sync the eDP power-sequencer
 * software state with whatever the BIOS left in the hardware.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	/* Only eDP has power-sequencer state to sanitize. */
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5045
/* Connector ops shared by all DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5057
/* Probe helpers: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5063
/* Encoder ops for the DP/eDP drm_encoder. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5068
/*
 * DP hot-plug is handled through intel_dp_hpd_pulse() instead of this
 * generic encoder hook, so it is intentionally a no-op.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
5074
/*
 * Hot-plug IRQ handler for a DP digital port, called for both long
 * (plug/unplug) and short (sink IRQ) pulses. Returns true when the
 * caller should do MST-teardown follow-up processing, false when the
 * event was fully serviced here.
 */
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	bool ret = true;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return false;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* AUX transfers and register access below need the port powered. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* No sink on the port: fall through to MST teardown. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
#if 0
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
#endif
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = false;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
#if 0
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
#endif
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5164
5165 /* Return which DP Port should be selected for Transcoder DP control */
5166 int
5167 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5168 {
5169         struct drm_device *dev = crtc->dev;
5170         struct intel_encoder *intel_encoder;
5171         struct intel_dp *intel_dp;
5172
5173         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5174                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5175
5176                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5177                     intel_encoder->type == INTEL_OUTPUT_EDP)
5178                         return intel_dp->output_reg;
5179         }
5180
5181         return -1;
5182 }
5183
5184 /* check the VBT to see whether the eDP is on DP-D port */
5185 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5186 {
5187         struct drm_i915_private *dev_priv = dev->dev_private;
5188         union child_device_config *p_child;
5189         int i;
5190         static const short port_mapping[] = {
5191                 [PORT_B] = PORT_IDPB,
5192                 [PORT_C] = PORT_IDPC,
5193                 [PORT_D] = PORT_IDPD,
5194         };
5195
5196         if (port == PORT_A)
5197                 return true;
5198
5199         if (!dev_priv->vbt.child_dev_num)
5200                 return false;
5201
5202         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5203                 p_child = dev_priv->vbt.child_dev + i;
5204
5205                 if (p_child->common.dvo_port == port_mapping[port] &&
5206                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5207                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5208                         return true;
5209         }
5210         return false;
5211 }
5212
5213 void
5214 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5215 {
5216         struct intel_connector *intel_connector = to_intel_connector(connector);
5217
5218         intel_attach_force_audio_property(connector);
5219         intel_attach_broadcast_rgb_property(connector);
5220         intel_dp->color_range_auto = true;
5221
5222         if (is_edp(intel_dp)) {
5223                 drm_mode_create_scaling_mode_property(connector->dev);
5224                 drm_object_attach_property(
5225                         &connector->base,
5226                         connector->dev->mode_config.scaling_mode_property,
5227                         DRM_MODE_SCALE_ASPECT);
5228                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5229         }
5230 }
5231
/*
 * Stamp "now" on all panel power transition timestamps so the first
 * real transition waits the full required delay.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5238
/*
 * Read back the panel power-sequencing delays currently programmed in
 * hardware, merge them with the VBT values and the eDP spec limits into
 * intel_dp->pps_delays, and derive the software wait times used by the
 * panel power helpers. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PCH or per-pipe (VLV) power sequencer registers. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the 100usec register units into the ms delays used by
	 * the software waits. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5341
/*
 * Program the panel power sequencer registers from the delays computed
 * by intel_dp_init_panel_power_sequencer(), including the pp clock
 * divisor and the port-select bits. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PCH or per-pipe (VLV) power sequencer registers. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back so the log reflects what the hardware accepted. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
5407
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.dp doubles as the "DRRS enabled" flag. */
	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	/* NOTE(review): config is fetched here but not otherwise read
	 * below in this version of the function. */
	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* A request matching the panel's downclock mode selects the
	 * low-RR state; any other rate keeps the DRRS_HIGH_RR default. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switch RR by selecting between the M1/N1 and
	 * M2/N2 link divider sets; older gens toggle a PIPECONF bit. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5511
5512 /**
5513  * intel_edp_drrs_enable - init drrs struct if supported
5514  * @intel_dp: DP struct
5515  *
5516  * Initializes frontbuffer_bits and drrs.dp
5517  */
5518 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5519 {
5520         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5521         struct drm_i915_private *dev_priv = dev->dev_private;
5522         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5523         struct drm_crtc *crtc = dig_port->base.base.crtc;
5524         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5525
5526         if (!intel_crtc->config->has_drrs) {
5527                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5528                 return;
5529         }
5530
5531         mutex_lock(&dev_priv->drrs.mutex);
5532         if (WARN_ON(dev_priv->drrs.dp)) {
5533                 DRM_ERROR("DRRS already enabled\n");
5534                 goto unlock;
5535         }
5536
5537         dev_priv->drrs.busy_frontbuffer_bits = 0;
5538
5539         dev_priv->drrs.dp = intel_dp;
5540
5541 unlock:
5542         mutex_unlock(&dev_priv->drrs.mutex);
5543 }
5544
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the high refresh rate if the panel is currently downclocked,
 * clears drrs.dp (the "DRRS enabled" flag) and stops any pending
 * downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* Never enabled, or already disabled. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Return to the fixed (high) refresh rate so the pipe is not
	 * left running at the downclocked rate after disable. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Wait for the work outside the mutex: the work handler takes
	 * drrs.mutex itself, so a sync cancel under the lock would
	 * deadlock. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5577
/* Delayed work that drops the panel to its low refresh rate once the
 * screen has stayed idle for the timeout armed in intel_edp_drrs_flush(). */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled after this work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	/* Still at high RR and nothing busy: switch to the downclocked
	 * refresh rate from the panel's EDID. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5607
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum i915_pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Non-sync cancel is enough: a work item that is already running
	 * rechecks busy_frontbuffer_bits (set below) and bails out. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Screen activity: return to the high refresh rate right away. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only planes on the pipe driving the DRRS panel are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5651
5652 /**
5653  * intel_edp_drrs_flush - Flush DRRS
5654  * @dev: DRM device
5655  * @frontbuffer_bits: frontbuffer plane tracking bits
5656  *
5657  * When there is no movement on screen, DRRS work can be scheduled.
5658  * This DRRS work is responsible for setting relevant registers after a
5659  * timeout of 1 second.
5660  *
5661  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5662  */
5663 void intel_edp_drrs_flush(struct drm_device *dev,
5664                 unsigned frontbuffer_bits)
5665 {
5666         struct drm_i915_private *dev_priv = dev->dev_private;
5667         struct drm_crtc *crtc;
5668         enum i915_pipe pipe;
5669
5670         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5671                 return;
5672
5673         cancel_delayed_work(&dev_priv->drrs.work);
5674
5675         mutex_lock(&dev_priv->drrs.mutex);
5676         if (!dev_priv->drrs.dp) {
5677                 mutex_unlock(&dev_priv->drrs.mutex);
5678                 return;
5679         }
5680
5681         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5682         pipe = to_intel_crtc(crtc)->pipe;
5683         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5684
5685         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5686                         !dev_priv->drrs.busy_frontbuffer_bits)
5687                 schedule_delayed_work(&dev_priv->drrs.work,
5688                                 msecs_to_jiffies(1000));
5689         mutex_unlock(&dev_priv->drrs.mutex);
5690 }
5691
5692 /**
5693  * DOC: Display Refresh Rate Switching (DRRS)
5694  *
5695  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5696  * which enables switching between low and high refresh rates,
5697  * dynamically, based on the usage scenario. This feature is applicable
5698  * for internal panels.
5699  *
5700  * Indication that the panel supports DRRS is given by the panel EDID, which
5701  * would list multiple refresh rates for one resolution.
5702  *
5703  * DRRS is of 2 types - static and seamless.
5704  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5705  * (may appear as a blink on screen) and is used in dock-undock scenario.
5706  * Seamless DRRS involves changing RR without any visual effect to the user
5707  * and can be used during normal system usage. This is done by programming
5708  * certain registers.
5709  *
5710  * Support for static/seamless DRRS may be indicated in the VBT based on
5711  * inputs from the panel spec.
5712  *
5713  * DRRS saves power by switching to low RR based on usage scenarios.
5714  *
5715  * eDP DRRS:-
5716  *        The implementation is based on frontbuffer tracking implementation.
5717  * When there is a disturbance on the screen triggered by user activity or a
5718  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5719  * When there is no movement on screen, after a timeout of 1 second, a switch
5720  * to low RR is made.
5721  *        For integration with frontbuffer tracking code,
5722  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5723  *
5724  * DRRS can be further extended to support other internal panels and also
5725  * the scenario of video playback wherein RR is set based on the rate
5726  * requested by userspace.
5727  */
5728
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are initialized unconditionally so the DRRS
	 * entry points can always take the mutex, even when DRRS turns
	 * out to be unsupported below. lockinit() is the DragonFly
	 * stand-in for Linux mutex_init(). */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* Seamless DRRS needs a second, lower refresh rate for the fixed
	 * mode in the panel's mode list. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5778
/* eDP-specific connector initialization: sanitize any VDD left enabled
 * by the BIOS, verify the panel answers DPCD reads (otherwise treat it
 * as a ghost and bail), cache the EDID, choose the fixed panel mode
 * (EDID preferred mode first, VBT mode as fallback), probe DRRS support
 * and set up panel/backlight state.
 * Returns true on success or when the port is not eDP at all; false
 * only for a ghost eDP device. */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum i915_pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unusable EDID: free it and record the failure
			 * as an ERR_PTR so it is not re-probed later. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
#if 0
		/* Reboot notifier not wired up in this port. */
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
#endif

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5887
/* Shared connector setup for DP and eDP: selects per-generation AUX
 * vfuncs, registers the DRM connector, configures the hotplug pin,
 * initializes the eDP panel power sequencer, AUX and MST, then runs the
 * eDP-specific probe. Returns false (after a full teardown of what was
 * set up here) when the eDP probe decides the device is a ghost. */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: AUX clock divider per generation/platform. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Panel power sequencer state must be set up (under the pps
	 * lock) before the eDP probe issues any AUX transactions. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP device: unwind everything set up above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

#if 0
	/* debugfs connector node not hooked up in this port */
	i915_debugfs_connector_add(connector);
#endif

	return true;
}
6033
/* Allocate and register a DP/eDP digital port encoder driven by
 * @output_reg on @port, hook up the per-platform modeset callbacks and
 * hand off connector creation to intel_dp_init_connector(). Both
 * allocations are freed again if connector init fails. */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific enable/disable sequencing hooks. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D is restricted to crtc/pipe C (bit 2); ports B/C
	 * may use pipes A/B. Other platforms allow any of the three. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		/* Connector setup failed: undo encoder init and free
		 * both allocations made above. */
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
6105
#if 0
/* Suspend the MST topology manager on every MST-active DP port.
 * Compiled out in this port. */
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}
#endif
6127
/* Walk all DP ports after resume. The actual MST topology-manager
 * resume call is compiled out (#if 0) in this port, so only the port
 * iteration currently remains. */
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
#if 0
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
#endif
		}
	}
}