drm/i915: Update to Linux 4.4
[dragonfly.git] / sys / dev / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <drm/drmP.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40
41 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
42
/* DragonFly tunable: set drm.i915.disable_aux_irq=1 to force polled
 * (non-interrupt) DP AUX transfers; consumed by intel_dp_aux_ch(). */
static int disable_aux_irq = 0;
TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
48 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51
/*
 * Associates a DP link rate (same units as the rate tables below,
 * e.g. 162000) with the DPLL divider values that produce it.
 */
struct dp_link_dpll {
	int clock;		/* link rate this entry applies to */
	struct dpll dpll;	/* divider settings for that rate */
};
56
/* DPLL settings per DP link rate for gen4 CPU ports */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings per DP link rate for PCH-attached ports */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings per DP link rate for VLV ports */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates per platform (see also default_rates usage) */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119
120         return intel_dig_port->base.base.dev;
121 }
122
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133                                       enum i915_pipe pipe);
134
/*
 * Return a 4-bit mask of the lanes (of lanes 0-3) that are NOT used
 * when the link is driven with @lane_count lanes.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1u << lane_count) - 1;

	return ~used & 0xf;
}
139
140 static int
141 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
142 {
143         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
144
145         switch (max_link_bw) {
146         case DP_LINK_BW_1_62:
147         case DP_LINK_BW_2_7:
148         case DP_LINK_BW_5_4:
149                 break;
150         default:
151                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
152                      max_link_bw);
153                 max_link_bw = DP_LINK_BW_1_62;
154                 break;
155         }
156         return max_link_bw;
157 }
158
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
160 {
161         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162         struct drm_device *dev = intel_dig_port->base.base.dev;
163         u8 source_max, sink_max;
164
165         source_max = 4;
166         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
167             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
168                 source_max = 2;
169
170         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
171
172         return min(source_max, sink_max);
173 }
174
175 /*
176  * The units on the numbers in the next two are... bizarre.  Examples will
177  * make it clearer; this one parallels an example in the eDP spec.
178  *
179  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
180  *
181  *     270000 * 1 * 8 / 10 == 216000
182  *
183  * The actual data capacity of that configuration is 2.16Gbit/s, so the
184  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
185  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186  * 119000.  At 18bpp that's 2142000 kilobits per second.
187  *
188  * Thus the strange-looking division by 10 in intel_dp_link_required, to
189  * get the result in decakilobits instead of kilobits.
190  */
191
/* Bandwidth needed by @pixel_clock (kHz) at @bpp bits per pixel, in
 * decakilobits (see the units discussion above); rounds up. */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;	/* round up to whole decakilobits */
}
197
/* Max payload of @max_lanes lanes at @max_link_clock after 8b/10b
 * coding overhead, in decakilobits (see the units discussion above). */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;	/* 8b/10b: 80% of raw bandwidth */
}
203
204 static enum drm_mode_status
205 intel_dp_mode_valid(struct drm_connector *connector,
206                     struct drm_display_mode *mode)
207 {
208         struct intel_dp *intel_dp = intel_attached_dp(connector);
209         struct intel_connector *intel_connector = to_intel_connector(connector);
210         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
211         int target_clock = mode->clock;
212         int max_rate, mode_rate, max_lanes, max_link_clock;
213
214         if (is_edp(intel_dp) && fixed_mode) {
215                 if (mode->hdisplay > fixed_mode->hdisplay)
216                         return MODE_PANEL;
217
218                 if (mode->vdisplay > fixed_mode->vdisplay)
219                         return MODE_PANEL;
220
221                 target_clock = fixed_mode->clock;
222         }
223
224         max_link_clock = intel_dp_max_link_rate(intel_dp);
225         max_lanes = intel_dp_max_lane_count(intel_dp);
226
227         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
228         mode_rate = intel_dp_link_required(target_clock, 18);
229
230         if (mode_rate > max_rate)
231                 return MODE_CLOCK_HIGH;
232
233         if (mode->clock < 10000)
234                 return MODE_CLOCK_LOW;
235
236         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
237                 return MODE_H_ILLEGAL;
238
239         return MODE_OK;
240 }
241
/* Pack up to 4 message bytes into a big-endian AUX data register value. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);

	return v;
}
253
/* Unpack a big-endian AUX data register value into up to 4 bytes. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> ((3 - i) * 8));
}
262
263 static void
264 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265                                     struct intel_dp *intel_dp);
266 static void
267 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268                                               struct intel_dp *intel_dp);
269
270 static void pps_lock(struct intel_dp *intel_dp)
271 {
272         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
273         struct intel_encoder *encoder = &intel_dig_port->base;
274         struct drm_device *dev = encoder->base.dev;
275         struct drm_i915_private *dev_priv = dev->dev_private;
276         enum intel_display_power_domain power_domain;
277
278         /*
279          * See vlv_power_sequencer_reset() why we need
280          * a power domain reference here.
281          */
282         power_domain = intel_display_port_aux_power_domain(encoder);
283         intel_display_power_get(dev_priv, power_domain);
284
285         mutex_lock(&dev_priv->pps_mutex);
286 }
287
288 static void pps_unlock(struct intel_dp *intel_dp)
289 {
290         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
291         struct intel_encoder *encoder = &intel_dig_port->base;
292         struct drm_device *dev = encoder->base.dev;
293         struct drm_i915_private *dev_priv = dev->dev_private;
294         enum intel_display_power_domain power_domain;
295
296         mutex_unlock(&dev_priv->pps_mutex);
297
298         power_domain = intel_display_port_aux_power_domain(encoder);
299         intel_display_power_put(dev_priv, power_domain);
300 }
301
302 static void
303 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
304 {
305         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306         struct drm_device *dev = intel_dig_port->base.base.dev;
307         struct drm_i915_private *dev_priv = dev->dev_private;
308         enum i915_pipe pipe = intel_dp->pps_pipe;
309         bool pll_enabled, release_cl_override = false;
310         enum dpio_phy phy = DPIO_PHY(pipe);
311         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312         uint32_t DP;
313
314         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
316                  pipe_name(pipe), port_name(intel_dig_port->port)))
317                 return;
318
319         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320                       pipe_name(pipe), port_name(intel_dig_port->port));
321
322         /* Preserve the BIOS-computed detected bit. This is
323          * supposed to be read-only.
324          */
325         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327         DP |= DP_PORT_WIDTH(1);
328         DP |= DP_LINK_TRAIN_PAT_1;
329
330         if (IS_CHERRYVIEW(dev))
331                 DP |= DP_PIPE_SELECT_CHV(pipe);
332         else if (pipe == PIPE_B)
333                 DP |= DP_PIPEB_SELECT;
334
335         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
336
337         /*
338          * The DPLL for the pipe must be enabled for this to work.
339          * So enable temporarily it if it's not already enabled.
340          */
341         if (!pll_enabled) {
342                 release_cl_override = IS_CHERRYVIEW(dev) &&
343                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
344
345                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
347         }
348
349         /*
350          * Similar magic as in intel_dp_enable_port().
351          * We _must_ do this port enable + disable trick
352          * to make this power seqeuencer lock onto the port.
353          * Otherwise even VDD force bit won't work.
354          */
355         I915_WRITE(intel_dp->output_reg, DP);
356         POSTING_READ(intel_dp->output_reg);
357
358         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359         POSTING_READ(intel_dp->output_reg);
360
361         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362         POSTING_READ(intel_dp->output_reg);
363
364         if (!pll_enabled) {
365                 vlv_force_pll_off(dev, pipe);
366
367                 if (release_cl_override)
368                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
369         }
370 }
371
/*
 * Return the pipe whose panel power sequencer should drive this eDP
 * port, assigning (stealing if necessary) and kicking a free one on
 * first use.  Caller must hold pps_mutex (asserted below).
 */
static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* candidate PPS instances: one per pipe A/B */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum i915_pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already have a sequencer assigned? Reuse it. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;	/* lowest free pipe */

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
435
/* Predicate used by vlv_initial_pps_pipe() to qualify a candidate pipe. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe);

/* Is @pipe's panel power sequencer currently powering the panel? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* Is the VDD force bit set in @pipe's panel power control register? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum i915_pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe; used as the last-resort fallback predicate. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum i915_pipe pipe)
{
	return true;
}
456
457 static enum i915_pipe
458 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
459                      enum port port,
460                      vlv_pipe_check pipe_check)
461 {
462         enum i915_pipe pipe;
463
464         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
465                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
466                         PANEL_PORT_SELECT_MASK;
467
468                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
469                         continue;
470
471                 if (!pipe_check(dev_priv, pipe))
472                         continue;
473
474                 return pipe;
475         }
476
477         return INVALID_PIPE;
478 }
479
/*
 * At setup time, adopt the pipe whose power sequencer is already
 * associated with this eDP port, preferring one that is actually
 * powered up.  Caller must hold pps_mutex (asserted below).  The
 * cascade order (pp on -> vdd on -> any) is deliberate.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
516
517 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
518 {
519         struct drm_device *dev = dev_priv->dev;
520         struct intel_encoder *encoder;
521
522         if (WARN_ON(!IS_VALLEYVIEW(dev)))
523                 return;
524
525         /*
526          * We can't grab pps_mutex here due to deadlock with power_domain
527          * mutex when power_domain functions are called while holding pps_mutex.
528          * That also means that in order to use pps_pipe the code needs to
529          * hold both a power domain reference and pps_mutex, and the power domain
530          * reference get/put must be done while _not_ holding pps_mutex.
531          * pps_{lock,unlock}() do these steps in the correct order, so one
532          * should use them always.
533          */
534
535         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
536                 struct intel_dp *intel_dp;
537
538                 if (encoder->type != INTEL_OUTPUT_EDP)
539                         continue;
540
541                 intel_dp = enc_to_intel_dp(&encoder->base);
542                 intel_dp->pps_pipe = INVALID_PIPE;
543         }
544 }
545
546 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
547 {
548         struct drm_device *dev = intel_dp_to_dev(intel_dp);
549
550         if (IS_BROXTON(dev))
551                 return BXT_PP_CONTROL(0);
552         else if (HAS_PCH_SPLIT(dev))
553                 return PCH_PP_CONTROL;
554         else
555                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
556 }
557
558 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
559 {
560         struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562         if (IS_BROXTON(dev))
563                 return BXT_PP_STATUS(0);
564         else if (HAS_PCH_SPLIT(dev))
565                 return PCH_PP_STATUS;
566         else
567                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
568 }
569
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
#if 0	/* compiled out in this version of the port */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* only act on eDP ports, and only for a restart */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
#endif
607
608 static bool edp_have_panel_power(struct intel_dp *intel_dp)
609 {
610         struct drm_device *dev = intel_dp_to_dev(intel_dp);
611         struct drm_i915_private *dev_priv = dev->dev_private;
612
613         lockdep_assert_held(&dev_priv->pps_mutex);
614
615         if (IS_VALLEYVIEW(dev) &&
616             intel_dp->pps_pipe == INVALID_PIPE)
617                 return false;
618
619         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
620 }
621
622 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
623 {
624         struct drm_device *dev = intel_dp_to_dev(intel_dp);
625         struct drm_i915_private *dev_priv = dev->dev_private;
626
627         lockdep_assert_held(&dev_priv->pps_mutex);
628
629         if (IS_VALLEYVIEW(dev) &&
630             intel_dp->pps_pipe == INVALID_PIPE)
631                 return false;
632
633         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
634 }
635
636 static void
637 intel_dp_check_edp(struct intel_dp *intel_dp)
638 {
639         struct drm_device *dev = intel_dp_to_dev(intel_dp);
640         struct drm_i915_private *dev_priv = dev->dev_private;
641
642         if (!is_edp(intel_dp))
643                 return;
644
645         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
646                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
647                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
648                               I915_READ(_pp_stat_reg(intel_dp)),
649                               I915_READ(_pp_ctrl_reg(intel_dp)));
650         }
651 }
652
/*
 * Wait (up to 10ms) for the in-flight AUX transfer to finish, i.e. for
 * SEND_BUSY to clear, and return the final channel control register
 * value.  With @has_aux_irq it sleeps on gmbus_wait_queue (presumably
 * woken by the AUX done interrupt — see the IRQ handler); otherwise it
 * polls the register atomically.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register and tests the busy bit each evaluation */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
676
677 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
678 {
679         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
680         struct drm_device *dev = intel_dig_port->base.base.dev;
681
682         /*
683          * The clock divider is based off the hrawclk, and would like to run at
684          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
685          */
686         return index ? 0 : intel_hrawclk(dev) / 2;
687 }
688
689 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
690 {
691         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692         struct drm_device *dev = intel_dig_port->base.base.dev;
693         struct drm_i915_private *dev_priv = dev->dev_private;
694
695         if (index)
696                 return 0;
697
698         if (intel_dig_port->port == PORT_A) {
699                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
700
701         } else {
702                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
703         }
704 }
705
706 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
707 {
708         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
709         struct drm_device *dev = intel_dig_port->base.base.dev;
710         struct drm_i915_private *dev_priv = dev->dev_private;
711
712         if (intel_dig_port->port == PORT_A) {
713                 if (index)
714                         return 0;
715                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
716         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
717                 /* Workaround for non-ULT HSW */
718                 switch (index) {
719                 case 0: return 63;
720                 case 1: return 72;
721                 default: return 0;
722                 }
723         } else  {
724                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
725         }
726 }
727
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
732
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index == 0 ? 1 : 0;
}
742
743 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
744                                       bool has_aux_irq,
745                                       int send_bytes,
746                                       uint32_t aux_clock_divider)
747 {
748         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
749         struct drm_device *dev = intel_dig_port->base.base.dev;
750         uint32_t precharge, timeout;
751
752         if (IS_GEN6(dev))
753                 precharge = 3;
754         else
755                 precharge = 5;
756
757         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
758                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
759         else
760                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
761
762         return DP_AUX_CH_CTL_SEND_BUSY |
763                DP_AUX_CH_CTL_DONE |
764                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
765                DP_AUX_CH_CTL_TIME_OUT_ERROR |
766                timeout |
767                DP_AUX_CH_CTL_RECEIVE_ERROR |
768                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
769                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
770                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
771 }
772
773 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
774                                       bool has_aux_irq,
775                                       int send_bytes,
776                                       uint32_t unused)
777 {
778         return DP_AUX_CH_CTL_SEND_BUSY |
779                DP_AUX_CH_CTL_DONE |
780                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
781                DP_AUX_CH_CTL_TIME_OUT_ERROR |
782                DP_AUX_CH_CTL_TIME_OUT_1600us |
783                DP_AUX_CH_CTL_RECEIVE_ERROR |
784                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
785                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
786 }
787
/*
 * Perform one raw AUX channel transaction: load up to 20 bytes from 'send'
 * into the AUX data registers, start the transfer, and unload up to
 * 'recv_size' reply bytes into 'recv'.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY (channel stuck busy / never completed), -E2BIG (buffer too large
 * for the 5 data registers), -EIO (receive error), -ETIMEDOUT (sink did
 * not respond, e.g. nothing connected).
 *
 * Takes pps_lock and keeps eDP panel VDD up for the duration of the
 * transfer; VDD is dropped on exit only if this function turned it on.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers directly follow AUX_CH_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
#ifdef __DragonFly__
	/* DragonFly: the drm.i915.disable_aux_irq tunable can force polled
	 * (non-interrupt) AUX completion. */
	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
#else
	bool has_aux_irq = HAS_AUX_IRQ(dev);
#endif
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Channel is stuck busy; warn only once per distinct status
		 * value to avoid flooding the log on repeated failures. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's list of AUX clock dividers; a
	 * zero divider terminates the list. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
943
944 #define BARE_ADDRESS_SIZE       3
945 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
946 static ssize_t
947 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
948 {
949         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950         uint8_t txbuf[20], rxbuf[20];
951         size_t txsize, rxsize;
952         int ret;
953
954         txbuf[0] = (msg->request << 4) |
955                 ((msg->address >> 16) & 0xf);
956         txbuf[1] = (msg->address >> 8) & 0xff;
957         txbuf[2] = msg->address & 0xff;
958         txbuf[3] = msg->size - 1;
959
960         switch (msg->request & ~DP_AUX_I2C_MOT) {
961         case DP_AUX_NATIVE_WRITE:
962         case DP_AUX_I2C_WRITE:
963         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
964                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
965                 rxsize = 2; /* 0 or 1 data bytes */
966
967                 if (WARN_ON(txsize > 20))
968                         return -E2BIG;
969
970                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
971
972                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
973                 if (ret > 0) {
974                         msg->reply = rxbuf[0] >> 4;
975
976                         if (ret > 1) {
977                                 /* Number of bytes written in a short write. */
978                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
979                         } else {
980                                 /* Return payload size. */
981                                 ret = msg->size;
982                         }
983                 }
984                 break;
985
986         case DP_AUX_NATIVE_READ:
987         case DP_AUX_I2C_READ:
988                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
989                 rxsize = msg->size + 1;
990
991                 if (WARN_ON(rxsize > 20))
992                         return -E2BIG;
993
994                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
995                 if (ret > 0) {
996                         msg->reply = rxbuf[0] >> 4;
997                         /*
998                          * Assume happy day, and copy the data. The caller is
999                          * expected to check msg->reply before touching it.
1000                          *
1001                          * Return payload size.
1002                          */
1003                         ret--;
1004                         memcpy(msg->buffer, rxbuf + 1, ret);
1005                 }
1006                 break;
1007
1008         default:
1009                 ret = -EINVAL;
1010                 break;
1011         }
1012
1013         return ret;
1014 }
1015
/*
 * DragonFly-specific single-byte I2C-over-AUX handler backing the
 * iic_dp_aux bus registered in intel_dp_aux_init().  Writes 'write_byte'
 * or reads one byte into '*read_byte' depending on 'mode'.
 *
 * Returns 0 on success, -EREMOTEIO on NACK / invalid reply / retry
 * exhaustion, or the error from intel_dp_aux_ch().
 */
static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	/* Middle-Of-Transaction keeps the I2C transaction open until an
	 * explicit stop is requested. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;		/* length byte: (1 data byte) - 1 */
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;	/* reply header + 1 data byte */
		break;
	default:
		/* Address-only (start/stop) transaction: header only. */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		/* First check the native (AUX-level) reply field... */
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		/* ...then, on native ACK, the I2C-level reply field. */
		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = 0;	/* reply_bytes - 1 */
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}
1135
/*
 * Initialize the AUX channel for a DP port: pick the per-port
 * AUX_CH_CTL register (honoring the SKL port E alternate-channel VBT
 * quirk), fill in the drm_dp_aux fields, and register the DragonFly
 * iic_dp_aux I2C bus that backs intel_dp->aux.ddc.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	/* Default per-port AUX control register and DDC bus name. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("i2c_init %s\n", name);

	/* DragonFly: register the single-byte I2C-over-AUX fallback bus. */
	ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->aux.ddc);
	WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
	     ret, port_name(port));

#if 0
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
#endif
}
1227
/*
 * Connector unregister hook for DP connectors.  The sysfs link removal
 * done by upstream Linux is compiled out on DragonFly (#if 0); only the
 * generic connector unregistration remains.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
#if 0
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
#endif
	intel_connector_unregister(intel_connector);
}
1240
1241 static void
1242 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1243 {
1244         u32 ctrl1;
1245
1246         memset(&pipe_config->dpll_hw_state, 0,
1247                sizeof(pipe_config->dpll_hw_state));
1248
1249         pipe_config->ddi_pll_sel = SKL_DPLL0;
1250         pipe_config->dpll_hw_state.cfgcr1 = 0;
1251         pipe_config->dpll_hw_state.cfgcr2 = 0;
1252
1253         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1254         switch (pipe_config->port_clock / 2) {
1255         case 81000:
1256                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1257                                               SKL_DPLL0);
1258                 break;
1259         case 135000:
1260                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1261                                               SKL_DPLL0);
1262                 break;
1263         case 270000:
1264                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1265                                               SKL_DPLL0);
1266                 break;
1267         case 162000:
1268                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1269                                               SKL_DPLL0);
1270                 break;
1271         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1272         results in CDCLK change. Need to handle the change of CDCLK by
1273         disabling pipes and re-enabling them */
1274         case 108000:
1275                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1276                                               SKL_DPLL0);
1277                 break;
1278         case 216000:
1279                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1280                                               SKL_DPLL0);
1281                 break;
1282
1283         }
1284         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1285 }
1286
1287 void
1288 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1289 {
1290         memset(&pipe_config->dpll_hw_state, 0,
1291                sizeof(pipe_config->dpll_hw_state));
1292
1293         switch (pipe_config->port_clock / 2) {
1294         case 81000:
1295                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1296                 break;
1297         case 135000:
1298                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1299                 break;
1300         case 270000:
1301                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1302                 break;
1303         }
1304 }
1305
1306 static int
1307 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1308 {
1309         if (intel_dp->num_sink_rates) {
1310                 *sink_rates = intel_dp->sink_rates;
1311                 return intel_dp->num_sink_rates;
1312         }
1313
1314         *sink_rates = default_rates;
1315
1316         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1317 }
1318
1319 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1320 {
1321         /* WaDisableHBR2:skl */
1322         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1323                 return false;
1324
1325         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1326             (INTEL_INFO(dev)->gen >= 9))
1327                 return true;
1328         else
1329                 return false;
1330 }
1331
1332 static int
1333 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1334 {
1335         int size;
1336
1337         if (IS_BROXTON(dev)) {
1338                 *source_rates = bxt_rates;
1339                 size = ARRAY_SIZE(bxt_rates);
1340         } else if (IS_SKYLAKE(dev)) {
1341                 *source_rates = skl_rates;
1342                 size = ARRAY_SIZE(skl_rates);
1343         } else {
1344                 *source_rates = default_rates;
1345                 size = ARRAY_SIZE(default_rates);
1346         }
1347
1348         /* This depends on the fact that 5.4 is last value in the array */
1349         if (!intel_dp_source_supports_hbr2(dev))
1350                 size--;
1351
1352         return size;
1353 }
1354
1355 static void
1356 intel_dp_set_clock(struct intel_encoder *encoder,
1357                    struct intel_crtc_state *pipe_config)
1358 {
1359         struct drm_device *dev = encoder->base.dev;
1360         const struct dp_link_dpll *divisor = NULL;
1361         int i, count = 0;
1362
1363         if (IS_G4X(dev)) {
1364                 divisor = gen4_dpll;
1365                 count = ARRAY_SIZE(gen4_dpll);
1366         } else if (HAS_PCH_SPLIT(dev)) {
1367                 divisor = pch_dpll;
1368                 count = ARRAY_SIZE(pch_dpll);
1369         } else if (IS_CHERRYVIEW(dev)) {
1370                 divisor = chv_dpll;
1371                 count = ARRAY_SIZE(chv_dpll);
1372         } else if (IS_VALLEYVIEW(dev)) {
1373                 divisor = vlv_dpll;
1374                 count = ARRAY_SIZE(vlv_dpll);
1375         }
1376
1377         if (divisor && count) {
1378                 for (i = 0; i < count; i++) {
1379                         if (pipe_config->port_clock == divisor[i].clock) {
1380                                 pipe_config->dpll = divisor[i].dpll;
1381                                 pipe_config->clock_set = true;
1382                                 break;
1383                         }
1384                 }
1385         }
1386 }
1387
1388 static int intersect_rates(const int *source_rates, int source_len,
1389                            const int *sink_rates, int sink_len,
1390                            int *common_rates)
1391 {
1392         int i = 0, j = 0, k = 0;
1393
1394         while (i < source_len && j < sink_len) {
1395                 if (source_rates[i] == sink_rates[j]) {
1396                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1397                                 return k;
1398                         common_rates[k] = source_rates[i];
1399                         ++k;
1400                         ++i;
1401                         ++j;
1402                 } else if (source_rates[i] < sink_rates[j]) {
1403                         ++i;
1404                 } else {
1405                         ++j;
1406                 }
1407         }
1408         return k;
1409 }
1410
/*
 * Fill 'common_rates' with the link rates supported by both source and
 * sink; returns the count.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *sink_rates;
	int sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	const int *source_rates;
	int source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1425
1426 static void snprintf_int_array(char *str, size_t len,
1427                                const int *array, int nelem)
1428 {
1429         int i;
1430
1431         str[0] = '\0';
1432
1433         for (i = 0; i < nelem; i++) {
1434                 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1435                 if (r >= len)
1436                         return;
1437                 str += r;
1438                 len -= r;
1439         }
1440 }
1441
1442 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1443 {
1444         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1445         const int *source_rates, *sink_rates;
1446         int source_len, sink_len, common_len;
1447         int common_rates[DP_MAX_SUPPORTED_RATES];
1448         char str[128]; /* FIXME: too big for stack? */
1449
1450         if ((drm_debug & DRM_UT_KMS) == 0)
1451                 return;
1452
1453         source_len = intel_dp_source_rates(dev, &source_rates);
1454         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1455         DRM_DEBUG_KMS("source rates: %s\n", str);
1456
1457         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1458         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1459         DRM_DEBUG_KMS("sink rates: %s\n", str);
1460
1461         common_len = intel_dp_common_rates(intel_dp, common_rates);
1462         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1463         DRM_DEBUG_KMS("common rates: %s\n", str);
1464 }
1465
1466 static int rate_to_index(int find, const int *rates)
1467 {
1468         int i = 0;
1469
1470         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1471                 if (find == rates[i])
1472                         break;
1473
1474         return i;
1475 }
1476
1477 int
1478 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1479 {
1480         int rates[DP_MAX_SUPPORTED_RATES] = {};
1481         int len;
1482
1483         len = intel_dp_common_rates(intel_dp, rates);
1484         if (WARN_ON(len <= 0))
1485                 return 162000;
1486
1487         return rates[rate_to_index(0, rates) - 1];
1488 }
1489
/*
 * Map a link rate to its index in the sink's rate table, suitable for
 * programming DP_LINK_RATE_SET on eDP 1.4 sinks.  Only meaningful when
 * the sink reported a rate table (intel_dp->num_sink_rates != 0).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1494
1495 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1496                                   uint8_t *link_bw, uint8_t *rate_select)
1497 {
1498         if (intel_dp->num_sink_rates) {
1499                 *link_bw = 0;
1500                 *rate_select =
1501                         intel_dp_rate_select(intel_dp, port_clock);
1502         } else {
1503                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1504                 *rate_select = 0;
1505         }
1506 }
1507
1508 bool
1509 intel_dp_compute_config(struct intel_encoder *encoder,
1510                         struct intel_crtc_state *pipe_config)
1511 {
1512         struct drm_device *dev = encoder->base.dev;
1513         struct drm_i915_private *dev_priv = dev->dev_private;
1514         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1515         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1516         enum port port = dp_to_dig_port(intel_dp)->port;
1517         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1518         struct intel_connector *intel_connector = intel_dp->attached_connector;
1519         int lane_count, clock;
1520         int min_lane_count = 1;
1521         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1522         /* Conveniently, the link BW constants become indices with a shift...*/
1523         int min_clock = 0;
1524         int max_clock;
1525         int bpp, mode_rate;
1526         int link_avail, link_clock;
1527         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1528         int common_len;
1529         uint8_t link_bw, rate_select;
1530
1531         common_len = intel_dp_common_rates(intel_dp, common_rates);
1532
1533         /* No common link rates between source and sink */
1534         WARN_ON(common_len <= 0);
1535
1536         max_clock = common_len - 1;
1537
1538         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1539                 pipe_config->has_pch_encoder = true;
1540
1541         pipe_config->has_dp_encoder = true;
1542         pipe_config->has_drrs = false;
1543         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1544
1545         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1546                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1547                                        adjusted_mode);
1548
1549                 if (INTEL_INFO(dev)->gen >= 9) {
1550                         int ret;
1551                         ret = skl_update_scaler_crtc(pipe_config);
1552                         if (ret)
1553                                 return ret;
1554                 }
1555
1556                 if (!HAS_PCH_SPLIT(dev))
1557                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1558                                                  intel_connector->panel.fitting_mode);
1559                 else
1560                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1561                                                 intel_connector->panel.fitting_mode);
1562         }
1563
1564         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1565                 return false;
1566
1567         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1568                       "max bw %d pixel clock %iKHz\n",
1569                       max_lane_count, common_rates[max_clock],
1570                       adjusted_mode->crtc_clock);
1571
1572         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1573          * bpc in between. */
1574         bpp = pipe_config->pipe_bpp;
1575         if (is_edp(intel_dp)) {
1576
1577                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1578                 if (intel_connector->base.display_info.bpc == 0 &&
1579                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1580                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1581                                       dev_priv->vbt.edp_bpp);
1582                         bpp = dev_priv->vbt.edp_bpp;
1583                 }
1584
1585                 /*
1586                  * Use the maximum clock and number of lanes the eDP panel
1587                  * advertizes being capable of. The panels are generally
1588                  * designed to support only a single clock and lane
1589                  * configuration, and typically these values correspond to the
1590                  * native resolution of the panel.
1591                  */
1592                 min_lane_count = max_lane_count;
1593                 min_clock = max_clock;
1594         }
1595
1596         for (; bpp >= 6*3; bpp -= 2*3) {
1597                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1598                                                    bpp);
1599
1600                 for (clock = min_clock; clock <= max_clock; clock++) {
1601                         for (lane_count = min_lane_count;
1602                                 lane_count <= max_lane_count;
1603                                 lane_count <<= 1) {
1604
1605                                 link_clock = common_rates[clock];
1606                                 link_avail = intel_dp_max_data_rate(link_clock,
1607                                                                     lane_count);
1608
1609                                 if (mode_rate <= link_avail) {
1610                                         goto found;
1611                                 }
1612                         }
1613                 }
1614         }
1615
1616         return false;
1617
1618 found:
1619         if (intel_dp->color_range_auto) {
1620                 /*
1621                  * See:
1622                  * CEA-861-E - 5.1 Default Encoding Parameters
1623                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1624                  */
1625                 pipe_config->limited_color_range =
1626                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1627         } else {
1628                 pipe_config->limited_color_range =
1629                         intel_dp->limited_color_range;
1630         }
1631
1632         pipe_config->lane_count = lane_count;
1633
1634         pipe_config->pipe_bpp = bpp;
1635         pipe_config->port_clock = common_rates[clock];
1636
1637         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1638                               &link_bw, &rate_select);
1639
1640         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1641                       link_bw, rate_select, pipe_config->lane_count,
1642                       pipe_config->port_clock, bpp);
1643         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1644                       mode_rate, link_avail);
1645
1646         intel_link_compute_m_n(bpp, lane_count,
1647                                adjusted_mode->crtc_clock,
1648                                pipe_config->port_clock,
1649                                &pipe_config->dp_m_n);
1650
1651         if (intel_connector->panel.downclock_mode != NULL &&
1652                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1653                         pipe_config->has_drrs = true;
1654                         intel_link_compute_m_n(bpp, lane_count,
1655                                 intel_connector->panel.downclock_mode->clock,
1656                                 pipe_config->port_clock,
1657                                 &pipe_config->dp_m2_n2);
1658         }
1659
1660         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1661                 skl_edp_set_pll_config(pipe_config);
1662         else if (IS_BROXTON(dev))
1663                 /* handled in ddi */;
1664         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1665                 hsw_dp_set_ddi_pll_sel(pipe_config);
1666         else
1667                 intel_dp_set_clock(encoder, pipe_config);
1668
1669         return true;
1670 }
1671
/*
 * Select the CPU eDP (port A) PLL frequency in DP_A based on the crtc's
 * computed port clock: 162000 kHz maps to DP_PLL_FREQ_160MHZ, anything
 * else to DP_PLL_FREQ_270MHZ. The chosen bit is also mirrored into the
 * cached intel_dp->DP value so later port register writes preserve it.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	/* Clear the previous frequency select before choosing the new one. */
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write, then give the PLL time to settle. */
	POSTING_READ(DP_A);
	udelay(500);
}
1702
1703 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1704                               const struct intel_crtc_state *pipe_config)
1705 {
1706         intel_dp->link_rate = pipe_config->port_clock;
1707         intel_dp->lane_count = pipe_config->lane_count;
1708 }
1709
/*
 * Build the DP port control value for the upcoming modeset and cache it
 * in intel_dp->DP; the actual port enable happens later in the encoder
 * enable hooks. On CPT PCH this also programs enhanced framing into
 * TRANS_DP_CTL, since those bits live there rather than in the port
 * register.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select in the port reg. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT, enhanced framing lives in TRANS_DP_CTL. */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU (gen4-6, VLV/CHV) register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1794
/*
 * Mask/value pairs for wait_panel_status(): each pair names the PP_STATUS
 * bits that must match for the panel to be considered fully on, fully
 * off, or done with its power cycle. The literal 0 terms keep the four
 * status fields column-aligned across the three pairs.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1803
/*
 * Poll PP_STATUS until (status & mask) == value, or a 5 second timeout
 * expires. A timeout only logs an error rather than failing hard, so the
 * power sequence continues regardless. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1830
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1836
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1842
/*
 * Wait until the panel's mandatory off->on power cycle delay has elapsed.
 * Combines a software wait (timestamp-based, needed when VDD override was
 * disabled last and the hardware sequencer doesn't track it) with the
 * hardware sequencer's own cycle-delay status.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1854
/* Honour the panel's power-on -> backlight-on delay (T8 in eDP terms). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1860
/* Honour the panel's backlight-off settle delay after disabling it. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1866
1867 /* Read the current pp_control value, unlocking the register if it
1868  * is locked
1869  */
1870
1871 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1872 {
1873         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1874         struct drm_i915_private *dev_priv = dev->dev_private;
1875         u32 control;
1876
1877         lockdep_assert_held(&dev_priv->pps_mutex);
1878
1879         control = I915_READ(_pp_ctrl_reg(intel_dp));
1880         if (!IS_BROXTON(dev)) {
1881                 control &= ~PANEL_UNLOCK_MASK;
1882                 control |= PANEL_UNLOCK_REGS;
1883         }
1884         return control;
1885 }
1886
1887 /*
1888  * Must be paired with edp_panel_vdd_off().
1889  * Must hold pps_mutex around the whole on/off sequence.
1890  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1891  */
1892 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1893 {
1894         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1895         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1896         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1897         struct drm_i915_private *dev_priv = dev->dev_private;
1898         enum intel_display_power_domain power_domain;
1899         u32 pp;
1900         u32 pp_stat_reg, pp_ctrl_reg;
1901         bool need_to_disable = !intel_dp->want_panel_vdd;
1902
1903         lockdep_assert_held(&dev_priv->pps_mutex);
1904
1905         if (!is_edp(intel_dp))
1906                 return false;
1907
1908         cancel_delayed_work(&intel_dp->panel_vdd_work);
1909         intel_dp->want_panel_vdd = true;
1910
1911         if (edp_have_panel_vdd(intel_dp))
1912                 return need_to_disable;
1913
1914         power_domain = intel_display_port_aux_power_domain(intel_encoder);
1915         intel_display_power_get(dev_priv, power_domain);
1916
1917         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1918                       port_name(intel_dig_port->port));
1919
1920         if (!edp_have_panel_power(intel_dp))
1921                 wait_panel_power_cycle(intel_dp);
1922
1923         pp = ironlake_get_pp_control(intel_dp);
1924         pp |= EDP_FORCE_VDD;
1925
1926         pp_stat_reg = _pp_stat_reg(intel_dp);
1927         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1928
1929         I915_WRITE(pp_ctrl_reg, pp);
1930         POSTING_READ(pp_ctrl_reg);
1931         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1932                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1933         /*
1934          * If the panel wasn't on, delay before accessing aux channel
1935          */
1936         if (!edp_have_panel_power(intel_dp)) {
1937                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1938                               port_name(intel_dig_port->port));
1939                 msleep(intel_dp->panel_power_up_delay);
1940         }
1941
1942         return need_to_disable;
1943 }
1944
1945 /*
1946  * Must be paired with intel_edp_panel_vdd_off() or
1947  * intel_edp_panel_off().
1948  * Nested calls to these functions are not allowed since
1949  * we drop the lock. Caller must use some higher level
1950  * locking to prevent nested calls from other threads.
1951  */
1952 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1953 {
1954         bool vdd;
1955
1956         if (!is_edp(intel_dp))
1957                 return;
1958
1959         pps_lock(intel_dp);
1960         vdd = edp_panel_vdd_on(intel_dp);
1961         pps_unlock(intel_dp);
1962
1963         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1964              port_name(dp_to_dig_port(intel_dp)->port));
1965 }
1966
/*
 * Immediately turn off the forced panel VDD and release the AUX power
 * domain reference taken by edp_panel_vdd_on(). Must only be called once
 * want_panel_vdd has been cleared; no-op if VDD isn't currently forced.
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* If panel power is also off, this starts a full power cycle. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Drop the AUX power domain reference from edp_panel_vdd_on(). */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2007
2008 static void edp_panel_vdd_work(struct work_struct *__work)
2009 {
2010         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2011                                                  struct intel_dp, panel_vdd_work);
2012
2013         pps_lock(intel_dp);
2014         if (!intel_dp->want_panel_vdd)
2015                 edp_panel_vdd_off_sync(intel_dp);
2016         pps_unlock(intel_dp);
2017 }
2018
2019 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2020 {
2021         unsigned long delay;
2022
2023         /*
2024          * Queue the timer to fire a long time from now (relative to the power
2025          * down delay) to keep the panel power up across a sequence of
2026          * operations.
2027          */
2028         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2029         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2030 }
2031
2032 /*
2033  * Must be paired with edp_panel_vdd_on().
2034  * Must hold pps_mutex around the whole on/off sequence.
2035  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2036  */
2037 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2038 {
2039         struct drm_i915_private *dev_priv =
2040                 intel_dp_to_dev(intel_dp)->dev_private;
2041
2042         lockdep_assert_held(&dev_priv->pps_mutex);
2043
2044         if (!is_edp(intel_dp))
2045                 return;
2046
2047         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2048              port_name(dp_to_dig_port(intel_dp)->port));
2049
2050         intel_dp->want_panel_vdd = false;
2051
2052         if (sync)
2053                 edp_panel_vdd_off_sync(intel_dp);
2054         else
2055                 edp_panel_vdd_schedule_off(intel_dp);
2056 }
2057
/*
 * Turn the eDP panel power on via the power sequencer, honouring the
 * power cycle delay first and waiting until the sequencer reports the
 * panel fully on. Includes the ILK (gen5) workaround of disabling panel
 * reset around the power-on sequence. Caller must hold pps_mutex.
 * No-op (with a WARN) if power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2105
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2115
2116
/*
 * Turn the eDP panel power off via the power sequencer and wait for the
 * sequencer to report the panel off. Clears VDD, panel reset and
 * backlight enable in the same write, then releases the AUX power
 * domain reference that was taken when VDD was enabled. Caller must
 * hold pps_mutex and must still have VDD forced on (warned otherwise).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the off -> on power cycle delay. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2158
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2168
/* Enable backlight in the panel power control.
 *
 * Waits out the panel's power-on -> backlight-on delay first. Note this
 * only sets EDP_BLC_ENABLE in PP_CONTROL; the PWM is handled separately
 * by intel_edp_backlight_on(). Does not itself check is_edp().
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2198
/* Enable backlight PWM and backlight PP control.
 *
 * PWM first, then the panel power control enable bit, so the backlight
 * comes up at the programmed brightness.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2210
/* Disable backlight in the panel power control.
 *
 * Clears EDP_BLC_ENABLE in PP_CONTROL, records the timestamp for the
 * backlight-off delay, and waits it out so a subsequent panel-off does
 * not violate the panel's timing requirements.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2237
/* Disable backlight PP control and backlight PWM.
 *
 * Mirror image of intel_edp_backlight_on(): panel power control bit
 * first, then the PWM.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2249
2250 /*
2251  * Hook for controlling the panel power control backlight through the bl_power
2252  * sysfs attribute. Take care to handle multiple calls.
2253  */
2254 static void intel_edp_backlight_power(struct intel_connector *connector,
2255                                       bool enable)
2256 {
2257         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2258         bool is_enabled;
2259
2260         pps_lock(intel_dp);
2261         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2262         pps_unlock(intel_dp);
2263
2264         if (is_enabled == enable)
2265                 return;
2266
2267         DRM_DEBUG_KMS("panel power control backlight %s\n",
2268                       enable ? "enable" : "disable");
2269
2270         if (enable)
2271                 _intel_edp_backlight_on(intel_dp);
2272         else
2273                 _intel_edp_backlight_off(intel_dp);
2274 }
2275
/*
 * Enable the CPU eDP PLL in DP_A. The pipe must be disabled, and the
 * PLL and port must currently be off (both asserted via WARNs). The
 * cached intel_dp->DP is updated and written so the enable bits stay
 * consistent with the hardware.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Give the PLL time to lock before the port is enabled. */
	udelay(200);
}
2301
/*
 * Disable the CPU eDP PLL in DP_A. The pipe must be disabled, the PLL
 * must be on and the port off (asserted via WARNs). Works on a fresh
 * register read rather than the cached intel_dp->DP, which must stay
 * untouched for link retraining.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2326
2327 /* If the sink supports it, try to set the power state appropriately */
2328 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2329 {
2330         int ret, i;
2331
2332         /* Should have a valid DPCD by this point */
2333         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2334                 return;
2335
2336         if (mode != DRM_MODE_DPMS_ON) {
2337                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2338                                          DP_SET_POWER_D3);
2339         } else {
2340                 /*
2341                  * When turning on, we need to retry for 1ms to give the sink
2342                  * time to wake up.
2343                  */
2344                 for (i = 0; i < 3; i++) {
2345                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2346                                                  DP_SET_POWER_D0);
2347                         if (ret == 1)
2348                                 break;
2349                         msleep(1);
2350                 }
2351         }
2352
2353         if (ret != 1)
2354                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2355                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2356 }
2357
/*
 * Read back whether this DP encoder is enabled in hardware, and if so
 * which pipe drives it. The pipe encoding differs per platform: IVB
 * port A and CHV use dedicated port-register fields, CPT PCH requires
 * scanning TRANS_DP_CTL of each pipe, everything else uses the classic
 * pipe-select bit. Returns false if the port's power domain is off or
 * the port is disabled.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum i915_pipe p;

		/* On CPT the pipe mapping lives in TRANS_DP_CTL per pipe. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		/* Port enabled but no transcoder claims it: report anyway,
		 * leaving *pipe untouched. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2400
/*
 * Read back the current DP hardware state into @pipe_config (the inverse
 * of compute_config, used for state readout/verification): sync polarity
 * flags, audio enable, limited color range, lane count, link M/N values,
 * port clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        /* Port A (CPU eDP) never carries audio. */
        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /*
         * On CPT/PPT PCH ports the sync polarity lives in the transcoder
         * DP control register; everywhere else it is in the port register
         * we already read into tmp.
         */
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        /* The color range bit only exists on pre-PCH-split, non-VLV parts. */
        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        /* Port width field encodes lane count minus one. */
        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /*
         * CPU eDP link rate comes from the DP_A PLL frequency select:
         * the "160MHZ" setting is the 1.62 GHz link (162000 kHz symbol
         * clock), anything else here means the 2.7 GHz link.
         */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        /* Sanity-check the derived dotclock against FDI limits on PCH ports. */
        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2488
/*
 * Common DP disable hook: tear down audio, PSR (only where PSR is not
 * handled by the DDI code), the backlight, the sink's power state and
 * the panel.  The ordering here is deliberate — do not reorder.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        /* On DDI platforms PSR teardown happens elsewhere. */
        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2512
2513 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2514 {
2515         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2516         enum port port = dp_to_dig_port(intel_dp)->port;
2517
2518         intel_dp_link_down(intel_dp);
2519         if (port == PORT_A)
2520                 ironlake_edp_pll_off(intel_dp);
2521 }
2522
2523 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2524 {
2525         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2526
2527         intel_dp_link_down(intel_dp);
2528 }
2529
2530 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2531                                      bool reset)
2532 {
2533         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2534         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2535         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2536         enum i915_pipe pipe = crtc->pipe;
2537         uint32_t val;
2538
2539         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2540         if (reset)
2541                 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2542         else
2543                 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2544         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2545
2546         if (crtc->config->lane_count > 2) {
2547                 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2548                 if (reset)
2549                         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2550                 else
2551                         val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2552                 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2553         }
2554
2555         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2556         val |= CHV_PCS_REQ_SOFTRESET_EN;
2557         if (reset)
2558                 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2559         else
2560                 val |= DPIO_PCS_CLK_SOFT_RESET;
2561         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2562
2563         if (crtc->config->lane_count > 2) {
2564                 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2565                 val |= CHV_PCS_REQ_SOFTRESET_EN;
2566                 if (reset)
2567                         val &= ~DPIO_PCS_CLK_SOFT_RESET;
2568                 else
2569                         val |= DPIO_PCS_CLK_SOFT_RESET;
2570                 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2571         }
2572 }
2573
2574 static void chv_post_disable_dp(struct intel_encoder *encoder)
2575 {
2576         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2577         struct drm_device *dev = encoder->base.dev;
2578         struct drm_i915_private *dev_priv = dev->dev_private;
2579
2580         intel_dp_link_down(intel_dp);
2581
2582         mutex_lock(&dev_priv->sb_lock);
2583
2584         /* Assert data lane reset */
2585         chv_data_lane_soft_reset(encoder, true);
2586
2587         mutex_unlock(&dev_priv->sb_lock);
2588 }
2589
/*
 * Program the requested link training pattern (and scrambling state) for
 * this port.  On DDI platforms this writes DP_TP_CTL directly; on all
 * other platforms it only updates the caller's shadow of the port
 * register (*DP) — the caller is responsible for writing it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: pattern selection lives in the DP_TP_CTL register. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                /* CPT-style encoding of the training pattern bits. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Fall back to pattern 2 — hw lacks pattern 3. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* g4x/VLV/CHV-style encoding. */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports pattern 3 here. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2673
/*
 * Turn the DP port on.  The register is first written fully configured
 * but with the port still disabled, then written again with DP_PORT_EN
 * set — see the comment below for why this two-step dance is required.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2697
/*
 * Common DP enable path: bring up the port, run the eDP panel power
 * sequence (under pps_lock), wait for VLV/CHV PHY readiness, wake the
 * sink, link-train, and finally enable audio.  Bails out (with a WARN)
 * if the port is unexpectedly already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV/CHV must claim a power sequencer for this pipe first. */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* Panel power-up needs vdd held across the sequence. */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                unsigned int lane_mask = 0x0;

                /* CHV can skip waiting on lanes that aren't in use. */
                if (IS_CHERRYVIEW(dev))
                        lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2742
2743 static void g4x_enable_dp(struct intel_encoder *encoder)
2744 {
2745         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2746
2747         intel_enable_dp(encoder);
2748         intel_edp_backlight_on(intel_dp);
2749 }
2750
2751 static void vlv_enable_dp(struct intel_encoder *encoder)
2752 {
2753         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2754
2755         intel_edp_backlight_on(intel_dp);
2756         intel_psr_enable(intel_dp);
2757 }
2758
2759 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2760 {
2761         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2762         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2763
2764         intel_dp_prepare(encoder);
2765
2766         /* Only ilk+ has port A */
2767         if (dport->port == PORT_A) {
2768                 ironlake_set_pll_cpu_edp(intel_dp);
2769                 ironlake_edp_pll_on(intel_dp);
2770         }
2771 }
2772
/*
 * Logically disconnect this eDP port from the power sequencer it is
 * currently using: sync off vdd, clear the PPS port-select register,
 * and mark the port as having no power sequencer.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum i915_pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* Make sure vdd is off before we lose the sequencer. */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2798
/*
 * Detach @pipe's power sequencer from whichever eDP encoder currently
 * owns it, so the caller can claim it.  Warns if the owning encoder is
 * still active on a crtc.  Caller holds pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum i915_pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only pipes A and B have power sequencers on VLV/CHV. */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2835
/*
 * Bind this eDP port to the power sequencer of the pipe it is being
 * enabled on: release any sequencer the port held previously, steal the
 * target pipe's sequencer from any other port, then initialize the PPS
 * state and registers.  No-op for non-eDP ports or when the binding is
 * already correct.  Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Already using the right sequencer — nothing to do. */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2876
/*
 * VLV pre-enable: program a fixed set of DPIO PHY values for the lane
 * (magic numbers per the PHY programming sequence), then run the common
 * DP enable path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately overwritten
         * with 0 below; presumably only the read access itself matters
         * (or it is vestigial) — confirm against the PHY sequence docs.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        /* Bit 21 selects the pipe; remaining bits are fixed magic. */
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2905
/*
 * VLV pre-PLL-enable: program the port registers, then put the Tx lane
 * resets and PCS clock settings into their default state via DPIO, plus
 * a fixed workaround for inter-pair skew failures.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2935
/*
 * CHV pre-enable: per-lane PHY programming (TX FIFO reset source, upar
 * bits, data lane stagger derived from the port clock), deassert the
 * data lane reset, run the common DP enable path, and finally drop the
 * temporary second-common-lane override if one was taken during
 * pre-PLL-enable.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
                if (intel_crtc->config->lane_count == 1)
                        data = 0x0;
                else
                        data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* Stagger value scales with the link symbol clock. */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val |= DPIO_TX2_STAGGER_MASK(0x1f);
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        if (intel_crtc->config->lane_count > 2) {
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                               DPIO_LANESTAGGER_STRAP(stagger) |
                               DPIO_LANESTAGGER_STRAP_OVRD |
                               DPIO_TX1_STAGGER_MASK(0x1f) |
                               DPIO_TX1_STAGGER_MULT(7) |
                               DPIO_TX2_STAGGER_MULT(5));
        }

        /* Deassert data lane reset */
        chv_data_lane_soft_reset(encoder, false);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);

        /* Second common lane will stay alive on its own now */
        if (dport->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
                dport->release_cl2_override = false;
        }
}
3024
/*
 * CHV pre-PLL-enable: wake the PHY lanes (including tricking the second
 * common lane into life when CH0 drives pipe B), assert the data lane
 * reset, and program clock distribution and clock channel usage for the
 * pipe/channel combination in use.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum i915_pipe pipe = intel_crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;

        intel_dp_prepare(encoder);

        /*
         * Must trick the second common lane into life.
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
                dport->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

        chv_phy_powergate_lanes(encoder, true, lane_mask);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
                val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
                if (pipe != PIPE_B)
                        val &= ~CHV_PCS_USEDCLKCHANNEL;
                else
                        val |= CHV_PCS_USEDCLKCHANNEL;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
        }

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
3107
/*
 * CHV post-PLL-disable: turn off left/right clock distribution for this
 * pipe and drop the lane power overrides taken at pre-PLL-enable time.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Leave the power down bit cleared for at least one
         * lane so that chv_phy_powergate_ch() will power
         * on something when the channel is otherwise unused.
         * When the port is off and the override is removed
         * the lanes power down anyway, so otherwise it doesn't
         * really matter what the state of power down bits is
         * after this.
         */
        chv_phy_powergate_lanes(encoder, false, 0x0);
}
3140
3141 /*
3142  * Native read with retry for link status and receiver capability reads for
3143  * cases where the sink may still be asleep.
3144  *
3145  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3146  * supposed to retry 3 times per the spec.
3147  */
3148 static ssize_t
3149 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3150                         void *buffer, size_t size)
3151 {
3152         ssize_t ret;
3153         int i;
3154
3155         /*
3156          * Sometime we just get the same incorrect byte repeated
3157          * over the entire buffer. Doing just one throw away read
3158          * initially seems to "solve" it.
3159          */
3160         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3161
3162         for (i = 0; i < 3; i++) {
3163                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3164                 if (ret == size)
3165                         return ret;
3166                 msleep(1);
3167         }
3168
3169         return ret;
3170 }
3171
3172 /*
3173  * Fetch AUX CH registers 0x202 - 0x207 which contain
3174  * link status information
3175  */
3176 static bool
3177 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3178 {
3179         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3180                                        DP_LANE0_1_STATUS,
3181                                        link_status,
3182                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3183 }
3184
3185 /* These are source-specific values. */
3186 static uint8_t
3187 intel_dp_voltage_max(struct intel_dp *intel_dp)
3188 {
3189         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3190         struct drm_i915_private *dev_priv = dev->dev_private;
3191         enum port port = dp_to_dig_port(intel_dp)->port;
3192
3193         if (IS_BROXTON(dev))
3194                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3195         else if (INTEL_INFO(dev)->gen >= 9) {
3196                 if (dev_priv->edp_low_vswing && port == PORT_A)
3197                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3198                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3199         } else if (IS_VALLEYVIEW(dev))
3200                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3201         else if (IS_GEN7(dev) && port == PORT_A)
3202                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3203         else if (HAS_PCH_CPT(dev) && port != PORT_A)
3204                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3205         else
3206                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3207 }
3208
/*
 * Maximum pre-emphasis level the source can combine with the given
 * voltage swing.  These are source-specific, per-platform tables;
 * higher swing levels generally leave less headroom, so the allowed
 * pre-emphasis steps down as the swing level goes up.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+: every swing level is supported, emphasis 3..0 */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 eDP (port A) caps out at pre-emphasis level 2 */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* everything else (gen4-6 DP, CPT) */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3276
/*
 * Program the ValleyView DPIO PHY transmitter with the swing and
 * pre-emphasis settings requested in train_set[0].  The demph /
 * uniqtranscale / preemph register values are opaque tuning constants;
 * a pre-emphasis + swing combination with no table entry returns 0
 * before any PHY register is touched.
 *
 * Always returns 0: the caller ORs the result into the port register,
 * and on VLV the levels live entirely in the PHY, not in that register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	default:
		return 0;
	}

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	/*
	 * NOTE(review): TX_DW5 is written to 0 before the update and to
	 * 0x80000000 after, which looks like a disable/re-latch around
	 * loading the new settings -- confirm against the PHY docs.
	 */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3376
3377 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3378 {
3379         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3380                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3381 }
3382
/*
 * Program the CherryView DPIO PHY with the de-emphasis and margin
 * (swing) values requested in train_set[0].  The numeric register
 * values are opaque tuning constants; unsupported swing/pre-emphasis
 * combinations return 0 before any PHY register is touched.
 *
 * The programming sequence (clear calc init, program values, start
 * swing calculation) is order-dependent; do not reorder the writes.
 *
 * Always returns 0: on CHV the levels live entirely in the PHY, so
 * no bits are contributed to the port register.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;	/* unsupported combination */
		}
		break;
	default:
		return 0;
	}

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The second PCS group only exists/matters when using >2 lanes. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3550
3551 static void
3552 intel_get_adjust_train(struct intel_dp *intel_dp,
3553                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3554 {
3555         uint8_t v = 0;
3556         uint8_t p = 0;
3557         int lane;
3558         uint8_t voltage_max;
3559         uint8_t preemph_max;
3560
3561         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3562                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3563                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3564
3565                 if (this_v > v)
3566                         v = this_v;
3567                 if (this_p > p)
3568                         p = this_p;
3569         }
3570
3571         voltage_max = intel_dp_voltage_max(intel_dp);
3572         if (v >= voltage_max)
3573                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3574
3575         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3576         if (p >= preemph_max)
3577                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3578
3579         for (lane = 0; lane < 4; lane++)
3580                 intel_dp->train_set[lane] = v | p;
3581 }
3582
3583 static uint32_t
3584 gen4_signal_levels(uint8_t train_set)
3585 {
3586         uint32_t        signal_levels = 0;
3587
3588         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3589         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3590         default:
3591                 signal_levels |= DP_VOLTAGE_0_4;
3592                 break;
3593         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3594                 signal_levels |= DP_VOLTAGE_0_6;
3595                 break;
3596         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3597                 signal_levels |= DP_VOLTAGE_0_8;
3598                 break;
3599         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3600                 signal_levels |= DP_VOLTAGE_1_2;
3601                 break;
3602         }
3603         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3604         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3605         default:
3606                 signal_levels |= DP_PRE_EMPHASIS_0;
3607                 break;
3608         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3609                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3610                 break;
3611         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3612                 signal_levels |= DP_PRE_EMPHASIS_6;
3613                 break;
3614         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3615                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3616                 break;
3617         }
3618         return signal_levels;
3619 }
3620
/*
 * Gen6's DP voltage swing and pre-emphasis control.
 *
 * Translate the combined DPCD vswing + pre-emphasis request into the
 * SNB EDP_LINK_TRAIN_* register encoding.  Several requests map to
 * the same hardware setting, hence the grouped case labels.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Fall back to the lowest setting for unknown requests. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3648
/*
 * Gen7's DP voltage swing and pre-emphasis control.
 *
 * Translate the combined DPCD vswing + pre-emphasis request into the
 * IVB EDP_LINK_TRAIN_* register encoding.  Combinations the hardware
 * cannot produce fall into the default case.
 */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Note: the fallback here is the 500mV setting. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3679
3680 /* Properly updates "DP" with the correct signal levels. */
3681 static void
3682 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3683 {
3684         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3685         enum port port = intel_dig_port->port;
3686         struct drm_device *dev = intel_dig_port->base.base.dev;
3687         uint32_t signal_levels, mask = 0;
3688         uint8_t train_set = intel_dp->train_set[0];
3689
3690         if (HAS_DDI(dev)) {
3691                 signal_levels = ddi_signal_levels(intel_dp);
3692
3693                 if (IS_BROXTON(dev))
3694                         signal_levels = 0;
3695                 else
3696                         mask = DDI_BUF_EMP_MASK;
3697         } else if (IS_CHERRYVIEW(dev)) {
3698                 signal_levels = chv_signal_levels(intel_dp);
3699         } else if (IS_VALLEYVIEW(dev)) {
3700                 signal_levels = vlv_signal_levels(intel_dp);
3701         } else if (IS_GEN7(dev) && port == PORT_A) {
3702                 signal_levels = gen7_edp_signal_levels(train_set);
3703                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3704         } else if (IS_GEN6(dev) && port == PORT_A) {
3705                 signal_levels = gen6_edp_signal_levels(train_set);
3706                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3707         } else {
3708                 signal_levels = gen4_signal_levels(train_set);
3709                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3710         }
3711
3712         if (mask)
3713                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3714
3715         DRM_DEBUG_KMS("Using vswing level %d\n",
3716                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3717         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3718                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3719                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3720
3721         *DP = (*DP & ~mask) | signal_levels;
3722 }
3723
/*
 * Set the link training pattern on both the source (port register)
 * and the sink (DPCD), in that order.  When enabling a pattern, the
 * per-lane drive settings are written in the same AUX transfer as
 * the pattern, per the DP spec.  Returns true if the DPCD write
 * transferred the expected number of bytes.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Update the source side first. */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);	/* flush before the AUX write */

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3756
3757 static bool
3758 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3759                         uint8_t dp_train_pat)
3760 {
3761         if (!intel_dp->train_set_valid)
3762                 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3763         intel_dp_set_signal_levels(intel_dp, DP);
3764         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3765 }
3766
/*
 * Apply the sink's adjustment requests mid-training: recompute the
 * train_set from link_status, program the new signal levels on the
 * source (port register first, then the per-lane DPCD drive settings).
 * Returns true if all lanes' settings were written to the sink.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);	/* flush before the AUX write */

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3787
/*
 * Switch a DDI port to idle-pattern transmission after training and
 * wait for the hardware to report the idle patterns were sent.
 * No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout; the idle-done bit should latch almost immediately. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3818
3819 /* Enable corresponding port and start training pattern 1 */
3820 static void
3821 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3822 {
3823         struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3824         struct drm_device *dev = encoder->dev;
3825         int i;
3826         uint8_t voltage;
3827         int voltage_tries, loop_tries;
3828         uint32_t DP = intel_dp->DP;
3829         uint8_t link_config[2];
3830         uint8_t link_bw, rate_select;
3831
3832         if (HAS_DDI(dev))
3833                 intel_ddi_prepare_link_retrain(encoder);
3834
3835         intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3836                               &link_bw, &rate_select);
3837
3838         /* Write the link configuration data */
3839         link_config[0] = link_bw;
3840         link_config[1] = intel_dp->lane_count;
3841         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3842                 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3843         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3844         if (intel_dp->num_sink_rates)
3845                 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3846                                   &rate_select, 1);
3847
3848         link_config[0] = 0;
3849         link_config[1] = DP_SET_ANSI_8B10B;
3850         drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3851
3852         DP |= DP_PORT_EN;
3853
3854         /* clock recovery */
3855         if (!intel_dp_reset_link_train(intel_dp, &DP,
3856                                        DP_TRAINING_PATTERN_1 |
3857                                        DP_LINK_SCRAMBLING_DISABLE)) {
3858                 DRM_ERROR("failed to enable link training\n");
3859                 return;
3860         }
3861
3862         voltage = 0xff;
3863         voltage_tries = 0;
3864         loop_tries = 0;
3865         for (;;) {
3866                 uint8_t link_status[DP_LINK_STATUS_SIZE];
3867
3868                 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3869                 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3870                         DRM_ERROR("failed to get link status\n");
3871                         break;
3872                 }
3873
3874                 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3875                         DRM_DEBUG_KMS("clock recovery OK\n");
3876                         break;
3877                 }
3878
3879                 /*
3880                  * if we used previously trained voltage and pre-emphasis values
3881                  * and we don't get clock recovery, reset link training values
3882                  */
3883                 if (intel_dp->train_set_valid) {
3884                         DRM_DEBUG_KMS("clock recovery not ok, reset");
3885                         /* clear the flag as we are not reusing train set */
3886                         intel_dp->train_set_valid = false;
3887                         if (!intel_dp_reset_link_train(intel_dp, &DP,
3888                                                        DP_TRAINING_PATTERN_1 |
3889                                                        DP_LINK_SCRAMBLING_DISABLE)) {
3890                                 DRM_ERROR("failed to enable link training\n");
3891                                 return;
3892                         }
3893                         continue;
3894                 }
3895
3896                 /* Check to see if we've tried the max voltage */
3897                 for (i = 0; i < intel_dp->lane_count; i++)
3898                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3899                                 break;
3900                 if (i == intel_dp->lane_count) {
3901                         ++loop_tries;
3902                         if (loop_tries == 5) {
3903                                 DRM_ERROR("too many full retries, give up\n");
3904                                 break;
3905                         }
3906                         intel_dp_reset_link_train(intel_dp, &DP,
3907                                                   DP_TRAINING_PATTERN_1 |
3908                                                   DP_LINK_SCRAMBLING_DISABLE);
3909                         voltage_tries = 0;
3910                         continue;
3911                 }
3912
3913                 /* Check to see if we've tried the same voltage 5 times */
3914                 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3915                         ++voltage_tries;
3916                         if (voltage_tries == 5) {
3917                                 DRM_ERROR("too many voltage retries, give up\n");
3918                                 break;
3919                         }
3920                 } else
3921                         voltage_tries = 0;
3922                 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3923
3924                 /* Update training set as requested by target */
3925                 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3926                         DRM_ERROR("failed to update link training\n");
3927                         break;
3928                 }
3929         }
3930
3931         intel_dp->DP = DP;
3932 }
3933
/*
 * Channel-equalization phase of DP link training (follows clock
 * recovery).  Picks training pattern 2 or 3, then loops: verify clock
 * recovery still holds, test for channel EQ, and apply the sink's
 * requested drive-level adjustments until EQ succeeds or the retry
 * budgets run out.  On success intel_dp->train_set_valid is set so
 * the trained settings may be reused on a later retrain.
 */
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/*
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
	if (intel_dp_source_supports_hbr2(dev) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else if (intel_dp->link_rate == 540000)
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up entirely after 5 full clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			/* Clock recovery was lost: redo it from scratch. */
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
4031
/*
 * End link training: disable the training pattern so the port resumes
 * transmitting normal video.  Must follow intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
4037
/*
 * Run full DP link training: clock recovery (training pattern 1)
 * followed by channel equalization (pattern 2 or 3).  Callers must
 * finish with intel_dp_stop_link_train() to leave training mode.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}
4044
/*
 * Power the DP port down on non-DDI hardware: switch the link to the
 * idle pattern, then clear the port-enable and audio bits, apply the
 * IBX transcoder-A workaround where needed, and finally wait out the
 * panel power-down delay.  DDI platforms use a different disable path
 * and must never reach this function.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop to the idle training pattern (register layout varies). */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually turn the port (and audio) off. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
4100
/*
 * Read and cache the sink's DPCD receiver capabilities, and probe the
 * optional feature blocks that depend on them: PSR/PSR2 support (eDP),
 * eDP 1.4 intermediate link rates, and downstream-port info.
 *
 * Returns true when a sink with a valid DPCD was found (and, for
 * branch devices, its downstream port table was fetched), false on
 * AUX failure or when no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

#ifdef __DragonFly__
	/* No %*ph printf extension here; hand-roll the hex dump. */
	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
#else
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
#endif

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is gen9+ only and additionally needs AUX frame sync. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(dev)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Rate table is zero-terminated when shorter than the max. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4196
4197 static void
4198 intel_dp_probe_oui(struct intel_dp *intel_dp)
4199 {
4200         u8 buf[3];
4201
4202         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4203                 return;
4204
4205         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4206                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4207                               buf[0], buf[1], buf[2]);
4208
4209         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4210                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4211                               buf[0], buf[1], buf[2]);
4212 }
4213
4214 static bool
4215 intel_dp_probe_mst(struct intel_dp *intel_dp)
4216 {
4217         u8 buf[1];
4218
4219         if (!intel_dp->can_mst)
4220                 return false;
4221
4222         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4223                 return false;
4224
4225         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4226                 if (buf[0] & DP_MST_CAP) {
4227                         DRM_DEBUG_KMS("Sink is MST capable\n");
4228                         intel_dp->is_mst = true;
4229                 } else {
4230                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4231                         intel_dp->is_mst = false;
4232                 }
4233         }
4234
4235 #if 0
4236         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4237         return intel_dp->is_mst;
4238 #else
4239         return false;
4240 #endif
4241 }
4242
/*
 * Stop sink CRC generation by clearing DP_TEST_SINK_START in the
 * sink's TEST_SINK register, then re-enable IPS (which was disabled
 * for the duration of CRC capture).  Returns 0 on success or -EIO on
 * AUX failure; IPS is re-enabled on all paths.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	/* Read-modify-write: preserve the other TEST_SINK bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4268
/*
 * Start sink CRC generation: stop any capture already in progress,
 * verify the sink supports CRC (TEST_SINK_MISC), record the current
 * test count, disable IPS (it would perturb the CRC) and set
 * DP_TEST_SINK_START.  Returns 0, -EIO on AUX failure, or -ENOTTY
 * when the sink cannot compute CRCs.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	/* Restart cleanly if a previous capture is still running. */
	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Baseline count, used later to detect a fresh CRC. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Failed to start: undo the IPS disable before bailing. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
4304
/*
 * Capture one frame CRC from the sink into crc[6].
 *
 * Starts sink CRC generation, then waits up to 6 vblanks for a CRC
 * that is provably fresh: the sink's test count must be non-zero and
 * either the count or the CRC bytes must differ from the previously
 * captured values.  Some panels never update the count, in which case
 * the last (possibly stale) CRC is accepted with a debug warning.
 *
 * Returns 0 on success, -EIO on AUX failure, or -ETIMEDOUT when the
 * panel produced no CRC at all.  CRC generation is always stopped
 * before returning.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		/* CRCs are generated per frame; wait for the next one. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* Identical count and bytes means the CRC hasn't refreshed. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	/* Remember this capture for staleness detection next time. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4364
4365 static bool
4366 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4367 {
4368         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4369                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4370                                        sink_irq_vector, 1) == 1;
4371 }
4372
#if 0
/*
 * Read the 14-byte ESI (Event Status Indicator) block used by MST
 * sinks, starting at DP_SINK_COUNT_ESI.  Returns true only when the
 * full block was read.  (Disabled: MST is not supported yet.)
 */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_SINK_COUNT_ESI,
				       sink_irq_vector, 14) == 14;
}
#endif
4388
4389 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4390 {
4391         uint8_t test_result = DP_TEST_ACK;
4392         return test_result;
4393 }
4394
4395 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4396 {
4397         uint8_t test_result = DP_TEST_NAK;
4398         return test_result;
4399 }
4400
4401 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4402 {
4403         uint8_t test_result = DP_TEST_NAK;
4404         struct intel_connector *intel_connector = intel_dp->attached_connector;
4405         struct drm_connector *connector = &intel_connector->base;
4406
4407         if (intel_connector->detect_edid == NULL ||
4408             connector->edid_corrupt ||
4409             intel_dp->aux.i2c_defer_count > 6) {
4410                 /* Check EDID read for NACKs, DEFERs and corruption
4411                  * (DP CTS 1.2 Core r1.1)
4412                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4413                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4414                  *    4.2.2.6 : EDID corruption detected
4415                  * Use failsafe mode for all cases
4416                  */
4417                 if (intel_dp->aux.i2c_nack_count > 0 ||
4418                         intel_dp->aux.i2c_defer_count > 0)
4419                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4420                                       intel_dp->aux.i2c_nack_count,
4421                                       intel_dp->aux.i2c_defer_count);
4422                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4423         } else {
4424                 struct edid *block = intel_connector->detect_edid;
4425
4426                 /* We have to write the checksum
4427                  * of the last block read
4428                  */
4429                 block += intel_connector->detect_edid->extensions;
4430
4431                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4432                                         DP_TEST_EDID_CHECKSUM,
4433                                         &block->checksum,
4434                                         1))
4435                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4436
4437                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4438                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4439         }
4440
4441         /* Set test active flag here so userspace doesn't interrupt things */
4442         intel_dp->compliance_test_active = 1;
4443
4444         return test_result;
4445 }
4446
4447 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4448 {
4449         uint8_t test_result = DP_TEST_NAK;
4450         return test_result;
4451 }
4452
4453 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4454 {
4455         uint8_t response = DP_TEST_NAK;
4456         uint8_t rxdata = 0;
4457         int status = 0;
4458
4459         intel_dp->compliance_test_active = 0;
4460         intel_dp->compliance_test_type = 0;
4461         intel_dp->compliance_test_data = 0;
4462
4463         intel_dp->aux.i2c_nack_count = 0;
4464         intel_dp->aux.i2c_defer_count = 0;
4465
4466         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4467         if (status <= 0) {
4468                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4469                 goto update_status;
4470         }
4471
4472         switch (rxdata) {
4473         case DP_TEST_LINK_TRAINING:
4474                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4475                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4476                 response = intel_dp_autotest_link_training(intel_dp);
4477                 break;
4478         case DP_TEST_LINK_VIDEO_PATTERN:
4479                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4480                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4481                 response = intel_dp_autotest_video_pattern(intel_dp);
4482                 break;
4483         case DP_TEST_LINK_EDID_READ:
4484                 DRM_DEBUG_KMS("EDID test requested\n");
4485                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4486                 response = intel_dp_autotest_edid(intel_dp);
4487                 break;
4488         case DP_TEST_LINK_PHY_TEST_PATTERN:
4489                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4490                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4491                 response = intel_dp_autotest_phy_pattern(intel_dp);
4492                 break;
4493         default:
4494                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4495                 break;
4496         }
4497
4498 update_status:
4499         status = drm_dp_dpcd_write(&intel_dp->aux,
4500                                    DP_TEST_RESPONSE,
4501                                    &response, 1);
4502         if (status <= 0)
4503                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4504 }
4505
#if 0
/*
 * Handle a sink IRQ on an MST link: fetch the ESI block, retrain if
 * channel EQ was lost, forward the event to the MST manager, ack the
 * handled bits, and loop while new events keep arriving.  If the ESI
 * read fails the device is assumed gone and MST is torn down.
 * (Disabled: MST is not supported on DragonFly yet.)
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the
				 * 3-byte write a few times on short writes. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have queued up meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
#endif
4564
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called with the connection mutex held, typically on a short HPD
 * pulse: re-reads link status and DPCD, acks any pending sink IRQs,
 * and retrains the link if channel equalization has been lost.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to check if the encoder isn't driving an active crtc. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if the sink reports channel EQ was lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4620
4621 /* XXX this is probably wrong for multiple downstream ports */
4622 static enum drm_connector_status
4623 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4624 {
4625         uint8_t *dpcd = intel_dp->dpcd;
4626         uint8_t type;
4627
4628         if (!intel_dp_get_dpcd(intel_dp))
4629                 return connector_status_disconnected;
4630
4631         /* if there's no downstream port, we're done */
4632         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4633                 return connector_status_connected;
4634
4635         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4636         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4637             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4638                 uint8_t reg;
4639
4640                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4641                                             &reg, 1) < 0)
4642                         return connector_status_unknown;
4643
4644                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4645                                               : connector_status_disconnected;
4646         }
4647
4648         /* If no HPD, poke DDC gently */
4649         if (drm_probe_ddc(intel_dp->aux.ddc))
4650                 return connector_status_connected;
4651
4652         /* Well we tried, say unknown for unreliable port types */
4653         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4654                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4655                 if (type == DP_DS_PORT_TYPE_VGA ||
4656                     type == DP_DS_PORT_TYPE_NON_EDID)
4657                         return connector_status_unknown;
4658         } else {
4659                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4660                         DP_DWN_STRM_PORT_TYPE_MASK;
4661                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4662                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4663                         return connector_status_unknown;
4664         }
4665
4666         /* Anything else is out of spec, warn and ignore */
4667         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4668         return connector_status_disconnected;
4669 }
4670
4671 static enum drm_connector_status
4672 edp_detect(struct intel_dp *intel_dp)
4673 {
4674         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4675         enum drm_connector_status status;
4676
4677         status = intel_panel_detect(dev);
4678         if (status == connector_status_unknown)
4679                 status = connector_status_connected;
4680
4681         return status;
4682 }
4683
4684 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4685                                        struct intel_digital_port *port)
4686 {
4687         u32 bit;
4688
4689         switch (port->port) {
4690         case PORT_A:
4691                 return true;
4692         case PORT_B:
4693                 bit = SDE_PORTB_HOTPLUG;
4694                 break;
4695         case PORT_C:
4696                 bit = SDE_PORTC_HOTPLUG;
4697                 break;
4698         case PORT_D:
4699                 bit = SDE_PORTD_HOTPLUG;
4700                 break;
4701         default:
4702                 MISSING_CASE(port->port);
4703                 return false;
4704         }
4705
4706         return I915_READ(SDEISR) & bit;
4707 }
4708
4709 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4710                                        struct intel_digital_port *port)
4711 {
4712         u32 bit;
4713
4714         switch (port->port) {
4715         case PORT_A:
4716                 return true;
4717         case PORT_B:
4718                 bit = SDE_PORTB_HOTPLUG_CPT;
4719                 break;
4720         case PORT_C:
4721                 bit = SDE_PORTC_HOTPLUG_CPT;
4722                 break;
4723         case PORT_D:
4724                 bit = SDE_PORTD_HOTPLUG_CPT;
4725                 break;
4726         case PORT_E:
4727                 bit = SDE_PORTE_HOTPLUG_SPT;
4728                 break;
4729         default:
4730                 MISSING_CASE(port->port);
4731                 return false;
4732         }
4733
4734         return I915_READ(SDEISR) & bit;
4735 }
4736
4737 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4738                                        struct intel_digital_port *port)
4739 {
4740         u32 bit;
4741
4742         switch (port->port) {
4743         case PORT_B:
4744                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4745                 break;
4746         case PORT_C:
4747                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4748                 break;
4749         case PORT_D:
4750                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4751                 break;
4752         default:
4753                 MISSING_CASE(port->port);
4754                 return false;
4755         }
4756
4757         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4758 }
4759
4760 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4761                                        struct intel_digital_port *port)
4762 {
4763         u32 bit;
4764
4765         switch (port->port) {
4766         case PORT_B:
4767                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4768                 break;
4769         case PORT_C:
4770                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4771                 break;
4772         case PORT_D:
4773                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4774                 break;
4775         default:
4776                 MISSING_CASE(port->port);
4777                 return false;
4778         }
4779
4780         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4781 }
4782
4783 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4784                                        struct intel_digital_port *intel_dig_port)
4785 {
4786         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4787         enum port port;
4788         u32 bit;
4789
4790         intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4791         switch (port) {
4792         case PORT_A:
4793                 bit = BXT_DE_PORT_HP_DDIA;
4794                 break;
4795         case PORT_B:
4796                 bit = BXT_DE_PORT_HP_DDIB;
4797                 break;
4798         case PORT_C:
4799                 bit = BXT_DE_PORT_HP_DDIC;
4800                 break;
4801         default:
4802                 MISSING_CASE(port);
4803                 return false;
4804         }
4805
4806         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4807 }
4808
4809 /*
4810  * intel_digital_port_connected - is the specified port connected?
4811  * @dev_priv: i915 private structure
4812  * @port: the port to test
4813  *
4814  * Return %true if @port is connected, %false otherwise.
4815  */
4816 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4817                                          struct intel_digital_port *port)
4818 {
4819         if (HAS_PCH_IBX(dev_priv))
4820                 return ibx_digital_port_connected(dev_priv, port);
4821         if (HAS_PCH_SPLIT(dev_priv))
4822                 return cpt_digital_port_connected(dev_priv, port);
4823         else if (IS_BROXTON(dev_priv))
4824                 return bxt_digital_port_connected(dev_priv, port);
4825         else if (IS_VALLEYVIEW(dev_priv))
4826                 return vlv_digital_port_connected(dev_priv, port);
4827         else
4828                 return g4x_digital_port_connected(dev_priv, port);
4829 }
4830
4831 static enum drm_connector_status
4832 ironlake_dp_detect(struct intel_dp *intel_dp)
4833 {
4834         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4835         struct drm_i915_private *dev_priv = dev->dev_private;
4836         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4837
4838         if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4839                 return connector_status_disconnected;
4840
4841         return intel_dp_detect_dpcd(intel_dp);
4842 }
4843
4844 static enum drm_connector_status
4845 g4x_dp_detect(struct intel_dp *intel_dp)
4846 {
4847         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4848         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4849
4850         /* Can't disconnect eDP, but you can close the lid... */
4851         if (is_edp(intel_dp)) {
4852                 enum drm_connector_status status;
4853
4854                 status = intel_panel_detect(dev);
4855                 if (status == connector_status_unknown)
4856                         status = connector_status_connected;
4857                 return status;
4858         }
4859
4860         if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4861                 return connector_status_disconnected;
4862
4863         return intel_dp_detect_dpcd(intel_dp);
4864 }
4865
4866 static struct edid *
4867 intel_dp_get_edid(struct intel_dp *intel_dp)
4868 {
4869         struct intel_connector *intel_connector = intel_dp->attached_connector;
4870
4871         /* use cached edid if we have one */
4872         if (intel_connector->edid) {
4873                 /* invalid edid */
4874                 if (IS_ERR(intel_connector->edid))
4875                         return NULL;
4876
4877                 return drm_edid_duplicate(intel_connector->edid);
4878         } else
4879                 return drm_get_edid(&intel_connector->base,
4880                                     intel_dp->aux.ddc);
4881 }
4882
4883 static void
4884 intel_dp_set_edid(struct intel_dp *intel_dp)
4885 {
4886         struct intel_connector *intel_connector = intel_dp->attached_connector;
4887         struct edid *edid;
4888
4889         edid = intel_dp_get_edid(intel_dp);
4890         intel_connector->detect_edid = edid;
4891
4892         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4893                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4894         else
4895                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4896 }
4897
4898 static void
4899 intel_dp_unset_edid(struct intel_dp *intel_dp)
4900 {
4901         struct intel_connector *intel_connector = intel_dp->attached_connector;
4902
4903         kfree(intel_connector->detect_edid);
4904         intel_connector->detect_edid = NULL;
4905
4906         intel_dp->has_audio = false;
4907 }
4908
/*
 * Full connector detect: probe the port, read DPCD/OUI, switch to MST
 * if the sink supports it, cache the EDID and service any pending sink
 * IRQ.  Runs with the port's AUX power domain held.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        bool ret;
        u8 sink_irq_vector;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Drop any EDID cached by a previous detect cycle. */
        intel_dp_unset_edid(intel_dp);

        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                return connector_status_disconnected;
        }

        /* The AUX transactions below need the port's AUX power domain. */
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(to_i915(dev), power_domain);

        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;

        intel_dp_probe_oui(intel_dp);

        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
                /* if we are in MST mode then this connector
                   won't appear connected or have anything with EDID on it */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                status = connector_status_disconnected;
                goto out;
        }

        intel_dp_set_edid(intel_dp);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
                                   sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

out:
        intel_display_power_put(to_i915(dev), power_domain);
        return status;
}
4981
4982 static void
4983 intel_dp_force(struct drm_connector *connector)
4984 {
4985         struct intel_dp *intel_dp = intel_attached_dp(connector);
4986         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4987         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4988         enum intel_display_power_domain power_domain;
4989
4990         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4991                       connector->base.id, connector->name);
4992         intel_dp_unset_edid(intel_dp);
4993
4994         if (connector->status != connector_status_connected)
4995                 return;
4996
4997         power_domain = intel_display_port_aux_power_domain(intel_encoder);
4998         intel_display_power_get(dev_priv, power_domain);
4999
5000         intel_dp_set_edid(intel_dp);
5001
5002         intel_display_power_put(dev_priv, power_domain);
5003
5004         if (intel_encoder->type != INTEL_OUTPUT_EDP)
5005                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5006 }
5007
5008 static int intel_dp_get_modes(struct drm_connector *connector)
5009 {
5010         struct intel_connector *intel_connector = to_intel_connector(connector);
5011         struct edid *edid;
5012
5013         edid = intel_connector->detect_edid;
5014         if (edid) {
5015                 int ret = intel_connector_update_modes(connector, edid);
5016                 if (ret)
5017                         return ret;
5018         }
5019
5020         /* if eDP has no EDID, fall back to fixed mode */
5021         if (is_edp(intel_attached_dp(connector)) &&
5022             intel_connector->panel.fixed_mode) {
5023                 struct drm_display_mode *mode;
5024
5025                 mode = drm_mode_duplicate(connector->dev,
5026                                           intel_connector->panel.fixed_mode);
5027                 if (mode) {
5028                         drm_mode_probed_add(connector, mode);
5029                         return 1;
5030                 }
5031         }
5032
5033         return 0;
5034 }
5035
5036 static bool
5037 intel_dp_detect_audio(struct drm_connector *connector)
5038 {
5039         bool has_audio = false;
5040         struct edid *edid;
5041
5042         edid = to_intel_connector(connector)->detect_edid;
5043         if (edid)
5044                 has_audio = drm_detect_monitor_audio(edid);
5045
5046         return has_audio;
5047 }
5048
/*
 * Connector property setter: handles the force-audio, broadcast-RGB
 * and (eDP only) panel scaling-mode properties.  Returns 0 on success
 * or a negative errno; an effective change restores the mode on the
 * encoder's CRTC so the new setting takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
{
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;

        /* Record the new value on the DRM object first. */
        ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;

        if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;

                if (i == intel_dp->force_audio)
                        return 0;

                intel_dp->force_audio = i;

                /* AUTO re-derives audio support from the cached EDID. */
                if (i == HDMI_AUDIO_AUTO)
                        has_audio = intel_dp_detect_audio(connector);
                else
                        has_audio = (i == HDMI_AUDIO_ON);

                if (has_audio == intel_dp->has_audio)
                        return 0;

                intel_dp->has_audio = has_audio;
                goto done;
        }

        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_dp->color_range_auto;
                bool old_range = intel_dp->limited_color_range;

                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
                        intel_dp->color_range_auto = true;
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_dp->color_range_auto = false;
                        intel_dp->limited_color_range = false;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_dp->color_range_auto = false;
                        intel_dp->limited_color_range = true;
                        break;
                default:
                        return -EINVAL;
                }

                /* No effective change: skip the modeset below. */
                if (old_auto == intel_dp->color_range_auto &&
                    old_range == intel_dp->limited_color_range)
                        return 0;

                goto done;
        }

        if (is_edp(intel_dp) &&
            property == connector->dev->mode_config.scaling_mode_property) {
                if (val == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }

                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
                        return 0;
                }
                intel_connector->panel.fitting_mode = val;

                goto done;
        }

        return -EINVAL;

done:
        /* Apply the new setting by restoring the mode on the live CRTC. */
        if (intel_encoder->base.crtc)
                intel_crtc_restore_mode(intel_encoder->base.crtc);

        return 0;
}
5136
/*
 * Free everything hanging off a DP connector: the detect-time EDID,
 * the long-term cached EDID, the eDP panel state, and the connector
 * itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        /* kfree(NULL) is a no-op, so detect_edid needs no check. */
        kfree(intel_connector->detect_edid);

        /* The cached edid may be NULL or an ERR_PTR marker; only free
         * a real buffer. */
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);

        /* Can't call is_edp() since the encoder may have been destroyed
         * already. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_fini(&intel_connector->panel);

        drm_connector_cleanup(connector);
        kfree(connector);
}
5155
/*
 * Tear down a DP encoder: unregister the AUX channel, clean up MST
 * state, force panel VDD off for eDP, then free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        drm_dp_aux_unregister(&intel_dp->aux);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

#if 0
                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
#endif
        }
        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
}
5183
/* On suspend, cancel the delayed VDD-off work and force VDD off now
 * (eDP only; other DP encoders have nothing to do here). */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        if (!is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
5200
/*
 * Adopt a panel-VDD enable left behind by the BIOS: take the matching
 * power-domain reference and schedule the usual delayed VDD off so the
 * reference is eventually released.  Caller holds pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Nothing to adopt if VDD is already off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
        power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
        intel_display_power_get(dev_priv, power_domain);

        edp_panel_vdd_schedule_off(intel_dp);
}
5225
/*
 * Encoder reset hook: re-sync eDP power-sequencer state with whatever
 * the BIOS left behind (PPS pipe assignment on VLV, stray VDD enable).
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct intel_dp *intel_dp;

        /* Only eDP encoders carry power-sequencer state to sanitize. */
        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;

        intel_dp = enc_to_intel_dp(encoder);

        pps_lock(intel_dp);

        /*
         * Read out the current power sequencer assignment,
         * in case the BIOS did something with it.
         */
        if (IS_VALLEYVIEW(encoder->dev))
                vlv_initial_power_sequencer_setup(intel_dp);

        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);
}
5248
/* DRM connector ops for DP/eDP; atomic state handling uses the
 * generic helpers, everything else is DP-specific. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5260
/* Probe helpers: mode listing/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
5266
/* Encoder lifecycle ops: reset (PPS sanitize) and teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
5271
/*
 * Handle a hotplug IRQ pulse on a DP digital port.  A long pulse means
 * the device may have (dis)appeared: re-check the port, re-read DPCD
 * and re-probe OUI/MST.  A short pulse is a sink IRQ: check link status
 * and retrain if needed.  Returns true when MST state was torn down
 * (caller should re-detect), false when the pulse was handled here.
 */
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        bool ret = true;

        /* Reclassify the port as DP unless it's known eDP or HDMI. */
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
            intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return false;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        /* The DPCD accesses below need the port's AUX power domain. */
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        if (long_hpd) {
                /* indicate that we need to restart link training */
                intel_dp->train_set_valid = false;

                if (!intel_digital_port_connected(dev_priv, intel_dig_port))
                        goto mst_fail;

                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
                }

                intel_dp_probe_oui(intel_dp);

                if (!intel_dp_probe_mst(intel_dp)) {
                        goto mst_fail;
                }
        } else {
                if (intel_dp->is_mst) {
#if 0
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
#endif
                }

                if (!intel_dp->is_mst) {
                        /* Short pulse on SST: verify the link still trains. */
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }

        ret = false;

        goto put_power;
mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                intel_dp->is_mst = false;
#if 0
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
#endif
        }
put_power:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
5353
5354 /* Return which DP Port should be selected for Transcoder DP control */
5355 int
5356 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5357 {
5358         struct drm_device *dev = crtc->dev;
5359         struct intel_encoder *intel_encoder;
5360         struct intel_dp *intel_dp;
5361
5362         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5363                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5364
5365                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5366                     intel_encoder->type == INTEL_OUTPUT_EDP)
5367                         return intel_dp->output_reg;
5368         }
5369
5370         return -1;
5371 }
5372
5373 /* check the VBT to see whether the eDP is on another port */
5374 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5375 {
5376         struct drm_i915_private *dev_priv = dev->dev_private;
5377         union child_device_config *p_child;
5378         int i;
5379         static const short port_mapping[] = {
5380                 [PORT_B] = DVO_PORT_DPB,
5381                 [PORT_C] = DVO_PORT_DPC,
5382                 [PORT_D] = DVO_PORT_DPD,
5383                 [PORT_E] = DVO_PORT_DPE,
5384         };
5385
5386         /*
5387          * eDP not supported on g4x. so bail out early just
5388          * for a bit extra safety in case the VBT is bonkers.
5389          */
5390         if (INTEL_INFO(dev)->gen < 5)
5391                 return false;
5392
5393         if (port == PORT_A)
5394                 return true;
5395
5396         if (!dev_priv->vbt.child_dev_num)
5397                 return false;
5398
5399         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5400                 p_child = dev_priv->vbt.child_dev + i;
5401
5402                 if (p_child->common.dvo_port == port_mapping[port] &&
5403                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5404                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5405                         return true;
5406         }
5407         return false;
5408 }
5409
5410 void
5411 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5412 {
5413         struct intel_connector *intel_connector = to_intel_connector(connector);
5414
5415         intel_attach_force_audio_property(connector);
5416         intel_attach_broadcast_rgb_property(connector);
5417         intel_dp->color_range_auto = true;
5418
5419         if (is_edp(intel_dp)) {
5420                 drm_mode_create_scaling_mode_property(connector->dev);
5421                 drm_object_attach_property(
5422                         &connector->base,
5423                         connector->dev->mode_config.scaling_mode_property,
5424                         DRM_MODE_SCALE_ASPECT);
5425                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5426         }
5427 }
5428
/* Stamp all panel power-sequencing events as "now" so the PPS delay
 * bookkeeping starts from a known baseline. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
        intel_dp->last_power_cycle = jiffies;
        intel_dp->last_power_on = jiffies;
        intel_dp->last_backlight_off = jiffies;
}
5435
/*
 * Determine the panel power sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12) by taking the max of the current hardware register values
 * and the VBT-provided values, falling back to the eDP spec limits
 * when both are zero, and cache the result in intel_dp->pps_delays.
 * Runs at most once per intel_dp (guarded by final->t11_t12 != 0).
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PPS register set for this platform. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV/CHV have per-pipe power sequencers. */
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT has no divisor register; the power-cycle delay lives in
	 * the control register instead (handled below). */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT encodes the delay in 100ms units, offset by one
		 * (0 means no delay programmed); convert to the common
		 * 100us units used everywhere else. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to ms for the wait helpers. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5558
/*
 * Program the cached pps_delays into the panel power sequencer
 * registers.  Backlight delays (T8/T9) are deliberately written as 1
 * because the driver performs those waits in software (see comment
 * below).  Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Raw clock feeding the PP reference divider computed below. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PPS register set for this platform; note pp_ctrl_reg
	 * is only assigned (and only used) on BXT. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power-cycle delay in the control register,
		 * in 100ms units and apparently 1-based — matches the (tmp-1)
		 * decoding in intel_dp_init_panel_power_sequencer(). */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	/* Read back and dump what actually landed in the registers. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5646
5647 /**
5648  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5649  * @dev: DRM device
5650  * @refresh_rate: RR to be programmed
5651  *
5652  * This function gets called when refresh rate (RR) has to be changed from
5653  * one frequency to another. Switches can be between high and low RR
5654  * supported by the panel or to any other RR based on media playback (in
5655  * this case, RR value needs to be passed from user space).
5656  *
5657  * The caller of this function needs to take a lock on dev_priv->drrs.
5658  */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	/* Default to the panel's fixed (high) refresh rate. */
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.dp is only set once DRRS has been enabled on a panel. */
	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* If the requested rate matches the panel's downclock mode,
	 * we are switching down; otherwise stay at the high rate.
	 * NOTE(review): downclock_mode is dereferenced without a NULL
	 * check — relies on DRRS init having found a downclock mode. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switch rates by reprogramming the link
	 * M/N values; M1/N1 is the high-RR set, M2/N2 the low-RR set. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7 toggles a refresh-rate-switch bit in PIPECONF
		 * instead (VLV uses a different bit position). */
		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5750
5751 /**
5752  * intel_edp_drrs_enable - init drrs struct if supported
5753  * @intel_dp: DP struct
5754  *
5755  * Initializes frontbuffer_bits and drrs.dp
5756  */
5757 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5758 {
5759         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5760         struct drm_i915_private *dev_priv = dev->dev_private;
5761         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5762         struct drm_crtc *crtc = dig_port->base.base.crtc;
5763         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5764
5765         if (!intel_crtc->config->has_drrs) {
5766                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5767                 return;
5768         }
5769
5770         mutex_lock(&dev_priv->drrs.mutex);
5771         if (WARN_ON(dev_priv->drrs.dp)) {
5772                 DRM_ERROR("DRRS already enabled\n");
5773                 goto unlock;
5774         }
5775
5776         dev_priv->drrs.busy_frontbuffer_bits = 0;
5777
5778         dev_priv->drrs.dp = intel_dp;
5779
5780 unlock:
5781         mutex_unlock(&dev_priv->drrs.mutex);
5782 }
5783
5784 /**
5785  * intel_edp_drrs_disable - Disable DRRS
5786  * @intel_dp: DP struct
5787  *
5788  */
5789 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5790 {
5791         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5792         struct drm_i915_private *dev_priv = dev->dev_private;
5793         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5794         struct drm_crtc *crtc = dig_port->base.base.crtc;
5795         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5796
5797         if (!intel_crtc->config->has_drrs)
5798                 return;
5799
5800         mutex_lock(&dev_priv->drrs.mutex);
5801         if (!dev_priv->drrs.dp) {
5802                 mutex_unlock(&dev_priv->drrs.mutex);
5803                 return;
5804         }
5805
5806         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5807                 intel_dp_set_drrs_state(dev_priv->dev,
5808                         intel_dp->attached_connector->panel.
5809                         fixed_mode->vrefresh);
5810
5811         dev_priv->drrs.dp = NULL;
5812         mutex_unlock(&dev_priv->drrs.mutex);
5813
5814         cancel_delayed_work_sync(&dev_priv->drrs.work);
5815 }
5816
5817 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5818 {
5819         struct drm_i915_private *dev_priv =
5820                 container_of(work, typeof(*dev_priv), drrs.work.work);
5821         struct intel_dp *intel_dp;
5822
5823         mutex_lock(&dev_priv->drrs.mutex);
5824
5825         intel_dp = dev_priv->drrs.dp;
5826
5827         if (!intel_dp)
5828                 goto unlock;
5829
5830         /*
5831          * The delayed work can race with an invalidate hence we need to
5832          * recheck.
5833          */
5834
5835         if (dev_priv->drrs.busy_frontbuffer_bits)
5836                 goto unlock;
5837
5838         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5839                 intel_dp_set_drrs_state(dev_priv->dev,
5840                         intel_dp->attached_connector->panel.
5841                         downclock_mode->vrefresh);
5842
5843 unlock:
5844         mutex_unlock(&dev_priv->drrs.mutex);
5845 }
5846
5847 /**
5848  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5849  * @dev: DRM device
5850  * @frontbuffer_bits: frontbuffer plane tracking bits
5851  *
 * This function gets called every time rendering on the given planes starts.
5853  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5854  *
5855  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5856  */
5857 void intel_edp_drrs_invalidate(struct drm_device *dev,
5858                 unsigned frontbuffer_bits)
5859 {
5860         struct drm_i915_private *dev_priv = dev->dev_private;
5861         struct drm_crtc *crtc;
5862         enum i915_pipe pipe;
5863
5864         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5865                 return;
5866
5867         cancel_delayed_work(&dev_priv->drrs.work);
5868
5869         mutex_lock(&dev_priv->drrs.mutex);
5870         if (!dev_priv->drrs.dp) {
5871                 mutex_unlock(&dev_priv->drrs.mutex);
5872                 return;
5873         }
5874
5875         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5876         pipe = to_intel_crtc(crtc)->pipe;
5877
5878         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5879         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5880
5881         /* invalidate means busy screen hence upclock */
5882         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5883                 intel_dp_set_drrs_state(dev_priv->dev,
5884                                 dev_priv->drrs.dp->attached_connector->panel.
5885                                 fixed_mode->vrefresh);
5886
5887         mutex_unlock(&dev_priv->drrs.mutex);
5888 }
5889
5890 /**
5891  * intel_edp_drrs_flush - Restart Idleness DRRS
5892  * @dev: DRM device
5893  * @frontbuffer_bits: frontbuffer plane tracking bits
5894  *
5895  * This function gets called every time rendering on the given planes has
5896  * completed or flip on a crtc is completed. So DRRS should be upclocked
5897  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5898  * if no other planes are dirty.
5899  *
5900  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5901  */
5902 void intel_edp_drrs_flush(struct drm_device *dev,
5903                 unsigned frontbuffer_bits)
5904 {
5905         struct drm_i915_private *dev_priv = dev->dev_private;
5906         struct drm_crtc *crtc;
5907         enum i915_pipe pipe;
5908
5909         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5910                 return;
5911
5912         cancel_delayed_work(&dev_priv->drrs.work);
5913
5914         mutex_lock(&dev_priv->drrs.mutex);
5915         if (!dev_priv->drrs.dp) {
5916                 mutex_unlock(&dev_priv->drrs.mutex);
5917                 return;
5918         }
5919
5920         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5921         pipe = to_intel_crtc(crtc)->pipe;
5922
5923         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5924         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5925
5926         /* flush means busy screen hence upclock */
5927         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5928                 intel_dp_set_drrs_state(dev_priv->dev,
5929                                 dev_priv->drrs.dp->attached_connector->panel.
5930                                 fixed_mode->vrefresh);
5931
5932         /*
5933          * flush also means no more activity hence schedule downclock, if all
5934          * other fbs are quiescent too
5935          */
5936         if (!dev_priv->drrs.busy_frontbuffer_bits)
5937                 schedule_delayed_work(&dev_priv->drrs.work,
5938                                 msecs_to_jiffies(1000));
5939         mutex_unlock(&dev_priv->drrs.mutex);
5940 }
5941
5942 /**
5943  * DOC: Display Refresh Rate Switching (DRRS)
5944  *
5945  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5947  * dynamically, based on the usage scenario. This feature is applicable
5948  * for internal panels.
5949  *
5950  * Indication that the panel supports DRRS is given by the panel EDID, which
5951  * would list multiple refresh rates for one resolution.
5952  *
5953  * DRRS is of 2 types - static and seamless.
5954  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5955  * (may appear as a blink on screen) and is used in dock-undock scenario.
5956  * Seamless DRRS involves changing RR without any visual effect to the user
5957  * and can be used during normal system usage. This is done by programming
5958  * certain registers.
5959  *
5960  * Support for static/seamless DRRS may be indicated in the VBT based on
5961  * inputs from the panel spec.
5962  *
5963  * DRRS saves power by switching to low RR based on usage scenarios.
5964  *
5965  * eDP DRRS:-
5966  *        The implementation is based on frontbuffer tracking implementation.
5967  * When there is a disturbance on the screen triggered by user activity or a
5968  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5969  * When there is no movement on screen, after a timeout of 1 second, a switch
5970  * to low RR is made.
5971  *        For integration with frontbuffer tracking code,
5972  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5973  *
5974  * DRRS can be further extended to support other internal panels and also
5975  * the scenario of video playback wherein RR is set based on the rate
5976  * requested by userspace.
5977  */
5978
5979 /**
5980  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5981  * @intel_connector: eDP connector
5982  * @fixed_mode: preferred mode of panel
5983  *
 * This function is called only once at driver load to initialize basic
5985  * DRRS stuff.
5986  *
5987  * Returns:
5988  * Downclock mode if panel supports it, else return NULL.
5989  * DRRS support is determined by the presence of downclock mode (apart
5990  * from VBT setting).
5991  */
5992 static struct drm_display_mode *
5993 intel_dp_drrs_init(struct intel_connector *intel_connector,
5994                 struct drm_display_mode *fixed_mode)
5995 {
5996         struct drm_connector *connector = &intel_connector->base;
5997         struct drm_device *dev = connector->dev;
5998         struct drm_i915_private *dev_priv = dev->dev_private;
5999         struct drm_display_mode *downclock_mode = NULL;
6000
6001         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6002         lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
6003
6004         if (INTEL_INFO(dev)->gen <= 6) {
6005                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
6006                 return NULL;
6007         }
6008
6009         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6010                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6011                 return NULL;
6012         }
6013
6014         downclock_mode = intel_find_panel_downclock
6015                                         (dev, fixed_mode, connector);
6016
6017         if (!downclock_mode) {
6018                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6019                 return NULL;
6020         }
6021
6022         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6023
6024         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6025         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6026         return downclock_mode;
6027 }
6028
/*
 * One-time eDP connector setup: sanitize the VDD state, cache DPCD and
 * EDID, determine the fixed (and optional downclock) panel mode, and
 * initialize panel/backlight state.  Returns false when the panel looks
 * like a ghost (DPCD read fails), true otherwise.  A non-eDP connector
 * is a no-op success.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum i915_pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but unusable: free it and store an
			 * error pointer so later code can tell the cases
			 * apart. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* intel_connector->edid now owns the EDID (or holds an ERR_PTR). */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
#if 0
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
#endif

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
6137
6138 bool
6139 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6140                         struct intel_connector *intel_connector)
6141 {
6142         struct drm_connector *connector = &intel_connector->base;
6143         struct intel_dp *intel_dp = &intel_dig_port->dp;
6144         struct intel_encoder *intel_encoder = &intel_dig_port->base;
6145         struct drm_device *dev = intel_encoder->base.dev;
6146         struct drm_i915_private *dev_priv = dev->dev_private;
6147         enum port port = intel_dig_port->port;
6148         int type;
6149
6150         intel_dp->pps_pipe = INVALID_PIPE;
6151
6152         /* intel_dp vfuncs */
6153         if (INTEL_INFO(dev)->gen >= 9)
6154                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6155         else if (IS_VALLEYVIEW(dev))
6156                 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6157         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6158                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6159         else if (HAS_PCH_SPLIT(dev))
6160                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6161         else
6162                 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6163
6164         if (INTEL_INFO(dev)->gen >= 9)
6165                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6166         else
6167                 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6168
6169         /* Preserve the current hw state. */
6170         intel_dp->DP = I915_READ(intel_dp->output_reg);
6171         intel_dp->attached_connector = intel_connector;
6172
6173         if (intel_dp_is_edp(dev, port))
6174                 type = DRM_MODE_CONNECTOR_eDP;
6175         else
6176                 type = DRM_MODE_CONNECTOR_DisplayPort;
6177
6178         /*
6179          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6180          * for DP the encoder type can be set by the caller to
6181          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6182          */
6183         if (type == DRM_MODE_CONNECTOR_eDP)
6184                 intel_encoder->type = INTEL_OUTPUT_EDP;
6185
6186         /* eDP only on port B and/or C on vlv/chv */
6187         if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6188                     port != PORT_B && port != PORT_C))
6189                 return false;
6190
6191         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6192                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6193                         port_name(port));
6194
6195         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6196         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6197
6198         connector->interlace_allowed = true;
6199         connector->doublescan_allowed = 0;
6200
6201         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6202                           edp_panel_vdd_work);
6203
6204         intel_connector_attach_encoder(intel_connector, intel_encoder);
6205         drm_connector_register(connector);
6206
6207         if (HAS_DDI(dev))
6208                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6209         else
6210                 intel_connector->get_hw_state = intel_connector_get_hw_state;
6211         intel_connector->unregister = intel_dp_connector_unregister;
6212
6213         /* Set up the hotplug pin. */
6214         switch (port) {
6215         case PORT_A:
6216                 intel_encoder->hpd_pin = HPD_PORT_A;
6217                 break;
6218         case PORT_B:
6219                 intel_encoder->hpd_pin = HPD_PORT_B;
6220                 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6221                         intel_encoder->hpd_pin = HPD_PORT_A;
6222                 break;
6223         case PORT_C:
6224                 intel_encoder->hpd_pin = HPD_PORT_C;
6225                 break;
6226         case PORT_D:
6227                 intel_encoder->hpd_pin = HPD_PORT_D;
6228                 break;
6229         case PORT_E:
6230                 intel_encoder->hpd_pin = HPD_PORT_E;
6231                 break;
6232         default:
6233                 BUG();
6234         }
6235
6236         if (is_edp(intel_dp)) {
6237                 pps_lock(intel_dp);
6238                 intel_dp_init_panel_power_timestamps(intel_dp);
6239                 if (IS_VALLEYVIEW(dev))
6240                         vlv_initial_power_sequencer_setup(intel_dp);
6241                 else
6242                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
6243                 pps_unlock(intel_dp);
6244         }
6245
6246         intel_dp_aux_init(intel_dp, intel_connector);
6247
6248         /* init MST on ports that can support it */
6249         if (HAS_DP_MST(dev) &&
6250             (port == PORT_B || port == PORT_C || port == PORT_D))
6251                 intel_dp_mst_encoder_init(intel_dig_port,
6252                                           intel_connector->base.base.id);
6253
6254         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6255                 drm_dp_aux_unregister(&intel_dp->aux);
6256                 if (is_edp(intel_dp)) {
6257                         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6258                         /*
6259                          * vdd might still be enabled do to the delayed vdd off.
6260                          * Make sure vdd is actually turned off here.
6261                          */
6262                         pps_lock(intel_dp);
6263                         edp_panel_vdd_off_sync(intel_dp);
6264                         pps_unlock(intel_dp);
6265                 }
6266                 drm_connector_unregister(connector);
6267                 drm_connector_cleanup(connector);
6268                 return false;
6269         }
6270
6271         intel_dp_add_properties(intel_dp, connector);
6272
6273         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6274          * 0xd.  Failure to do so will result in spurious interrupts being
6275          * generated on the port when a cable is not attached.
6276          */
6277         if (IS_G4X(dev) && !IS_GM45(dev)) {
6278                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6279                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6280         }
6281
6282         i915_debugfs_connector_add(connector);
6283
6284         return true;
6285 }
6286
6287 void
6288 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6289 {
6290         struct drm_i915_private *dev_priv = dev->dev_private;
6291         struct intel_digital_port *intel_dig_port;
6292         struct intel_encoder *intel_encoder;
6293         struct drm_encoder *encoder;
6294         struct intel_connector *intel_connector;
6295
6296         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6297         if (!intel_dig_port)
6298                 return;
6299
6300         intel_connector = intel_connector_alloc();
6301         if (!intel_connector)
6302                 goto err_connector_alloc;
6303
6304         intel_encoder = &intel_dig_port->base;
6305         encoder = &intel_encoder->base;
6306
6307         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6308                          DRM_MODE_ENCODER_TMDS);
6309
6310         intel_encoder->compute_config = intel_dp_compute_config;
6311         intel_encoder->disable = intel_disable_dp;
6312         intel_encoder->get_hw_state = intel_dp_get_hw_state;
6313         intel_encoder->get_config = intel_dp_get_config;
6314         intel_encoder->suspend = intel_dp_encoder_suspend;
6315         if (IS_CHERRYVIEW(dev)) {
6316                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6317                 intel_encoder->pre_enable = chv_pre_enable_dp;
6318                 intel_encoder->enable = vlv_enable_dp;
6319                 intel_encoder->post_disable = chv_post_disable_dp;
6320                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6321         } else if (IS_VALLEYVIEW(dev)) {
6322                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6323                 intel_encoder->pre_enable = vlv_pre_enable_dp;
6324                 intel_encoder->enable = vlv_enable_dp;
6325                 intel_encoder->post_disable = vlv_post_disable_dp;
6326         } else {
6327                 intel_encoder->pre_enable = g4x_pre_enable_dp;
6328                 intel_encoder->enable = g4x_enable_dp;
6329                 if (INTEL_INFO(dev)->gen >= 5)
6330                         intel_encoder->post_disable = ilk_post_disable_dp;
6331         }
6332
6333         intel_dig_port->port = port;
6334         intel_dig_port->dp.output_reg = output_reg;
6335
6336         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6337         if (IS_CHERRYVIEW(dev)) {
6338                 if (port == PORT_D)
6339                         intel_encoder->crtc_mask = 1 << 2;
6340                 else
6341                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6342         } else {
6343                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6344         }
6345         intel_encoder->cloneable = 0;
6346
6347         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6348         dev_priv->hotplug.irq_port[port] = intel_dig_port;
6349
6350         if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6351                 goto err_init_connector;
6352
6353         return;
6354
6355 err_init_connector:
6356         drm_encoder_cleanup(encoder);
6357         kfree(intel_connector);
6358 err_connector_alloc:
6359         kfree(intel_dig_port);
6360
6361         return;
6362 }
6363
/*
 * intel_dp_mst_suspend - quiesce DP MST topology managers before suspend
 *
 * Walks every digital port and, for DP outputs with an active MST
 * topology, suspends the topology manager.
 *
 * NOTE(review): compiled out (#if 0) in this DragonFly port — DP MST
 * suspend support is not wired into the suspend path here yet.
 */
#if 0
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			/* Only ports that negotiated MST have a manager to suspend. */
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}
#endif
6385
/*
 * intel_dp_mst_resume - restore DP MST topology state after resume
 * @dev: drm device
 *
 * Walks every digital port looking for DP outputs.  The actual topology
 * manager resume (and the fallback MST status re-check when resume
 * fails) is compiled out (#if 0) in this DragonFly port, so the loop
 * currently has no effect beyond iterating the ports.
 */
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
#if 0
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
#endif
		}
	}
}