2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
41 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
46 /* Compliance test status bits */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
48 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
/*
 * Per-platform DPLL divider settings for the fixed DP link rates, plus the
 * per-platform source link-rate tables (kHz).
 * NOTE(review): the link-rate first member of several gen4/pch/vlv entries
 * appears to be missing from this excerpt — verify against upstream.
 */
57 static const struct dp_link_dpll gen4_dpll[] = {
59 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
61 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
64 static const struct dp_link_dpll pch_dpll[] = {
66 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
68 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
71 static const struct dp_link_dpll vlv_dpll[] = {
73 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
75 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
79 * CHV supports eDP 1.4 that have more link rates.
80 * Below only provides the fixed rate but exclude variable rate.
82 static const struct dp_link_dpll chv_dpll[] = {
84 * CHV requires to program fractional division for m2.
85 * m2 is stored in fixed point format using formula below
86 * (m2_int << 22) | m2_fraction
88 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
89 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 { 270000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 { 540000, /* m2_int = 27, m2_fraction = 0 */
93 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Link rates a source can drive; BXT/SKL expose intermediate rates. */
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
103 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104 * @intel_dp: DP struct
106 * If a CPU or PCH DP output is attached to an eDP panel, this function
107 * will return true, and false otherwise.
109 static bool is_edp(struct intel_dp *intel_dp)
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120 return intel_dig_port->base.base.dev;
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 enum i915_pipe pipe);
/*
 * Return a 4-bit mask of the DP lanes NOT used by a @lane_count
 * configuration (lanes are always allocated from lane 0 upward).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1 << lane_count) - 1;

	return ~used & 0xf;
}
/*
 * Return the sink's max link bandwidth code from DPCD, warning about and
 * clamping unrecognized values to 1.62 Gbps.
 * NOTE(review): several case labels / brace lines appear to be missing from
 * this excerpt — confirm the full switch against upstream.
 */
141 intel_dp_max_link_bw(struct intel_dp *intel_dp)
143 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145 switch (max_link_bw) {
146 case DP_LINK_BW_1_62:
151 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
/* Fallback: treat anything unknown as the lowest rate. */
153 max_link_bw = DP_LINK_BW_1_62;
/*
 * Max usable lane count: the minimum of what the source port and the sink
 * (per DPCD) support.
 * NOTE(review): the assignments to source_max appear to be missing from this
 * excerpt; the DDI port-A check presumably limits source_max when the
 * DDI_A_4_LANES fuse/strap is clear — confirm against upstream.
 */
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
161 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162 struct drm_device *dev = intel_dig_port->base.base.dev;
163 u8 source_max, sink_max;
166 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
167 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
170 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
172 return min(source_max, sink_max);
176 * The units on the numbers in the next two are... bizarre. Examples will
177 * make it clearer; this one parallels an example in the eDP spec.
179 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181 * 270000 * 1 * 8 / 10 == 216000
183 * The actual data capacity of that configuration is 2.16Gbit/s, so the
184 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
185 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186 * 119000. At 18bpp that's 2142000 kilobits per second.
188 * Thus the strange-looking division by 10 in intel_dp_link_required, to
189 * get the result in decakilobits instead of kilobits.
/*
 * Bandwidth required by a mode, in "decakilobits" (10 kbit/s units):
 * @pixel_clock is in kHz and @bpp in bits per pixel; the product is
 * rounded up to the next multiple of 10 kbit/s so we never under-budget.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbps = pixel_clock * bpp;

	return (kbps + 9) / 10;
}
/*
 * Data capacity of a link configuration in decakilobits: 8b/10b channel
 * coding means only 80% of the raw symbol rate carries pixel data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int symbol_rate = max_link_clock * max_lanes;

	return symbol_rate * 8 / 10;
}
/*
 * Connector .mode_valid hook: reject modes that exceed the panel's fixed
 * mode (eDP), the link's data capacity, the minimum dotclock, or that use
 * double-clocking.
 * NOTE(review): the early-return lines for the fixed-mode size checks and
 * the final MODE_OK return appear to be missing from this excerpt.
 */
204 static enum drm_mode_status
205 intel_dp_mode_valid(struct drm_connector *connector,
206 struct drm_display_mode *mode)
208 struct intel_dp *intel_dp = intel_attached_dp(connector);
209 struct intel_connector *intel_connector = to_intel_connector(connector);
210 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
211 int target_clock = mode->clock;
212 int max_rate, mode_rate, max_lanes, max_link_clock;
/* For eDP, validate against (and account for) the panel's fixed mode. */
214 if (is_edp(intel_dp) && fixed_mode) {
215 if (mode->hdisplay > fixed_mode->hdisplay)
218 if (mode->vdisplay > fixed_mode->vdisplay)
221 target_clock = fixed_mode->clock;
/* Compare mode bandwidth (at 18bpp minimum) against link capacity. */
224 max_link_clock = intel_dp_max_link_rate(intel_dp);
225 max_lanes = intel_dp_max_lane_count(intel_dp);
227 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
228 mode_rate = intel_dp_link_required(target_clock, 18);
230 if (mode_rate > max_rate)
231 return MODE_CLOCK_HIGH;
233 if (mode->clock < 10000)
234 return MODE_CLOCK_LOW;
236 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
237 return MODE_H_ILLEGAL;
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register value,
 * MSB first (byte 0 lands in bits 31:24).
 *
 * Clamp @src_bytes to 4: without it, i > 3 makes (3-i)*8 negative and the
 * shift is undefined behavior, besides reading past the 4 bytes a single
 * data register can hold.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);

	return v;
}
/*
 * Unpack one 32-bit AUX data-register value into up to 4 bytes, MSB first
 * (bits 31:24 become dst[0]).
 *
 * Clamp @dst_bytes to 4: a single register holds only 4 bytes, and i > 3
 * would make the shift count (3-i)*8 negative (undefined behavior).
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
264 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265 struct intel_dp *intel_dp);
267 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268 struct intel_dp *intel_dp);
/*
 * Acquire pps_mutex while holding an AUX power-domain reference for this
 * port. Must be paired with pps_unlock(); the domain reference is taken
 * before locking so the ordering documented in vlv_power_sequencer_reset()
 * holds.
 */
270 static void pps_lock(struct intel_dp *intel_dp)
272 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
273 struct intel_encoder *encoder = &intel_dig_port->base;
274 struct drm_device *dev = encoder->base.dev;
275 struct drm_i915_private *dev_priv = dev->dev_private;
276 enum intel_display_power_domain power_domain;
279 * See vlv_power_sequencer_reset() why we need
280 * a power domain reference here.
282 power_domain = intel_display_port_aux_power_domain(encoder);
283 intel_display_power_get(dev_priv, power_domain);
285 mutex_lock(&dev_priv->pps_mutex);
/*
 * Counterpart of pps_lock(): drop pps_mutex first, then release the AUX
 * power-domain reference (reverse acquisition order).
 */
288 static void pps_unlock(struct intel_dp *intel_dp)
290 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
291 struct intel_encoder *encoder = &intel_dig_port->base;
292 struct drm_device *dev = encoder->base.dev;
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 enum intel_display_power_domain power_domain;
296 mutex_unlock(&dev_priv->pps_mutex);
298 power_domain = intel_display_port_aux_power_domain(encoder);
299 intel_display_power_put(dev_priv, power_domain);
/*
 * Force the chosen VLV/CHV panel power sequencer to lock onto this port by
 * briefly enabling and disabling the port with a minimal 1-lane config.
 * Requires the pipe's DPLL; it is force-enabled (and on CHV the PHY common
 * lane powergated on) for the duration if not already running.
 */
303 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 struct drm_device *dev = intel_dig_port->base.base.dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308 enum i915_pipe pipe = intel_dp->pps_pipe;
309 bool pll_enabled, release_cl_override = false;
310 enum dpio_phy phy = DPIO_PHY(pipe);
311 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
/* Refuse to kick while the port is live; bail out (early return presumed). */
314 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
316 pipe_name(pipe), port_name(intel_dig_port->port)))
319 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320 pipe_name(pipe), port_name(intel_dig_port->port));
322 /* Preserve the BIOS-computed detected bit. This is
323 * supposed to be read-only.
325 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327 DP |= DP_PORT_WIDTH(1);
328 DP |= DP_LINK_TRAIN_PAT_1;
330 if (IS_CHERRYVIEW(dev))
331 DP |= DP_PIPE_SELECT_CHV(pipe);
332 else if (pipe == PIPE_B)
333 DP |= DP_PIPEB_SELECT;
335 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
338 * The DPLL for the pipe must be enabled for this to work.
339 * So enable temporarily it if it's not already enabled.
342 release_cl_override = IS_CHERRYVIEW(dev) &&
343 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
345 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
350 * Similar magic as in intel_dp_enable_port().
351 * We _must_ do this port enable + disable trick
352 * to make this power sequencer lock onto the port.
353 * Otherwise even VDD force bit won't work.
355 I915_WRITE(intel_dp->output_reg, DP);
356 POSTING_READ(intel_dp->output_reg);
358 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 POSTING_READ(intel_dp->output_reg);
361 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 POSTING_READ(intel_dp->output_reg);
/* Undo the temporary PLL / common-lane overrides, if we applied them. */
365 vlv_force_pll_off(dev, pipe);
367 if (release_cl_override)
368 chv_phy_powergate_ch(dev_priv, phy, ch, false);
/*
 * Return the pipe whose panel power sequencer this eDP port owns,
 * allocating a free one (A or B) if none is assigned yet. Caller must hold
 * pps_mutex. Newly assigned sequencers are initialized and "kicked" so VDD
 * force works.
 */
372 static enum i915_pipe
373 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
375 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
376 struct drm_device *dev = intel_dig_port->base.base.dev;
377 struct drm_i915_private *dev_priv = dev->dev_private;
378 struct intel_encoder *encoder;
379 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
382 lockdep_assert_held(&dev_priv->pps_mutex);
384 /* We should never land here with regular DP ports */
385 WARN_ON(!is_edp(intel_dp));
/* Fast path: an assignment already exists. */
387 if (intel_dp->pps_pipe != INVALID_PIPE)
388 return intel_dp->pps_pipe;
391 * We don't have power sequencer currently.
392 * Pick one that's not used by other ports.
394 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
396 struct intel_dp *tmp;
398 if (encoder->type != INTEL_OUTPUT_EDP)
401 tmp = enc_to_intel_dp(&encoder->base);
403 if (tmp->pps_pipe != INVALID_PIPE)
404 pipes &= ~(1 << tmp->pps_pipe);
408 * Didn't find one. This should not happen since there
409 * are two power sequencers and up to two eDP ports.
411 if (WARN_ON(pipes == 0))
/* Claim the lowest free pipe, stealing it from any stale user. */
414 pipe = ffs(pipes) - 1;
416 vlv_steal_power_sequencer(dev, pipe);
417 intel_dp->pps_pipe = pipe;
419 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
420 pipe_name(intel_dp->pps_pipe),
421 port_name(intel_dig_port->port));
423 /* init power sequencer on this pipe and port */
424 intel_dp_init_panel_power_sequencer(dev, intel_dp);
425 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
428 * Even vdd force doesn't work until we've made
429 * the power sequencer lock in on the port.
431 vlv_power_sequencer_kick(intel_dp);
433 return intel_dp->pps_pipe;
436 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 enum i915_pipe pipe);
/*
 * vlv_pipe_check predicates used by vlv_initial_pps_pipe(): panel power on,
 * VDD force on, or any pipe (unconditional match — body not visible here).
 */
439 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
442 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
445 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
448 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
451 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/*
 * Scan pipes A/B for a power sequencer whose port-select field matches
 * @port and that also satisfies @pipe_check; returns the pipe, or
 * (presumably) INVALID_PIPE when none matches — the return lines are not
 * visible in this excerpt.
 */
457 static enum i915_pipe
458 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
460 vlv_pipe_check pipe_check)
464 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
465 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
466 PANEL_PORT_SELECT_MASK;
468 if (port_sel != PANEL_PORT_SELECT_VLV(port))
471 if (!pipe_check(dev_priv, pipe))
/*
 * At init, adopt whichever power sequencer the BIOS left driving this port,
 * preferring (in order) one with the panel on, one with VDD on, then any
 * with the right port select. Caller must hold pps_mutex.
 */
481 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
484 struct drm_device *dev = intel_dig_port->base.base.dev;
485 struct drm_i915_private *dev_priv = dev->dev_private;
486 enum port port = intel_dig_port->port;
488 lockdep_assert_held(&dev_priv->pps_mutex);
490 /* try to find a pipe with this port selected */
491 /* first pick one where the panel is on */
492 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 /* didn't find one? pick one where vdd is on */
495 if (intel_dp->pps_pipe == INVALID_PIPE)
496 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
497 vlv_pipe_has_vdd_on);
498 /* didn't find one? pick one with just the correct port */
499 if (intel_dp->pps_pipe == INVALID_PIPE)
500 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
503 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
504 if (intel_dp->pps_pipe == INVALID_PIPE) {
505 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
510 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
511 port_name(port), pipe_name(intel_dp->pps_pipe));
/* Sync our software PPS state and the hardware registers to this pipe. */
513 intel_dp_init_panel_power_sequencer(dev, intel_dp);
514 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * Forget all eDP power-sequencer assignments (e.g. across a power-well
 * reset); the next user re-picks via vlv_power_sequencer_pipe(). See the
 * embedded comment for why pps_mutex cannot be taken here.
 */
517 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
519 struct drm_device *dev = dev_priv->dev;
520 struct intel_encoder *encoder;
522 if (WARN_ON(!IS_VALLEYVIEW(dev)))
526 * We can't grab pps_mutex here due to deadlock with power_domain
527 * mutex when power_domain functions are called while holding pps_mutex.
528 * That also means that in order to use pps_pipe the code needs to
529 * hold both a power domain reference and pps_mutex, and the power domain
530 * reference get/put must be done while _not_ holding pps_mutex.
531 * pps_{lock,unlock}() do these steps in the correct order, so one
532 * should use them always.
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
536 struct intel_dp *intel_dp;
538 if (encoder->type != INTEL_OUTPUT_EDP)
541 intel_dp = enc_to_intel_dp(&encoder->base);
542 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Panel-power CONTROL register for this port: BXT, PCH-split, and VLV/CHV
 * (per-pipe) variants. NOTE(review): the BXT platform check line is not
 * visible in this excerpt.
 */
546 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
551 return BXT_PP_CONTROL(0);
552 else if (HAS_PCH_SPLIT(dev))
553 return PCH_PP_CONTROL;
555 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/*
 * Panel-power STATUS register for this port; mirrors _pp_ctrl_reg()'s
 * platform selection.
 */
558 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
563 return BXT_PP_STATUS(0);
564 else if (HAS_PCH_SPLIT(dev))
565 return PCH_PP_STATUS;
567 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
/*
 * Reboot notifier: power the eDP panel off before restart and wait the
 * power-cycle delay, so the panel's T12 timing is honored on the next boot.
 * Only relevant when the panel PM state is not otherwise tracked.
 */
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
576 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 struct drm_i915_private *dev_priv = dev->dev_private;
/* Only act for eDP panels, and only on an actual restart. */
581 if (!is_edp(intel_dp) || code != SYS_RESTART)
586 if (IS_VALLEYVIEW(dev)) {
587 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
588 u32 pp_ctrl_reg, pp_div_reg;
591 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
592 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
593 pp_div = I915_READ(pp_div_reg);
594 pp_div &= PP_REFERENCE_DIVIDER_MASK;
596 /* 0x1F write to PP_DIV_REG sets max cycle delay */
597 I915_WRITE(pp_div_reg, pp_div | 0x1F);
598 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
599 msleep(intel_dp->panel_power_cycle_delay);
602 pps_unlock(intel_dp);
/*
 * Is the panel powered on (PP_STATUS.PP_ON)? On VLV with no sequencer
 * assigned the answer is presumably "false" without touching registers —
 * the early-return line is not visible here. Caller holds pps_mutex.
 */
608 static bool edp_have_panel_power(struct intel_dp *intel_dp)
610 struct drm_device *dev = intel_dp_to_dev(intel_dp);
611 struct drm_i915_private *dev_priv = dev->dev_private;
613 lockdep_assert_held(&dev_priv->pps_mutex);
615 if (IS_VALLEYVIEW(dev) &&
616 intel_dp->pps_pipe == INVALID_PIPE)
619 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/*
 * Is VDD force currently asserted (PP_CONTROL.EDP_FORCE_VDD)? Same VLV
 * no-sequencer caveat as edp_have_panel_power(). Caller holds pps_mutex.
 */
622 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
625 struct drm_i915_private *dev_priv = dev->dev_private;
627 lockdep_assert_held(&dev_priv->pps_mutex);
629 if (IS_VALLEYVIEW(dev) &&
630 intel_dp->pps_pipe == INVALID_PIPE)
633 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/*
 * Sanity check before AUX traffic: warn loudly if an eDP panel has neither
 * panel power nor VDD force — AUX will not work in that state.
 */
637 intel_dp_check_edp(struct intel_dp *intel_dp)
639 struct drm_device *dev = intel_dp_to_dev(intel_dp);
640 struct drm_i915_private *dev_priv = dev->dev_private;
642 if (!is_edp(intel_dp))
645 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
646 WARN(1, "eDP powered off while attempting aux channel communication.\n");
647 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
648 I915_READ(_pp_stat_reg(intel_dp)),
649 I915_READ(_pp_ctrl_reg(intel_dp)));
/*
 * Wait (10 ms) for the AUX channel to go non-busy, either sleeping on the
 * gmbus wait queue (IRQ mode) or polling atomically; returns the final
 * channel-control status. Logs an error if the hardware never signalled.
 */
654 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
656 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
657 struct drm_device *dev = intel_dig_port->base.base.dev;
658 struct drm_i915_private *dev_priv = dev->dev_private;
659 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
/* C: "send no longer busy" — re-reads status each evaluation. */
663 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
665 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
666 msecs_to_jiffies_timeout(10));
668 done = wait_for_atomic(C, 10) == 0;
670 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/*
 * gen4-class AUX clock divider: hrawclk / 2 targets the ~2 MHz AUX clock;
 * only divider index 0 exists (non-zero index returns 0 = "no more").
 */
677 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
680 struct drm_device *dev = intel_dig_port->base.base.dev;
683 * The clock divider is based off the hrawclk, and would like to run at
684 * 2MHz. So, take the hrawclk value and divide by 2 and use that
686 return index ? 0 : intel_hrawclk(dev) / 2;
/*
 * ILK-class AUX clock divider: port A (eDP) runs off cdclk, the PCH ports
 * off the PCH rawclk. NOTE(review): the index handling lines are not
 * visible in this excerpt.
 */
689 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
693 struct drm_i915_private *dev_priv = dev->dev_private;
698 if (intel_dig_port->port == PORT_A) {
699 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
702 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * HSW/BDW AUX clock divider: port A off cdclk; LPT:H PCH gets a workaround
 * divider (lines not visible here); otherwise PCH rawclk / 2.
 */
706 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
709 struct drm_device *dev = intel_dig_port->base.base.dev;
710 struct drm_i915_private *dev_priv = dev->dev_private;
712 if (intel_dig_port->port == PORT_A) {
715 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
716 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
717 /* Workaround for non-ULT HSW */
724 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * VLV AUX clock divider: a single fixed divider of 100 (index 0); any
 * other index means "no more dividers to try".
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
/*
 * SKL doesn't need us to program the AUX clock divider (hardware derives
 * the clock from CDCLK automatically); return a dummy divider of 1 for
 * index 0 purely so the shared get_aux_clock_divider loop still runs once.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
/*
 * Build the AUX_CH_CTL value for a send on pre-SKL hardware: busy bit,
 * optional done-IRQ, timeout selection (600us for BDW eDP, else 400us),
 * message size, precharge time, and the bit-clock divider.
 * NOTE(review): the precharge assignment lines are not visible in this
 * excerpt.
 */
743 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
746 uint32_t aux_clock_divider)
748 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
749 struct drm_device *dev = intel_dig_port->base.base.dev;
750 uint32_t precharge, timeout;
757 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
758 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
760 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
762 return DP_AUX_CH_CTL_SEND_BUSY |
764 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
765 DP_AUX_CH_CTL_TIME_OUT_ERROR |
767 DP_AUX_CH_CTL_RECEIVE_ERROR |
768 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
769 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
770 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/*
 * SKL variant of the AUX_CH_CTL send value: fixed 1600us timeout and a
 * 32-cycle sync pulse, no divider field (hardware derives the clock).
 */
773 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778 return DP_AUX_CH_CTL_SEND_BUSY |
780 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
781 DP_AUX_CH_CTL_TIME_OUT_ERROR |
782 DP_AUX_CH_CTL_TIME_OUT_1600us |
783 DP_AUX_CH_CTL_RECEIVE_ERROR |
784 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
785 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/*
 * Low-level AUX channel transaction: write up to 20 bytes from @send,
 * trigger the transfer, wait for completion, and unpack up to @recv_size
 * reply bytes into @recv. Retries across clock dividers and up to 5 sends
 * per divider (DP spec requires at least 3). Holds VDD, a PM QoS request,
 * and pps_lock around the hardware access. Returns received byte count or
 * a negative error (error paths partly not visible in this excerpt).
 */
789 intel_dp_aux_ch(struct intel_dp *intel_dp,
790 const uint8_t *send, int send_bytes,
791 uint8_t *recv, int recv_size)
793 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
794 struct drm_device *dev = intel_dig_port->base.base.dev;
795 struct drm_i915_private *dev_priv = dev->dev_private;
796 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
797 uint32_t ch_data = ch_ctl + 4;
798 uint32_t aux_clock_divider;
799 int i, ret, recv_bytes;
/* disable_aux_irq is the module tunable declared at the top of the file. */
803 bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
805 bool has_aux_irq = HAS_AUX_IRQ(dev);
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
814 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
817 vdd = edp_panel_vdd_on(intel_dp);
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
825 intel_dp_check_edp(intel_dp);
827 /* Try to wait for any previous AUX channel activity */
828 for (try = 0; try < 3; try++) {
829 status = I915_READ_NOTRACE(ch_ctl);
830 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
/* Still busy after 3 tries: warn once per distinct stuck status value. */
836 static u32 last_status = -1;
837 const u32 status = I915_READ(ch_ctl);
839 if (status != last_status) {
840 WARN(1, "dp_aux_ch not started status 0x%08x\n",
842 last_status = status;
849 /* Only 5 data registers! */
850 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
/* Outer loop: one pass per available AUX clock divider. */
855 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
856 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
861 /* Must try at least 3 times according to DP spec */
862 for (try = 0; try < 5; try++) {
863 /* Load the send data into the aux channel data registers */
864 for (i = 0; i < send_bytes; i += 4)
865 I915_WRITE(ch_data + i,
866 intel_dp_pack_aux(send + i,
869 /* Send the command and wait for it to complete */
870 I915_WRITE(ch_ctl, send_ctl);
872 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
874 /* Clear done status and any errors */
878 DP_AUX_CH_CTL_TIME_OUT_ERROR |
879 DP_AUX_CH_CTL_RECEIVE_ERROR);
881 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
884 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
885 * 400us delay required for errors and timeouts
886 * Timeout errors from the HW already meet this
887 * requirement so skip to next iteration
889 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
890 usleep_range(400, 500);
893 if (status & DP_AUX_CH_CTL_DONE)
898 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
899 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
905 /* Check for timeout or receive error.
906 * Timeouts occur when the sink is not connected
908 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
909 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
914 /* Timeouts occur when the device isn't connected, so they're
915 * "normal" -- don't fill the kernel log with these */
916 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
917 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
922 /* Unload any bytes sent back from the other side */
923 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
924 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
925 if (recv_bytes > recv_size)
926 recv_bytes = recv_size;
928 for (i = 0; i < recv_bytes; i += 4)
929 intel_dp_unpack_aux(I915_READ(ch_data + i),
930 recv + i, recv_bytes - i);
/* Unwind: relax PM QoS, drop VDD if we took it, release pps lock. */
934 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
937 edp_panel_vdd_off(intel_dp, false);
939 pps_unlock(intel_dp);
/*
 * drm_dp_aux .transfer callback: marshal a drm_dp_aux_msg into the 4-byte
 * AUX header (+ payload for writes), run it through intel_dp_aux_ch(), and
 * unpack the reply code and payload. An address-only message (msg->size ==
 * 0) sends just the 3 header address bytes.
 */
944 #define BARE_ADDRESS_SIZE 3
945 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
947 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
949 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950 uint8_t txbuf[20], rxbuf[20];
951 size_t txsize, rxsize;
/* Header: request nibble + 20-bit address + (length - 1). */
954 txbuf[0] = (msg->request << 4) |
955 ((msg->address >> 16) & 0xf);
956 txbuf[1] = (msg->address >> 8) & 0xff;
957 txbuf[2] = msg->address & 0xff;
958 txbuf[3] = msg->size - 1;
960 switch (msg->request & ~DP_AUX_I2C_MOT) {
961 case DP_AUX_NATIVE_WRITE:
962 case DP_AUX_I2C_WRITE:
963 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
964 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
965 rxsize = 2; /* 0 or 1 data bytes */
967 if (WARN_ON(txsize > 20))
970 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
972 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
974 msg->reply = rxbuf[0] >> 4;
977 /* Number of bytes written in a short write. */
978 ret = clamp_t(int, rxbuf[1], 0, msg->size);
980 /* Return payload size. */
986 case DP_AUX_NATIVE_READ:
987 case DP_AUX_I2C_READ:
988 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
989 rxsize = msg->size + 1;
991 if (WARN_ON(rxsize > 20))
994 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
996 msg->reply = rxbuf[0] >> 4;
998 * Assume happy day, and copy the data. The caller is
999 * expected to check msg->reply before touching it.
1001 * Return payload size.
1004 memcpy(msg->buffer, rxbuf + 1, ret);
/*
 * DragonFly/BSD single-byte I2C-over-AUX handler used by the iic DP AUX
 * bus: build a one-byte I2C read/write (with MOT held except on STOP), run
 * it via intel_dp_aux_ch(), and decode the native then the I2C reply
 * nibbles, retrying per the DP spec on DEFER.
 */
1017 intel_dp_i2c_aux_ch(struct device *adapter, int mode,
1018 uint8_t write_byte, uint8_t *read_byte)
1020 struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1021 struct intel_dp *intel_dp = data->priv;
1022 uint16_t address = data->address;
1030 intel_edp_panel_vdd_on(intel_dp);
1031 intel_dp_check_edp(intel_dp);
1032 /* Set up the command byte */
1033 if (mode & MODE_I2C_READ)
1034 msg[0] = DP_AUX_I2C_READ << 4;
1036 msg[0] = DP_AUX_I2C_WRITE << 4;
/* Keep Middle-Of-Transaction set until the caller issues a STOP. */
1038 if (!(mode & MODE_I2C_STOP))
1039 msg[0] |= DP_AUX_I2C_MOT << 4;
1041 msg[1] = address >> 8;
1045 case MODE_I2C_WRITE:
1047 msg[4] = write_byte;
1063 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1064 * required to retry at least seven times upon receiving AUX_DEFER
1065 * before giving up the AUX transaction.
1067 for (retry = 0; retry < 7; retry++) {
1068 ret = intel_dp_aux_ch(intel_dp,
1070 reply, reply_bytes);
1072 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
/* First decode the native AUX reply nibble. */
1076 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1077 case DP_AUX_NATIVE_REPLY_ACK:
1078 /* I2C-over-AUX Reply field is only valid
1079 * when paired with AUX ACK.
1082 case DP_AUX_NATIVE_REPLY_NACK:
1083 DRM_DEBUG_KMS("aux_ch native nack\n");
1086 case DP_AUX_NATIVE_REPLY_DEFER:
1088 * For now, just give more slack to branch devices. We
1089 * could check the DPCD for I2C bit rate capabilities,
1090 * and if available, adjust the interval. We could also
1091 * be more careful with DP-to-Legacy adapters where a
1092 * long legacy cable may force very low I2C bit rates.
1094 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1095 DP_DWN_STRM_PORT_PRESENT)
1096 usleep_range(500, 600);
1098 usleep_range(300, 400);
1101 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
/* Then the I2C-over-AUX reply nibble (valid only after native ACK). */
1107 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1108 case DP_AUX_I2C_REPLY_ACK:
1109 if (mode == MODE_I2C_READ) {
1110 *read_byte = reply[1];
1112 ret = 0; /* reply_bytes - 1 */
1114 case DP_AUX_I2C_REPLY_NACK:
1115 DRM_DEBUG_KMS("aux_i2c nack\n");
1118 case DP_AUX_I2C_REPLY_DEFER:
1119 DRM_DEBUG_KMS("aux_i2c defer\n");
1123 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1129 DRM_ERROR("too many retries, giving up\n");
/*
 * Set up the AUX channel for a port: pick aux_ch_ctl_reg per port (SKL
 * port E borrows another port's AUX channel per VBT), wire up the
 * drm_dp_aux transfer hook, and register the BSD iic DP AUX bus plus a
 * sysfs link from the connector to its ddc device.
 * NOTE(review): case labels and the name assignments for ports A-E are not
 * visible in this excerpt.
 */
1137 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1139 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1140 struct drm_i915_private *dev_priv = dev->dev_private;
1141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1142 enum port port = intel_dig_port->port;
1143 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1144 const char *name = NULL;
1145 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1148 /* On SKL we don't have Aux for port E so we rely on VBT to set
1149 * a proper alternate aux channel.
1151 if (IS_SKYLAKE(dev) && port == PORT_E) {
1152 switch (info->alternate_aux_channel) {
1154 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1157 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1160 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1164 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1170 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1174 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1178 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1182 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1186 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1194 * The AUX_CTL register is usually DP_CTL + 0x10.
1196 * On Haswell and Broadwell though:
1197 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1198 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1200 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1202 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1203 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1205 intel_dp->aux.name = name;
1206 intel_dp->aux.dev = dev->dev;
1207 intel_dp->aux.transfer = intel_dp_aux_transfer;
1209 DRM_DEBUG_KMS("i2c_init %s\n", name);
/* DragonFly: register the iic bus that routes through intel_dp_i2c_aux_ch. */
1211 ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
1212 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1213 &intel_dp->aux.ddc);
1214 WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1215 ret, port_name(port));
1218 ret = sysfs_create_link(&connector->base.kdev->kobj,
1219 &intel_dp->aux.ddc.dev.kobj,
1220 intel_dp->aux.ddc.dev.kobj.name);
1222 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1223 drm_dp_aux_unregister(&intel_dp->aux);
/*
 * Connector unregister hook: drop the sysfs ddc link created in
 * intel_dp_aux_init() (MST ports never had one), then run the common
 * connector unregister.
 */
1229 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1232 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1234 if (!intel_connector->mst_port)
1235 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1236 intel_dp->aux.ddc.dev.kobj.name);
1238 intel_connector_unregister(intel_connector);
/*
 * Program pipe_config to use SKL DPLL0 with the DPLL_CTRL1 link-rate field
 * matching port_clock. port_clock/2 is compared against half-rate values
 * (e.g. 81000 for 1.62 GHz) — the case labels are not visible in this
 * excerpt.
 */
1242 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1246 memset(&pipe_config->dpll_hw_state, 0,
1247 sizeof(pipe_config->dpll_hw_state));
1249 pipe_config->ddi_pll_sel = SKL_DPLL0;
1250 pipe_config->dpll_hw_state.cfgcr1 = 0;
1251 pipe_config->dpll_hw_state.cfgcr2 = 0;
1253 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1254 switch (pipe_config->port_clock / 2) {
1256 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1264 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1268 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1271 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1272 results in CDCLK change. Need to handle the change of CDCLK by
1273 disabling pipes and re-enabling them */
1275 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1279 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1284 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
/*
 * Haswell/Broadwell: clear dpll_hw_state and pick the fixed-frequency
 * LCPLL clock select (PORT_CLK_SEL_LCPLL_*) matching port_clock.
 */
1288 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1290 memset(&pipe_config->dpll_hw_state, 0,
1291 sizeof(pipe_config->dpll_hw_state));
1293 switch (pipe_config->port_clock / 2) {
1295 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1298 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1301 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
/*
 * Report the sink's supported link rates through *sink_rates and return
 * their count.  If the sink advertised an explicit rate table (cached in
 * intel_dp->sink_rates), use it; otherwise fall back to default_rates
 * with the count derived from the max link bw code
 * (0x06/0x0a/0x14 >> 3, + 1  ->  1/2/3 usable entries).
 */
1307 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1309 if (intel_dp->num_sink_rates) {
1310 *sink_rates = intel_dp->sink_rates;
1311 return intel_dp->num_sink_rates;
1314 *sink_rates = default_rates;
1316 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
/*
 * Can this source device transmit HBR2 (5.4 Gbps)?  Early Skylake
 * steppings are excluded by the WaDisableHBR2 workaround; otherwise
 * Haswell (except ULX), Broadwell and gen9+ qualify.
 */
1319 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1321 /* WaDisableHBR2:skl */
1322 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1325 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1326 (INTEL_INFO(dev)->gen >= 9))
/*
 * Report the source's supported link rates through *source_rates and
 * return their count.  Broxton and Skylake have platform-specific rate
 * tables; everything else uses default_rates.  When the source cannot do
 * HBR2 the trailing 5.4 Gbps entry is excluded from the reported count.
 */
1333 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1337 if (IS_BROXTON(dev)) {
1338 *source_rates = bxt_rates;
1339 size = ARRAY_SIZE(bxt_rates);
1340 } else if (IS_SKYLAKE(dev)) {
1341 *source_rates = skl_rates;
1342 size = ARRAY_SIZE(skl_rates);
1344 *source_rates = default_rates;
1345 size = ARRAY_SIZE(default_rates);
1348 /* This depends on the fact that 5.4 is last value in the array */
1349 if (!intel_dp_source_supports_hbr2(dev))
/*
 * On platforms with fixed per-rate DP PLL divider tables (gen4, PCH
 * split, Cherryview, Valleyview), look up the divisor entry matching
 * port_clock and pre-set pipe_config->dpll / clock_set.  Platforms not
 * matched here leave divisor NULL and skip the lookup.
 */
1356 intel_dp_set_clock(struct intel_encoder *encoder,
1357 struct intel_crtc_state *pipe_config)
1359 struct drm_device *dev = encoder->base.dev;
1360 const struct dp_link_dpll *divisor = NULL;
1364 divisor = gen4_dpll;
1365 count = ARRAY_SIZE(gen4_dpll);
1366 } else if (HAS_PCH_SPLIT(dev)) {
1368 count = ARRAY_SIZE(pch_dpll);
1369 } else if (IS_CHERRYVIEW(dev)) {
1371 count = ARRAY_SIZE(chv_dpll);
1372 } else if (IS_VALLEYVIEW(dev)) {
1374 count = ARRAY_SIZE(vlv_dpll);
1377 if (divisor && count) {
1378 for (i = 0; i < count; i++) {
1379 if (pipe_config->port_clock == divisor[i].clock) {
1380 pipe_config->dpll = divisor[i].dpll;
1381 pipe_config->clock_set = true;
/*
 * Merge-style intersection of the source and sink rate arrays into
 * common_rates, capped at DP_MAX_SUPPORTED_RATES entries.
 * NOTE(review): the two-index walk only works if both input arrays are
 * sorted ascending — confirm against the rate table definitions.
 */
1388 static int intersect_rates(const int *source_rates, int source_len,
1389 const int *sink_rates, int sink_len,
1392 int i = 0, j = 0, k = 0;
1394 while (i < source_len && j < sink_len) {
1395 if (source_rates[i] == sink_rates[j]) {
1396 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1398 common_rates[k] = source_rates[i];
1402 } else if (source_rates[i] < sink_rates[j]) {
/*
 * Compute the link rates supported by both source and sink: fetch each
 * side's rate list and intersect them (see intersect_rates()).  Returns
 * the number of common rates written out.
 */
1411 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1414 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1415 const int *source_rates, *sink_rates;
1416 int source_len, sink_len;
1418 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1419 source_len = intel_dp_source_rates(dev, &source_rates);
1421 return intersect_rates(source_rates, source_len,
1422 sink_rates, sink_len,
/*
 * Format an int array into str as a comma-separated list ("1, 2, 3").
 * Helper for the rate debug output in intel_dp_print_rates().
 */
1426 static void snprintf_int_array(char *str, size_t len,
1427 const int *array, int nelem)
1433 for (i = 0; i < nelem; i++) {
1434 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/*
 * Debug-log the source, sink and common link rate lists.  Cheap early
 * out when KMS debugging is disabled, so the formatting work is only
 * done when the messages would actually be emitted.
 */
1442 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1444 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1445 const int *source_rates, *sink_rates;
1446 int source_len, sink_len, common_len;
1447 int common_rates[DP_MAX_SUPPORTED_RATES];
1448 char str[128]; /* FIXME: too big for stack? */
1450 if ((drm_debug & DRM_UT_KMS) == 0)
1453 source_len = intel_dp_source_rates(dev, &source_rates);
1454 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1455 DRM_DEBUG_KMS("source rates: %s\n", str);
1457 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1458 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1459 DRM_DEBUG_KMS("sink rates: %s\n", str);
1461 common_len = intel_dp_common_rates(intel_dp, common_rates);
1462 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1463 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Linear scan for 'find' in the fixed-size rates array.  Callers also use
 * rate_to_index(0, rates) to locate the first zero (padding) slot, i.e.
 * the count of valid entries — see intel_dp_max_link_rate().
 */
1466 static int rate_to_index(int find, const int *rates)
1470 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1471 if (find == rates[i])
/*
 * Highest link rate supported by both source and sink: the last valid
 * entry of the (ascending) common-rate array.  rate_to_index(0, rates)
 * finds the first zero-filled slot, so index - 1 is the maximum rate.
 */
1478 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1480 int rates[DP_MAX_SUPPORTED_RATES] = {};
1483 len = intel_dp_common_rates(intel_dp, rates);
1484 if (WARN_ON(len <= 0))
1487 return rates[rate_to_index(0, rates) - 1];
/*
 * Translate a link rate into the sink's rate-table index (the value
 * programmed via DP_LINK_RATE_SET for sinks with an explicit rate table).
 */
1490 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1492 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Produce the sink-programming form of a port clock: sinks with an
 * explicit rate table get a rate-select index, legacy sinks get a
 * DP link-bw code via drm_dp_link_rate_to_bw_code().  Both out-params
 * are expected to be filled; the unused one is zeroed (elided branch).
 */
1495 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1496 uint8_t *link_bw, uint8_t *rate_select)
1498 if (intel_dp->num_sink_rates) {
1501 intel_dp_rate_select(intel_dp, port_clock);
1503 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * .compute_config encoder hook: pick bpp, lane count and link rate for
 * the requested mode, fill the DP-specific crtc state (m/n values,
 * port_clock) and set up the platform clocking.  For eDP the fixed panel
 * mode and panel fitter are applied first.
 */
1509 intel_dp_compute_config(struct intel_encoder *encoder,
1510 struct intel_crtc_state *pipe_config)
1512 struct drm_device *dev = encoder->base.dev;
1513 struct drm_i915_private *dev_priv = dev->dev_private;
1514 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1515 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1516 enum port port = dp_to_dig_port(intel_dp)->port;
1517 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1518 struct intel_connector *intel_connector = intel_dp->attached_connector;
1519 int lane_count, clock;
1520 int min_lane_count = 1;
1521 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1522 /* Conveniently, the link BW constants become indices with a shift...*/
1526 int link_avail, link_clock;
1527 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1529 uint8_t link_bw, rate_select;
1531 common_len = intel_dp_common_rates(intel_dp, common_rates);
1533 /* No common link rates between source and sink */
1534 WARN_ON(common_len <= 0);
/* Clock search runs over indices into the ascending common-rate array. */
1536 max_clock = common_len - 1;
1538 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1539 pipe_config->has_pch_encoder = true;
1541 pipe_config->has_dp_encoder = true;
1542 pipe_config->has_drrs = false;
/* Port A (cpu eDP) has no audio support. */
1543 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
/* eDP: force the panel's fixed mode and configure the panel fitter. */
1545 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1546 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1549 if (INTEL_INFO(dev)->gen >= 9) {
1551 ret = skl_update_scaler_crtc(pipe_config);
1556 if (!HAS_PCH_SPLIT(dev))
1557 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1558 intel_connector->panel.fitting_mode);
1560 intel_pch_panel_fitting(intel_crtc, pipe_config,
1561 intel_connector->panel.fitting_mode);
/* Double-clocked modes are not supported on DP. */
1564 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1567 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1568 "max bw %d pixel clock %iKHz\n",
1569 max_lane_count, common_rates[max_clock],
1570 adjusted_mode->crtc_clock);
1572 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1573 * bpc in between. */
1574 bpp = pipe_config->pipe_bpp;
1575 if (is_edp(intel_dp)) {
1577 /* Get bpp from vbt only for panels that dont have bpp in edid */
1578 if (intel_connector->base.display_info.bpc == 0 &&
1579 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1580 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1581 dev_priv->vbt.edp_bpp);
1582 bpp = dev_priv->vbt.edp_bpp;
1586 * Use the maximum clock and number of lanes the eDP panel
1587 * advertizes being capable of. The panels are generally
1588 * designed to support only a single clock and lane
1589 * configuration, and typically these values correspond to the
1590 * native resolution of the panel.
1592 min_lane_count = max_lane_count;
1593 min_clock = max_clock;
/* Search: highest bpp first, then lowest clock/lane combo that fits. */
1596 for (; bpp >= 6*3; bpp -= 2*3) {
1597 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1600 for (clock = min_clock; clock <= max_clock; clock++) {
1601 for (lane_count = min_lane_count;
1602 lane_count <= max_lane_count;
1605 link_clock = common_rates[clock];
1606 link_avail = intel_dp_max_data_rate(link_clock,
1609 if (mode_rate <= link_avail) {
1619 if (intel_dp->color_range_auto) {
1622 * CEA-861-E - 5.1 Default Encoding Parameters
1623 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1625 pipe_config->limited_color_range =
1626 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1628 pipe_config->limited_color_range =
1629 intel_dp->limited_color_range;
/* Commit the winning link configuration to the crtc state. */
1632 pipe_config->lane_count = lane_count;
1634 pipe_config->pipe_bpp = bpp;
1635 pipe_config->port_clock = common_rates[clock];
1637 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1638 &link_bw, &rate_select);
1640 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1641 link_bw, rate_select, pipe_config->lane_count,
1642 pipe_config->port_clock, bpp);
1643 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1644 mode_rate, link_avail);
1646 intel_link_compute_m_n(bpp, lane_count,
1647 adjusted_mode->crtc_clock,
1648 pipe_config->port_clock,
1649 &pipe_config->dp_m_n);
/* Seamless DRRS: pre-compute second m/n set for the downclocked mode. */
1651 if (intel_connector->panel.downclock_mode != NULL &&
1652 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1653 pipe_config->has_drrs = true;
1654 intel_link_compute_m_n(bpp, lane_count,
1655 intel_connector->panel.downclock_mode->clock,
1656 pipe_config->port_clock,
1657 &pipe_config->dp_m2_n2);
/* Per-platform clocking: SKL eDP / HSW-BDW DDI / legacy divider tables. */
1660 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1661 skl_edp_set_pll_config(pipe_config);
1662 else if (IS_BROXTON(dev))
1663 /* handled in ddi */;
1664 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1665 hsw_dp_set_ddi_pll_sel(pipe_config);
1667 intel_dp_set_clock(encoder, pipe_config);
/*
 * Program the CPU eDP PLL frequency select bits in DP_A (and mirror them
 * into the cached intel_dp->DP value): 162000 kHz selects the 160MHz
 * code, anything else the 270MHz code.
 */
1672 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1674 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1675 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1676 struct drm_device *dev = crtc->base.dev;
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1680 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1681 crtc->config->port_clock);
1682 dpa_ctl = I915_READ(DP_A);
1683 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1685 if (crtc->config->port_clock == 162000) {
1686 /* For a long time we've carried around a ILK-DevA w/a for the
1687 * 160MHz clock. If we're really unlucky, it's still required.
1689 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1690 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1691 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1693 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1694 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1697 I915_WRITE(DP_A, dpa_ctl);
1703 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1704 const struct intel_crtc_state *pipe_config)
1706 intel_dp->link_rate = pipe_config->port_clock;
1707 intel_dp->lane_count = pipe_config->lane_count;
/*
 * Compute the DP port register value (intel_dp->DP) ahead of enabling the
 * port, handling the per-platform register layout differences described
 * in the comment below.  For CPT PCH ports the sync/framing bits live in
 * TRANS_DP_CTL instead and are written here directly.
 */
1710 static void intel_dp_prepare(struct intel_encoder *encoder)
1712 struct drm_device *dev = encoder->base.dev;
1713 struct drm_i915_private *dev_priv = dev->dev_private;
1714 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1715 enum port port = dp_to_dig_port(intel_dp)->port;
1716 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1717 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1719 intel_dp_set_link_params(intel_dp, crtc->config);
1722 * There are four kinds of DP registers:
1729 * IBX PCH and CPU are the same for almost everything,
1730 * except that the CPU DP PLL is configured in this
1733 * CPT PCH is quite different, having many bits moved
1734 * to the TRANS_DP_CTL register instead. That
1735 * configuration happens (oddly) in ironlake_pch_enable
1738 /* Preserve the BIOS-computed detected bit. This is
1739 * supposed to be read-only.
1741 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1743 /* Handle DP bits in common between all three register formats */
1744 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1745 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1747 if (crtc->config->has_audio)
1748 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1750 /* Split out the IBX/CPU vs CPT settings */
1752 if (IS_GEN7(dev) && port == PORT_A) {
1753 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1754 intel_dp->DP |= DP_SYNC_HS_HIGH;
1755 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1756 intel_dp->DP |= DP_SYNC_VS_HIGH;
1757 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1759 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1760 intel_dp->DP |= DP_ENHANCED_FRAMING;
1762 intel_dp->DP |= crtc->pipe << 29;
1763 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1766 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/* CPT: enhanced framing is configured in TRANS_DP_CTL, not the port reg. */
1768 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1769 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1770 trans_dp |= TRANS_DP_ENH_FRAMING;
1772 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1773 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1775 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1776 crtc->config->limited_color_range)
1777 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1779 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1780 intel_dp->DP |= DP_SYNC_HS_HIGH;
1781 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1782 intel_dp->DP |= DP_SYNC_VS_HIGH;
1783 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1785 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1786 intel_dp->DP |= DP_ENHANCED_FRAMING;
1788 if (IS_CHERRYVIEW(dev))
1789 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1790 else if (crtc->pipe == PIPE_B)
1791 intel_dp->DP |= DP_PIPEB_SELECT;
/* Panel power sequencer (PP_STATUS) mask/value pairs consumed by
 * wait_panel_status() below: panel fully on, fully off, and idle after a
 * complete power cycle respectively. */
1795 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1796 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1798 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1799 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1801 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1802 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll the panel power sequencer status register until
 * (PP_STATUS & mask) == value, giving up with a DRM_ERROR after 5
 * seconds (10us poll interval).  Caller must hold pps_mutex.
 */
1804 static void wait_panel_status(struct intel_dp *intel_dp,
1808 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1809 struct drm_i915_private *dev_priv = dev->dev_private;
1810 u32 pp_stat_reg, pp_ctrl_reg;
1812 lockdep_assert_held(&dev_priv->pps_mutex);
1814 pp_stat_reg = _pp_stat_reg(intel_dp);
1815 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1817 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1819 I915_READ(pp_stat_reg),
1820 I915_READ(pp_ctrl_reg));
1822 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1823 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1824 I915_READ(pp_stat_reg),
1825 I915_READ(pp_ctrl_reg));
1828 DRM_DEBUG_KMS("Wait complete\n");
/* Wait until the power sequencer reports the panel fully on. */
1831 static void wait_panel_on(struct intel_dp *intel_dp)
1833 DRM_DEBUG_KMS("Wait for panel power on\n");
1834 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Wait until the power sequencer reports the panel fully off. */
1837 static void wait_panel_off(struct intel_dp *intel_dp)
1839 DRM_DEBUG_KMS("Wait for panel power off time\n");
1840 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Wait out the panel power cycle delay (relative to the last_power_cycle
 * timestamp), then wait for the sequencer to reach its cycle-idle state.
 */
1843 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1845 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1847 /* When we disable the VDD override bit last we have to do the manual
 * power cycle wait ourselves, from the last_power_cycle timestamp. */
1849 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1850 intel_dp->panel_power_cycle_delay);
1852 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Honour the panel's power-on -> backlight-on delay. */
1855 static void wait_backlight_on(struct intel_dp *intel_dp)
1857 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1858 intel_dp->backlight_on_delay);
/* Honour the backlight-off -> panel-power-off delay. */
1861 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1863 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1864 intel_dp->backlight_off_delay);
1867 /* Read the current pp_control value, unlocking the register if it
 * is write-protected (Broxton has no register lock, so it is skipped
 * there).  Caller must hold pps_mutex. */
1871 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1873 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1874 struct drm_i915_private *dev_priv = dev->dev_private;
1877 lockdep_assert_held(&dev_priv->pps_mutex);
1879 control = I915_READ(_pp_ctrl_reg(intel_dp));
1880 if (!IS_BROXTON(dev)) {
1881 control &= ~PANEL_UNLOCK_MASK;
1882 control |= PANEL_UNLOCK_REGS;
/*
 * Force panel VDD on so the AUX channel can be used before the panel is
 * fully powered.  Takes an AUX power domain reference on the 0->1
 * transition; returns whether the caller is responsible for turning VDD
 * back off (i.e. whether it was not already wanted).
 *
1888 * Must be paired with edp_panel_vdd_off().
1889 * Must hold pps_mutex around the whole on/off sequence.
1890 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
1892 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1895 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1896 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1897 struct drm_i915_private *dev_priv = dev->dev_private;
1898 enum intel_display_power_domain power_domain;
1900 u32 pp_stat_reg, pp_ctrl_reg;
1901 bool need_to_disable = !intel_dp->want_panel_vdd;
1903 lockdep_assert_held(&dev_priv->pps_mutex);
1905 if (!is_edp(intel_dp))
/* Cancel any pending deferred VDD-off before asserting VDD again. */
1908 cancel_delayed_work(&intel_dp->panel_vdd_work);
1909 intel_dp->want_panel_vdd = true;
1911 if (edp_have_panel_vdd(intel_dp))
1912 return need_to_disable;
1914 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1915 intel_display_power_get(dev_priv, power_domain);
1917 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1918 port_name(intel_dig_port->port));
1920 if (!edp_have_panel_power(intel_dp))
1921 wait_panel_power_cycle(intel_dp);
1923 pp = ironlake_get_pp_control(intel_dp);
1924 pp |= EDP_FORCE_VDD;
1926 pp_stat_reg = _pp_stat_reg(intel_dp);
1927 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1929 I915_WRITE(pp_ctrl_reg, pp);
1930 POSTING_READ(pp_ctrl_reg);
1931 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1932 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1934 * If the panel wasn't on, delay before accessing aux channel
1936 if (!edp_have_panel_power(intel_dp)) {
1937 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1938 port_name(intel_dig_port->port));
1939 msleep(intel_dp->panel_power_up_delay);
1942 return need_to_disable;
/*
 * Public VDD-on wrapper: asserts VDD under the pps lock and warns if VDD
 * was already requested (callers must not nest this entry point).
 *
1946 * Must be paired with intel_edp_panel_vdd_off() or
1947 * intel_edp_panel_off().
1948 * Nested calls to these functions are not allowed since
1949 * we drop the lock. Caller must use some higher level
1950 * locking to prevent nested calls from other threads.
 */
1952 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1956 if (!is_edp(intel_dp))
1960 vdd = edp_panel_vdd_on(intel_dp);
1961 pps_unlock(intel_dp);
1963 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1964 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Actually de-assert the VDD override (only valid once nobody wants VDD
 * anymore) and release the AUX power domain reference taken in
 * edp_panel_vdd_on().  Caller must hold pps_mutex.
 */
1967 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1969 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1970 struct drm_i915_private *dev_priv = dev->dev_private;
1971 struct intel_digital_port *intel_dig_port =
1972 dp_to_dig_port(intel_dp);
1973 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1974 enum intel_display_power_domain power_domain;
1976 u32 pp_stat_reg, pp_ctrl_reg;
1978 lockdep_assert_held(&dev_priv->pps_mutex);
1980 WARN_ON(intel_dp->want_panel_vdd);
1982 if (!edp_have_panel_vdd(intel_dp))
1985 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1986 port_name(intel_dig_port->port));
1988 pp = ironlake_get_pp_control(intel_dp);
1989 pp &= ~EDP_FORCE_VDD;
1991 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1992 pp_stat_reg = _pp_stat_reg(intel_dp);
1994 I915_WRITE(pp_ctrl_reg, pp);
1995 POSTING_READ(pp_ctrl_reg);
1997 /* Make sure sequencer is idle before allowing subsequent activity */
1998 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1999 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* If the panel was off too, dropping VDD starts a real power cycle. */
2001 if ((pp & POWER_TARGET_ON) == 0)
2002 intel_dp->last_power_cycle = jiffies;
2004 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2005 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work callback: under the pps lock, turn VDD off if no one has
 * re-requested it since the work was scheduled.
 */
2008 static void edp_panel_vdd_work(struct work_struct *__work)
2010 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2011 struct intel_dp, panel_vdd_work);
2014 if (!intel_dp->want_panel_vdd)
2015 edp_panel_vdd_off_sync(intel_dp);
2016 pps_unlock(intel_dp);
/*
 * Defer the VDD-off to a workqueue, far enough in the future (5x the
 * power cycle delay) that a burst of AUX transactions keeps VDD up.
 */
2019 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2021 unsigned long delay;
2024 * Queue the timer to fire a long time from now (relative to the power
2025 * down delay) to keep the panel power up across a sequence of
2028 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2029 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Drop the VDD request: synchronously via edp_panel_vdd_off_sync() or
 * deferred via edp_panel_vdd_schedule_off(), depending on 'sync'.
 *
2033 * Must be paired with edp_panel_vdd_on().
2034 * Must hold pps_mutex around the whole on/off sequence.
2035 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
2037 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2039 struct drm_i915_private *dev_priv =
2040 intel_dp_to_dev(intel_dp)->dev_private;
2042 lockdep_assert_held(&dev_priv->pps_mutex);
2044 if (!is_edp(intel_dp))
2047 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2048 port_name(dp_to_dig_port(intel_dp)->port));
2050 intel_dp->want_panel_vdd = false;
2053 edp_panel_vdd_off_sync(intel_dp);
2055 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Turn the eDP panel power on through the power sequencer and wait for
 * it to report the panel on.  Caller must hold pps_mutex.  Warns (and
 * bails) if panel power is already on.
 */
2058 static void edp_panel_on(struct intel_dp *intel_dp)
2060 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2061 struct drm_i915_private *dev_priv = dev->dev_private;
2065 lockdep_assert_held(&dev_priv->pps_mutex);
2067 if (!is_edp(intel_dp))
2070 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2071 port_name(dp_to_dig_port(intel_dp)->port));
2073 if (WARN(edp_have_panel_power(intel_dp),
2074 "eDP port %c panel power already on\n",
2075 port_name(dp_to_dig_port(intel_dp)->port)))
2078 wait_panel_power_cycle(intel_dp);
2080 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2081 pp = ironlake_get_pp_control(intel_dp);
2083 /* ILK workaround: disable reset around power sequence */
2084 pp &= ~PANEL_POWER_RESET;
2085 I915_WRITE(pp_ctrl_reg, pp);
2086 POSTING_READ(pp_ctrl_reg);
2089 pp |= POWER_TARGET_ON;
2091 pp |= PANEL_POWER_RESET;
2093 I915_WRITE(pp_ctrl_reg, pp);
2094 POSTING_READ(pp_ctrl_reg);
2096 wait_panel_on(intel_dp);
/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
2097 intel_dp->last_power_on = jiffies;
2100 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2101 I915_WRITE(pp_ctrl_reg, pp);
2102 POSTING_READ(pp_ctrl_reg);
/* Public wrapper: panel power on under the pps lock; no-op for non-eDP. */
2106 void intel_edp_panel_on(struct intel_dp *intel_dp)
2108 if (!is_edp(intel_dp))
2112 edp_panel_on(intel_dp);
2113 pps_unlock(intel_dp);
/*
 * Turn the eDP panel power off (clearing VDD in the same write — some
 * panels require that), wait for completion, then release the AUX power
 * domain reference that came with VDD.  Caller must hold pps_mutex and
 * have VDD forced on.
 */
2117 static void edp_panel_off(struct intel_dp *intel_dp)
2119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2120 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2121 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2122 struct drm_i915_private *dev_priv = dev->dev_private;
2123 enum intel_display_power_domain power_domain;
2127 lockdep_assert_held(&dev_priv->pps_mutex);
2129 if (!is_edp(intel_dp))
2132 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2133 port_name(dp_to_dig_port(intel_dp)->port));
2135 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2136 port_name(dp_to_dig_port(intel_dp)->port));
2138 pp = ironlake_get_pp_control(intel_dp);
2139 /* We need to switch off panel power _and_ force vdd, for otherwise some
2140 * panels get very unhappy and cease to work. */
2141 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2144 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2146 intel_dp->want_panel_vdd = false;
2148 I915_WRITE(pp_ctrl_reg, pp);
2149 POSTING_READ(pp_ctrl_reg);
2151 intel_dp->last_power_cycle = jiffies;
2152 wait_panel_off(intel_dp);
2154 /* We got a reference when we enabled the VDD. */
2155 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2156 intel_display_power_put(dev_priv, power_domain);
/* Public wrapper: panel power off under the pps lock; no-op for non-eDP. */
2159 void intel_edp_panel_off(struct intel_dp *intel_dp)
2161 if (!is_edp(intel_dp))
2165 edp_panel_off(intel_dp);
2166 pps_unlock(intel_dp);
2169 /* Enable backlight in the panel power control. */
2170 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2172 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2173 struct drm_device *dev = intel_dig_port->base.base.dev;
2174 struct drm_i915_private *dev_priv = dev->dev_private;
2179 * If we enable the backlight right away following a panel power
2180 * on, we may see slight flicker as the panel syncs with the eDP
2181 * link. So delay a bit to make sure the image is solid before
2182 * allowing it to appear.
2184 wait_backlight_on(intel_dp);
/* Set EDP_BLC_ENABLE in PP_CONTROL under the pps lock. */
2188 pp = ironlake_get_pp_control(intel_dp);
2189 pp |= EDP_BLC_ENABLE;
2191 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2193 I915_WRITE(pp_ctrl_reg, pp);
2194 POSTING_READ(pp_ctrl_reg);
2196 pps_unlock(intel_dp);
2199 /* Enable backlight PWM and backlight PP control. */
2200 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2202 if (!is_edp(intel_dp))
2205 DRM_DEBUG_KMS("\n");
/* PWM first, then the panel power control enable bit. */
2207 intel_panel_enable_backlight(intel_dp->attached_connector);
2208 _intel_edp_backlight_on(intel_dp);
2211 /* Disable backlight in the panel power control. */
2212 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2214 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2215 struct drm_i915_private *dev_priv = dev->dev_private;
2219 if (!is_edp(intel_dp))
/* Clear EDP_BLC_ENABLE in PP_CONTROL under the pps lock. */
2224 pp = ironlake_get_pp_control(intel_dp);
2225 pp &= ~EDP_BLC_ENABLE;
2227 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2229 I915_WRITE(pp_ctrl_reg, pp);
2230 POSTING_READ(pp_ctrl_reg);
2232 pps_unlock(intel_dp);
/* Record the timestamp used for the backlight-off delay, then wait. */
2234 intel_dp->last_backlight_off = jiffies;
2235 edp_wait_backlight_off(intel_dp);
2238 /* Disable backlight PP control and backlight PWM. */
2239 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2241 if (!is_edp(intel_dp))
2244 DRM_DEBUG_KMS("\n");
/* Panel power control bit first, then the PWM — reverse of enable. */
2246 _intel_edp_backlight_off(intel_dp);
2247 intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * Idempotent toggle: reads the current EDP_BLC_ENABLE state (under the
 * pps lock) and only acts when the requested state differs.
 *
2251 * Hook for controlling the panel power control backlight through the bl_power
2252 * sysfs attribute. Take care to handle multiple calls.
 */
2254 static void intel_edp_backlight_power(struct intel_connector *connector,
2257 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2261 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2262 pps_unlock(intel_dp);
2264 if (is_enabled == enable)
2267 DRM_DEBUG_KMS("panel power control backlight %s\n",
2268 enable ? "enable" : "disable");
2271 _intel_edp_backlight_on(intel_dp);
2273 _intel_edp_backlight_off(intel_dp);
/*
 * Enable the CPU eDP PLL via DP_A.  The pipe must be disabled, and both
 * the PLL and the port must currently be off (asserted below).
 */
2276 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2278 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2279 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2280 struct drm_device *dev = crtc->dev;
2281 struct drm_i915_private *dev_priv = dev->dev_private;
2284 assert_pipe_disabled(dev_priv,
2285 to_intel_crtc(crtc)->pipe);
2287 DRM_DEBUG_KMS("\n");
2288 dpa_ctl = I915_READ(DP_A);
2289 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2290 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2292 /* We don't adjust intel_dp->DP while tearing down the link, to
2293 * facilitate link retraining (e.g. after hotplug). Hence clear all
2294 * enable bits here to ensure that we don't enable too much. */
2295 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2296 intel_dp->DP |= DP_PLL_ENABLE;
2297 I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL via DP_A.  The pipe must be disabled, the PLL
 * must be on and the port off (asserted below).
 */
2302 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2304 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2305 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2306 struct drm_device *dev = crtc->dev;
2307 struct drm_i915_private *dev_priv = dev->dev_private;
2310 assert_pipe_disabled(dev_priv,
2311 to_intel_crtc(crtc)->pipe);
2313 dpa_ctl = I915_READ(DP_A);
2314 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2315 "dp pll off, should be on\n");
2316 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2318 /* We can't rely on the value tracked for the DP register in
2319 * intel_dp->DP because link_down must not change that (otherwise link
2320 * re-training will fail. */
2321 dpa_ctl &= ~DP_PLL_ENABLE;
2322 I915_WRITE(DP_A, dpa_ctl);
2327 /* If the sink supports it, try to set the power state appropriately */
2328 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2332 /* Should have a valid DPCD by this point */
/* DPCD < 1.1 sinks have no DP_SET_POWER support — nothing to do. */
2333 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2336 if (mode != DRM_MODE_DPMS_ON) {
2337 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2341 * When turning on, we need to retry for 1ms to give the sink
2344 for (i = 0; i < 3; i++) {
2345 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2354 DRM_DEBUG_KMS("failed to %s sink power state\n",
2355 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * .get_hw_state encoder hook: report whether the DP port is enabled and,
 * if so, which pipe drives it.  Pipe decoding varies by platform: gen7
 * port A and pre-CPT use the port register's pipe-select bits, CPT PCH
 * ports require scanning TRANS_DP_CTL of every pipe, Cherryview has its
 * own select field.
 */
2358 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2359 enum i915_pipe *pipe)
2361 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2362 enum port port = dp_to_dig_port(intel_dp)->port;
2363 struct drm_device *dev = encoder->base.dev;
2364 struct drm_i915_private *dev_priv = dev->dev_private;
2365 enum intel_display_power_domain power_domain;
2368 power_domain = intel_display_port_power_domain(encoder);
2369 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2372 tmp = I915_READ(intel_dp->output_reg);
2374 if (!(tmp & DP_PORT_EN))
2377 if (IS_GEN7(dev) && port == PORT_A) {
2378 *pipe = PORT_TO_PIPE_CPT(tmp);
2379 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2382 for_each_pipe(dev_priv, p) {
2383 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2384 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2390 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2391 intel_dp->output_reg);
2392 } else if (IS_CHERRYVIEW(dev)) {
2393 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2395 *pipe = PORT_TO_PIPE(tmp);
/*
 * .get_config encoder hook: read back the DP state from hardware into
 * pipe_config (sync polarity, audio, color range, lane count, m/n
 * values, port clock, dotclock).  Ends with a VBT eDP bpp sanity fixup.
 */
2401 static void intel_dp_get_config(struct intel_encoder *encoder,
2402 struct intel_crtc_state *pipe_config)
2404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2406 struct drm_device *dev = encoder->base.dev;
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 enum port port = dp_to_dig_port(intel_dp)->port;
2409 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2412 tmp = I915_READ(intel_dp->output_reg);
2414 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg elsewhere. */
2416 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2417 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2419 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2420 flags |= DRM_MODE_FLAG_PHSYNC;
2422 flags |= DRM_MODE_FLAG_NHSYNC;
2424 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2425 flags |= DRM_MODE_FLAG_PVSYNC;
2427 flags |= DRM_MODE_FLAG_NVSYNC;
2429 if (tmp & DP_SYNC_HS_HIGH)
2430 flags |= DRM_MODE_FLAG_PHSYNC;
2432 flags |= DRM_MODE_FLAG_NHSYNC;
2434 if (tmp & DP_SYNC_VS_HIGH)
2435 flags |= DRM_MODE_FLAG_PVSYNC;
2437 flags |= DRM_MODE_FLAG_NVSYNC;
2440 pipe_config->base.adjusted_mode.flags |= flags;
2442 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2443 tmp & DP_COLOR_RANGE_16_235)
2444 pipe_config->limited_color_range = true;
2446 pipe_config->has_dp_encoder = true;
2448 pipe_config->lane_count =
2449 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2451 intel_dp_get_m_n(crtc, pipe_config);
/* Port A (cpu eDP): recover the link rate from the DP_A PLL freq bits. */
2453 if (port == PORT_A) {
2454 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2455 pipe_config->port_clock = 162000;
2457 pipe_config->port_clock = 270000;
2460 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2461 &pipe_config->dp_m_n);
2463 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2464 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2466 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2468 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2469 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2471 * This is a big fat ugly hack.
2473 * Some machines in UEFI boot mode provide us a VBT that has 18
2474 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2475 * unknown we fail to light up. Yet the same BIOS boots up with
2476 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2477 * max, not what it tells us to use.
2479 * Note: This will still be broken if the eDP panel is not lit
2480 * up by the BIOS, and thus we can't get the mode at module
2483 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2484 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2485 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2489 static void intel_disable_dp(struct intel_encoder *encoder)
2491 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2492 struct drm_device *dev = encoder->base.dev;
2493 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2495 if (crtc->config->has_audio)
2496 intel_audio_codec_disable(encoder);
2498 if (HAS_PSR(dev) && !HAS_DDI(dev))
2499 intel_psr_disable(intel_dp);
2501 /* Make sure the panel is off before trying to change the mode. But also
2502 * ensure that we have vdd while we switch off the panel. */
2503 intel_edp_panel_vdd_on(intel_dp);
2504 intel_edp_backlight_off(intel_dp);
2505 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2506 intel_edp_panel_off(intel_dp);
2508 /* disable the port before the pipe on g4x */
2509 if (INTEL_INFO(dev)->gen < 5)
2510 intel_dp_link_down(intel_dp);
2513 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2515 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2516 enum port port = dp_to_dig_port(intel_dp)->port;
2518 intel_dp_link_down(intel_dp);
2520 ironlake_edp_pll_off(intel_dp);
2523 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2525 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527 intel_dp_link_down(intel_dp);
2530 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2533 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2534 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2535 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2536 enum i915_pipe pipe = crtc->pipe;
2539 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2541 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2543 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2544 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2546 if (crtc->config->lane_count > 2) {
2547 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2549 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2551 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2552 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2555 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2556 val |= CHV_PCS_REQ_SOFTRESET_EN;
2558 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2560 val |= DPIO_PCS_CLK_SOFT_RESET;
2561 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2563 if (crtc->config->lane_count > 2) {
2564 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2565 val |= CHV_PCS_REQ_SOFTRESET_EN;
2567 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2569 val |= DPIO_PCS_CLK_SOFT_RESET;
2570 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2574 static void chv_post_disable_dp(struct intel_encoder *encoder)
2576 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2577 struct drm_device *dev = encoder->base.dev;
2578 struct drm_i915_private *dev_priv = dev->dev_private;
2580 intel_dp_link_down(intel_dp);
2582 mutex_lock(&dev_priv->sb_lock);
2584 /* Assert data lane reset */
2585 chv_data_lane_soft_reset(encoder, true);
2587 mutex_unlock(&dev_priv->sb_lock);
2591 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2593 uint8_t dp_train_pat)
2595 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2596 struct drm_device *dev = intel_dig_port->base.base.dev;
2597 struct drm_i915_private *dev_priv = dev->dev_private;
2598 enum port port = intel_dig_port->port;
2601 uint32_t temp = I915_READ(DP_TP_CTL(port));
2603 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2604 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2606 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2608 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2609 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2610 case DP_TRAINING_PATTERN_DISABLE:
2611 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2614 case DP_TRAINING_PATTERN_1:
2615 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2617 case DP_TRAINING_PATTERN_2:
2618 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2620 case DP_TRAINING_PATTERN_3:
2621 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2624 I915_WRITE(DP_TP_CTL(port), temp);
2626 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2627 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2628 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2630 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2631 case DP_TRAINING_PATTERN_DISABLE:
2632 *DP |= DP_LINK_TRAIN_OFF_CPT;
2634 case DP_TRAINING_PATTERN_1:
2635 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2637 case DP_TRAINING_PATTERN_2:
2638 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2640 case DP_TRAINING_PATTERN_3:
2641 DRM_ERROR("DP training pattern 3 not supported\n");
2642 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2647 if (IS_CHERRYVIEW(dev))
2648 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2650 *DP &= ~DP_LINK_TRAIN_MASK;
2652 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2653 case DP_TRAINING_PATTERN_DISABLE:
2654 *DP |= DP_LINK_TRAIN_OFF;
2656 case DP_TRAINING_PATTERN_1:
2657 *DP |= DP_LINK_TRAIN_PAT_1;
2659 case DP_TRAINING_PATTERN_2:
2660 *DP |= DP_LINK_TRAIN_PAT_2;
2662 case DP_TRAINING_PATTERN_3:
2663 if (IS_CHERRYVIEW(dev)) {
2664 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2666 DRM_ERROR("DP training pattern 3 not supported\n");
2667 *DP |= DP_LINK_TRAIN_PAT_2;
2674 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2676 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2677 struct drm_i915_private *dev_priv = dev->dev_private;
2679 /* enable with pattern 1 (as per spec) */
2680 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2681 DP_TRAINING_PATTERN_1);
2683 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2684 POSTING_READ(intel_dp->output_reg);
2687 * Magic for VLV/CHV. We _must_ first set up the register
2688 * without actually enabling the port, and then do another
2689 * write to enable the port. Otherwise link training will
2690 * fail when the power sequencer is freshly used for this port.
2692 intel_dp->DP |= DP_PORT_EN;
2694 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2695 POSTING_READ(intel_dp->output_reg);
2698 static void intel_enable_dp(struct intel_encoder *encoder)
2700 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2701 struct drm_device *dev = encoder->base.dev;
2702 struct drm_i915_private *dev_priv = dev->dev_private;
2703 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2704 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2706 if (WARN_ON(dp_reg & DP_PORT_EN))
2711 if (IS_VALLEYVIEW(dev))
2712 vlv_init_panel_power_sequencer(intel_dp);
2714 intel_dp_enable_port(intel_dp);
2716 edp_panel_vdd_on(intel_dp);
2717 edp_panel_on(intel_dp);
2718 edp_panel_vdd_off(intel_dp, true);
2720 pps_unlock(intel_dp);
2722 if (IS_VALLEYVIEW(dev)) {
2723 unsigned int lane_mask = 0x0;
2725 if (IS_CHERRYVIEW(dev))
2726 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2728 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2732 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2733 intel_dp_start_link_train(intel_dp);
2734 intel_dp_stop_link_train(intel_dp);
2736 if (crtc->config->has_audio) {
2737 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2738 pipe_name(crtc->pipe));
2739 intel_audio_codec_enable(encoder);
2743 static void g4x_enable_dp(struct intel_encoder *encoder)
2745 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2747 intel_enable_dp(encoder);
2748 intel_edp_backlight_on(intel_dp);
2751 static void vlv_enable_dp(struct intel_encoder *encoder)
2753 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2755 intel_edp_backlight_on(intel_dp);
2756 intel_psr_enable(intel_dp);
2759 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2761 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2762 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2764 intel_dp_prepare(encoder);
2766 /* Only ilk+ has port A */
2767 if (dport->port == PORT_A) {
2768 ironlake_set_pll_cpu_edp(intel_dp);
2769 ironlake_edp_pll_on(intel_dp);
2773 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2775 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2776 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2777 enum i915_pipe pipe = intel_dp->pps_pipe;
2778 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2780 edp_panel_vdd_off_sync(intel_dp);
2783 * VLV seems to get confused when multiple power seqeuencers
2784 * have the same port selected (even if only one has power/vdd
2785 * enabled). The failure manifests as vlv_wait_port_ready() failing
2786 * CHV on the other hand doesn't seem to mind having the same port
2787 * selected in multiple power seqeuencers, but let's clear the
2788 * port select always when logically disconnecting a power sequencer
2791 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2792 pipe_name(pipe), port_name(intel_dig_port->port));
2793 I915_WRITE(pp_on_reg, 0);
2794 POSTING_READ(pp_on_reg);
2796 intel_dp->pps_pipe = INVALID_PIPE;
2799 static void vlv_steal_power_sequencer(struct drm_device *dev,
2800 enum i915_pipe pipe)
2802 struct drm_i915_private *dev_priv = dev->dev_private;
2803 struct intel_encoder *encoder;
2805 lockdep_assert_held(&dev_priv->pps_mutex);
2807 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2810 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2812 struct intel_dp *intel_dp;
2815 if (encoder->type != INTEL_OUTPUT_EDP)
2818 intel_dp = enc_to_intel_dp(&encoder->base);
2819 port = dp_to_dig_port(intel_dp)->port;
2821 if (intel_dp->pps_pipe != pipe)
2824 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2825 pipe_name(pipe), port_name(port));
2827 WARN(encoder->base.crtc,
2828 "stealing pipe %c power sequencer from active eDP port %c\n",
2829 pipe_name(pipe), port_name(port));
2831 /* make sure vdd is off before we steal it */
2832 vlv_detach_power_sequencer(intel_dp);
2836 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2838 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2839 struct intel_encoder *encoder = &intel_dig_port->base;
2840 struct drm_device *dev = encoder->base.dev;
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2842 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2844 lockdep_assert_held(&dev_priv->pps_mutex);
2846 if (!is_edp(intel_dp))
2849 if (intel_dp->pps_pipe == crtc->pipe)
2853 * If another power sequencer was being used on this
2854 * port previously make sure to turn off vdd there while
2855 * we still have control of it.
2857 if (intel_dp->pps_pipe != INVALID_PIPE)
2858 vlv_detach_power_sequencer(intel_dp);
2861 * We may be stealing the power
2862 * sequencer from another port.
2864 vlv_steal_power_sequencer(dev, crtc->pipe);
2866 /* now it's all ours */
2867 intel_dp->pps_pipe = crtc->pipe;
2869 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2870 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2872 /* init power sequencer on this pipe and port */
2873 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2874 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV pre-enable hook: program the port's DPIO clock channels and lane
 * clock registers via the sideband interface, then run the common DP
 * enable path.
 *
 * NOTE(review): the extraction elided the lines that compute 'val'
 * between the DW8 read and write (upstream zeroes val, sets a
 * pipe-dependent bit and ORs in a magic constant) — recover them from
 * the upstream driver before relying on this text.
 */
2877 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2879 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2880 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2881 struct drm_device *dev = encoder->base.dev;
2882 struct drm_i915_private *dev_priv = dev->dev_private;
2883 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2884 enum dpio_channel port = vlv_dport_to_channel(dport);
2885 int pipe = intel_crtc->pipe;
/* All DPIO accesses below require the sideband lock */
2888 mutex_lock(&dev_priv->sb_lock);
/* Enable clock channels for this port */
2890 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2897 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* Program lane clock; magic values from the VLV DPIO programming notes */
2898 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2899 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2901 mutex_unlock(&dev_priv->sb_lock);
/* Common port enable / panel power / link training path */
2903 intel_enable_dp(encoder);
2906 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2908 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2909 struct drm_device *dev = encoder->base.dev;
2910 struct drm_i915_private *dev_priv = dev->dev_private;
2911 struct intel_crtc *intel_crtc =
2912 to_intel_crtc(encoder->base.crtc);
2913 enum dpio_channel port = vlv_dport_to_channel(dport);
2914 int pipe = intel_crtc->pipe;
2916 intel_dp_prepare(encoder);
2918 /* Program Tx lane resets to default */
2919 mutex_lock(&dev_priv->sb_lock);
2920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2921 DPIO_PCS_TX_LANE2_RESET |
2922 DPIO_PCS_TX_LANE1_RESET);
2923 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2924 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2925 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2926 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2927 DPIO_PCS_CLK_SOFT_RESET);
2929 /* Fix up inter-pair skew failure */
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2931 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2932 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2933 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV pre-enable hook: program per-lane PHY settings (FIFO reset
 * source, upar bit, data lane stagger), release the data lane reset,
 * run the common DP enable path, and drop the CL2 power-gate override
 * once the second common lane can stay alive on its own.
 *
 * NOTE(review): the extraction elided the 'stagger' value assignments
 * for each port_clock band and the lane_count==1 'data' assignment —
 * recover them from the upstream driver before relying on this text.
 */
2936 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2938 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2939 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2940 struct drm_device *dev = encoder->base.dev;
2941 struct drm_i915_private *dev_priv = dev->dev_private;
2942 struct intel_crtc *intel_crtc =
2943 to_intel_crtc(encoder->base.crtc);
2944 enum dpio_channel ch = vlv_dport_to_channel(dport);
2945 int pipe = intel_crtc->pipe;
2946 int data, i, stagger;
2949 mutex_lock(&dev_priv->sb_lock);
2951 /* allow hardware to manage TX FIFO reset source */
2952 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2953 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
/* Second PCS group only used when driving more than two lanes */
2956 if (intel_crtc->config->lane_count > 2) {
2957 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2958 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2959 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2962 /* Program Tx lane latency optimal setting*/
2963 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2964 /* Set the upar bit */
2965 if (intel_crtc->config->lane_count == 1)
2968 data = (i == 1) ? 0x0 : 0x1;
2969 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2970 data << DPIO_UPAR_SHIFT);
2973 /* Data lane stagger programming */
2974 if (intel_crtc->config->port_clock > 270000)
2976 else if (intel_crtc->config->port_clock > 135000)
2978 else if (intel_crtc->config->port_clock > 67500)
2980 else if (intel_crtc->config->port_clock > 33750)
2985 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2986 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2987 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2989 if (intel_crtc->config->lane_count > 2) {
2990 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2991 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2992 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2995 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2996 DPIO_LANESTAGGER_STRAP(stagger) |
2997 DPIO_LANESTAGGER_STRAP_OVRD |
2998 DPIO_TX1_STAGGER_MASK(0x1f) |
2999 DPIO_TX1_STAGGER_MULT(6) |
3000 DPIO_TX2_STAGGER_MULT(0));
3002 if (intel_crtc->config->lane_count > 2) {
3003 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3004 DPIO_LANESTAGGER_STRAP(stagger) |
3005 DPIO_LANESTAGGER_STRAP_OVRD |
3006 DPIO_TX1_STAGGER_MASK(0x1f) |
3007 DPIO_TX1_STAGGER_MULT(7) |
3008 DPIO_TX2_STAGGER_MULT(5));
3011 /* Deassert data lane reset */
3012 chv_data_lane_soft_reset(encoder, false);
3014 mutex_unlock(&dev_priv->sb_lock);
3016 intel_enable_dp(encoder);
3018 /* Second common lane will stay alive on its own now */
3019 if (dport->release_cl2_override) {
3020 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3021 dport->release_cl2_override = false;
/*
 * CHV pre-PLL hook: power up the PHY lanes (including the CL2
 * common-lane trick needed to reach the PLL at all), hold the data
 * lanes in reset, and program clock distribution and clock channel
 * usage for this port, all under the sideband lock.
 *
 * NOTE(review): the extraction elided the conditions selecting which
 * BUFLEFT/BUFRIGHT force bit and which USEDCLKCHANNEL polarity to
 * apply (they depend on channel/pipe in the upstream driver) — recover
 * them from the upstream driver before relying on this text.
 */
3025 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3027 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3028 struct drm_device *dev = encoder->base.dev;
3029 struct drm_i915_private *dev_priv = dev->dev_private;
3030 struct intel_crtc *intel_crtc =
3031 to_intel_crtc(encoder->base.crtc);
3032 enum dpio_channel ch = vlv_dport_to_channel(dport);
3033 enum i915_pipe pipe = intel_crtc->pipe;
3034 unsigned int lane_mask =
3035 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3038 intel_dp_prepare(encoder);
3041 * Must trick the second common lane into life.
3042 * Otherwise we can't even access the PLL.
3044 if (ch == DPIO_CH0 && pipe == PIPE_B)
3045 dport->release_cl2_override =
3046 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3048 chv_phy_powergate_lanes(encoder, true, lane_mask);
3050 mutex_lock(&dev_priv->sb_lock);
3052 /* Assert data lane reset */
3053 chv_data_lane_soft_reset(encoder, true);
3055 /* program left/right clock distribution */
3056 if (pipe != PIPE_B) {
3057 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3058 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3060 val |= CHV_BUFLEFTENA1_FORCE;
3062 val |= CHV_BUFRIGHTENA1_FORCE;
3063 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3065 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3066 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3068 val |= CHV_BUFLEFTENA2_FORCE;
3070 val |= CHV_BUFRIGHTENA2_FORCE;
3071 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3074 /* program clock channel usage */
3075 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3076 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3078 val &= ~CHV_PCS_USEDCLKCHANNEL;
3080 val |= CHV_PCS_USEDCLKCHANNEL;
3081 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3083 if (intel_crtc->config->lane_count > 2) {
3084 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3085 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3087 val &= ~CHV_PCS_USEDCLKCHANNEL;
3089 val |= CHV_PCS_USEDCLKCHANNEL;
3090 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3094 * This is a bit weird since generally CL
3095 * matches the pipe, but here we need to
3096 * pick the CL based on the port.
3098 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3100 val &= ~CHV_CMN_USEDCLKCHANNEL;
3102 val |= CHV_CMN_USEDCLKCHANNEL;
3103 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3105 mutex_unlock(&dev_priv->sb_lock);
3108 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3110 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3111 enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3114 mutex_lock(&dev_priv->sb_lock);
3116 /* disable left/right clock distribution */
3117 if (pipe != PIPE_B) {
3118 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3119 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3120 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3122 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3123 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3124 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3127 mutex_unlock(&dev_priv->sb_lock);
3130 * Leave the power down bit cleared for at least one
3131 * lane so that chv_powergate_phy_ch() will power
3132 * on something when the channel is otherwise unused.
3133 * When the port is off and the override is removed
3134 * the lanes power down anyway, so otherwise it doesn't
3135 * really matter what the state of power down bits is
3138 chv_phy_powergate_lanes(encoder, false, 0x0);
3142 * Native read with retry for link status and receiver capability reads for
3143 * cases where the sink may still be asleep.
3145 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3146 * supposed to retry 3 times per the spec.
3149 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3150 void *buffer, size_t size)
3156 * Sometime we just get the same incorrect byte repeated
3157 * over the entire buffer. Doing just one throw away read
3158 * initially seems to "solve" it.
3160 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3162 for (i = 0; i < 3; i++) {
3163 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3173 * Fetch AUX CH registers 0x202 - 0x207 which contain
3174 * link status information
3177 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3179 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3182 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3185 /* These are source-specific values. */
3187 intel_dp_voltage_max(struct intel_dp *intel_dp)
3189 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3190 struct drm_i915_private *dev_priv = dev->dev_private;
3191 enum port port = dp_to_dig_port(intel_dp)->port;
3193 if (IS_BROXTON(dev))
3194 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3195 else if (INTEL_INFO(dev)->gen >= 9) {
3196 if (dev_priv->edp_low_vswing && port == PORT_A)
3197 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3198 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3199 } else if (IS_VALLEYVIEW(dev))
3200 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3201 else if (IS_GEN7(dev) && port == PORT_A)
3202 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3203 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3204 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3206 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3210 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3212 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3213 enum port port = dp_to_dig_port(intel_dp)->port;
3215 if (INTEL_INFO(dev)->gen >= 9) {
3216 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3217 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3218 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3219 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3220 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3222 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3223 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3224 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3226 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3228 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3229 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3231 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3232 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3233 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3235 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3236 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3238 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3240 } else if (IS_VALLEYVIEW(dev)) {
3241 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3243 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3245 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3247 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3250 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3252 } else if (IS_GEN7(dev) && port == PORT_A) {
3253 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3255 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3258 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3260 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3263 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3272 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3277 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3280 struct drm_i915_private *dev_priv = dev->dev_private;
3281 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3282 struct intel_crtc *intel_crtc =
3283 to_intel_crtc(dport->base.base.crtc);
3284 unsigned long demph_reg_value, preemph_reg_value,
3285 uniqtranscale_reg_value;
3286 uint8_t train_set = intel_dp->train_set[0];
3287 enum dpio_channel port = vlv_dport_to_channel(dport);
3288 int pipe = intel_crtc->pipe;
3290 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3291 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3292 preemph_reg_value = 0x0004000;
3293 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3295 demph_reg_value = 0x2B405555;
3296 uniqtranscale_reg_value = 0x552AB83A;
3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3299 demph_reg_value = 0x2B404040;
3300 uniqtranscale_reg_value = 0x5548B83A;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3303 demph_reg_value = 0x2B245555;
3304 uniqtranscale_reg_value = 0x5560B83A;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3307 demph_reg_value = 0x2B405555;
3308 uniqtranscale_reg_value = 0x5598DA3A;
3314 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3315 preemph_reg_value = 0x0002000;
3316 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3318 demph_reg_value = 0x2B404040;
3319 uniqtranscale_reg_value = 0x5552B83A;
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3322 demph_reg_value = 0x2B404848;
3323 uniqtranscale_reg_value = 0x5580B83A;
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3326 demph_reg_value = 0x2B404040;
3327 uniqtranscale_reg_value = 0x55ADDA3A;
3333 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3334 preemph_reg_value = 0x0000000;
3335 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3337 demph_reg_value = 0x2B305555;
3338 uniqtranscale_reg_value = 0x5570B83A;
3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3341 demph_reg_value = 0x2B2B4040;
3342 uniqtranscale_reg_value = 0x55ADDA3A;
3348 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3349 preemph_reg_value = 0x0006000;
3350 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3352 demph_reg_value = 0x1B405555;
3353 uniqtranscale_reg_value = 0x55ADDA3A;
3363 mutex_lock(&dev_priv->sb_lock);
3364 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3365 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3366 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3367 uniqtranscale_reg_value);
3368 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3369 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3370 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3371 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3372 mutex_unlock(&dev_priv->sb_lock);
3377 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3379 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3380 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3383 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3385 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3386 struct drm_i915_private *dev_priv = dev->dev_private;
3387 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3388 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3389 u32 deemph_reg_value, margin_reg_value, val;
3390 uint8_t train_set = intel_dp->train_set[0];
3391 enum dpio_channel ch = vlv_dport_to_channel(dport);
3392 enum i915_pipe pipe = intel_crtc->pipe;
3395 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3396 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3397 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3399 deemph_reg_value = 128;
3400 margin_reg_value = 52;
3402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3403 deemph_reg_value = 128;
3404 margin_reg_value = 77;
3406 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3407 deemph_reg_value = 128;
3408 margin_reg_value = 102;
3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3411 deemph_reg_value = 128;
3412 margin_reg_value = 154;
3413 /* FIXME extra to set for 1200 */
3419 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3420 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3422 deemph_reg_value = 85;
3423 margin_reg_value = 78;
3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3426 deemph_reg_value = 85;
3427 margin_reg_value = 116;
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3430 deemph_reg_value = 85;
3431 margin_reg_value = 154;
3437 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3438 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3440 deemph_reg_value = 64;
3441 margin_reg_value = 104;
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3444 deemph_reg_value = 64;
3445 margin_reg_value = 154;
3451 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3452 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3454 deemph_reg_value = 43;
3455 margin_reg_value = 154;
3465 mutex_lock(&dev_priv->sb_lock);
3467 /* Clear calc init */
3468 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3469 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3470 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3471 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3472 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3474 if (intel_crtc->config->lane_count > 2) {
3475 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3476 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3477 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3478 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3479 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3482 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3483 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3484 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3485 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3487 if (intel_crtc->config->lane_count > 2) {
3488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3489 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3490 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3491 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3494 /* Program swing deemph */
3495 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3496 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3497 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3498 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3499 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3502 /* Program swing margin */
3503 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3504 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3506 val &= ~DPIO_SWING_MARGIN000_MASK;
3507 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3510 * Supposedly this value shouldn't matter when unique transition
3511 * scale is disabled, but in fact it does matter. Let's just
3512 * always program the same value and hope it's OK.
3514 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3515 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3517 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3521 * The document said it needs to set bit 27 for ch0 and bit 26
3522 * for ch1. Might be a typo in the doc.
3523 * For now, for this unique transition scale selection, set bit
3524 * 27 for ch0 and ch1.
3526 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3527 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3528 if (chv_need_uniq_trans_scale(train_set))
3529 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3531 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3532 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3535 /* Start swing calculation */
3536 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3537 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3538 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3540 if (intel_crtc->config->lane_count > 2) {
3541 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3542 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3543 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3546 mutex_unlock(&dev_priv->sb_lock);
/*
 * Compute the next voltage-swing / pre-emphasis values from the sink's
 * DPCD adjust requests: take the maximum request over all active lanes
 * (the max-accumulation lines are absent from this sampled listing),
 * clamp to what the source hardware supports, flag MAX_*_REACHED when
 * clamped, and program the same value into all four train_set entries.
 * NOTE(review): listing is non-contiguous; embedded numbers show gaps.
 */
3552 intel_get_adjust_train(struct intel_dp *intel_dp,
3553 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3558 uint8_t voltage_max;
3559 uint8_t preemph_max;
/* Scan every active lane for its requested swing/pre-emphasis. */
3561 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3562 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3563 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
/* Clamp to the platform's maximum swing; tell the sink we topped out. */
3571 voltage_max = intel_dp_voltage_max(intel_dp);
3572 if (v >= voltage_max)
3573 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
/* Max pre-emphasis depends on the chosen voltage level. */
3575 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3576 if (p >= preemph_max)
3577 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
/* All 4 entries are written even if fewer lanes are in use. */
3579 for (lane = 0; lane < 4; lane++)
3580 intel_dp->train_set[lane] = v | p;
/*
 * Translate a DPCD train_set byte into gen4 DP register bits
 * (DP_VOLTAGE_* | DP_PRE_EMPHASIS_*). The `break;`/`default:` lines of
 * the two switches are absent from this sampled listing.
 */
3584 gen4_signal_levels(uint8_t train_set)
3586 uint32_t signal_levels = 0;
/* Voltage swing selection. */
3588 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3589 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3591 signal_levels |= DP_VOLTAGE_0_4;
3593 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3594 signal_levels |= DP_VOLTAGE_0_6;
3596 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3597 signal_levels |= DP_VOLTAGE_0_8;
3599 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3600 signal_levels |= DP_VOLTAGE_1_2;
/* Pre-emphasis selection. */
3603 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3604 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3606 signal_levels |= DP_PRE_EMPHASIS_0;
3608 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3609 signal_levels |= DP_PRE_EMPHASIS_3_5;
3611 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3612 signal_levels |= DP_PRE_EMPHASIS_6;
3614 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3615 signal_levels |= DP_PRE_EMPHASIS_9_5;
3618 return signal_levels;
3621 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map the combined swing+pre-emphasis request to one of SNB's fixed
 * eDP link-training levels; unsupported combinations fall back to
 * 400/600mV 0dB with a debug message.
 */
3623 gen6_edp_signal_levels(uint8_t train_set)
3625 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3626 DP_TRAIN_PRE_EMPHASIS_MASK);
3627 switch (signal_levels) {
3628 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3629 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3630 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3631 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3632 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3633 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3634 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3635 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3636 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3638 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3639 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3641 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
/* default: (label absent in this sampled listing) */
3643 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3644 "0x%x\n", signal_levels);
3645 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3649 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Same idea as the gen6 variant but for IVB eDP (port A); note the
 * fallback value is 500mV 0dB here, not 400mV.
 */
3651 gen7_edp_signal_levels(uint8_t train_set)
3653 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3654 DP_TRAIN_PRE_EMPHASIS_MASK);
3655 switch (signal_levels) {
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3657 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3658 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3659 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3661 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3663 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3664 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3665 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3666 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3668 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3669 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3670 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3671 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
/* default: (label absent in this sampled listing) */
3674 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3675 "0x%x\n", signal_levels);
3676 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3680 /* Properly updates "DP" with the correct signal levels. */
/*
 * Platform dispatcher: converts train_set[0] into platform-specific
 * signal-level bits and merges them into *DP under `mask`. For the
 * DDI/CHV/VLV paths `mask` stays 0 (no bits to clear, presumably
 * because the PHY is programmed through sideband instead — TODO
 * confirm against the missing lines).
 */
3682 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3684 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3685 enum port port = intel_dig_port->port;
3686 struct drm_device *dev = intel_dig_port->base.base.dev;
3687 uint32_t signal_levels, mask = 0;
3688 uint8_t train_set = intel_dp->train_set[0];
3691 signal_levels = ddi_signal_levels(intel_dp);
/* NOTE(review): the branch joining the next two lines is sampled out. */
3693 if (IS_BROXTON(dev))
3696 mask = DDI_BUF_EMP_MASK;
3697 } else if (IS_CHERRYVIEW(dev)) {
3698 signal_levels = chv_signal_levels(intel_dp);
3699 } else if (IS_VALLEYVIEW(dev)) {
3700 signal_levels = vlv_signal_levels(intel_dp);
3701 } else if (IS_GEN7(dev) && port == PORT_A) {
3702 signal_levels = gen7_edp_signal_levels(train_set);
3703 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3704 } else if (IS_GEN6(dev) && port == PORT_A) {
3705 signal_levels = gen6_edp_signal_levels(train_set);
3706 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3708 signal_levels = gen4_signal_levels(train_set);
3709 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3713 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3715 DRM_DEBUG_KMS("Using vswing level %d\n",
3716 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3717 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3718 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3719 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Merge: clear only the masked bits, then OR in the new levels. */
3721 *DP = (*DP & ~mask) | signal_levels;
/*
 * Program the training pattern on both ends of the link: source side
 * via the port register, sink side via DPCD DP_TRAINING_PATTERN_SET
 * (plus DP_TRAINING_LANEx_SET when not disabling). The tail of the
 * function (len handling / return) is absent from this listing.
 */
3725 intel_dp_set_link_train(struct intel_dp *intel_dp,
3727 uint8_t dp_train_pat)
3729 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3730 struct drm_i915_private *dev_priv =
3731 to_i915(intel_dig_port->base.base.dev);
3732 uint8_t buf[sizeof(intel_dp->train_set) + 1];
/* Let the platform hook translate the pattern into *DP first. */
3735 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3737 I915_WRITE(intel_dp->output_reg, *DP);
3738 POSTING_READ(intel_dp->output_reg);
3740 buf[0] = dp_train_pat;
3741 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3742 DP_TRAINING_PATTERN_DISABLE) {
3743 /* don't write DP_TRAINING_LANEx_SET on disable */
3746 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3747 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3748 len = intel_dp->lane_count + 1;
3751 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * Start training from scratch: clear train_set (unless we're reusing
 * a previously-validated set), reprogram signal levels, then set the
 * requested pattern. Returns what intel_dp_set_link_train() returns.
 */
3758 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3759 uint8_t dp_train_pat)
3761 if (!intel_dp->train_set_valid)
3762 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3763 intel_dp_set_signal_levels(intel_dp, DP);
3764 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * Mid-training adjust step: recompute train_set from the sink's
 * requests, reprogram source levels, then push the lane settings to
 * the sink. Returns true only if all lane bytes were written.
 */
3768 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3769 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3772 struct drm_i915_private *dev_priv =
3773 to_i915(intel_dig_port->base.base.dev);
3776 intel_get_adjust_train(intel_dp, link_status);
3777 intel_dp_set_signal_levels(intel_dp, DP);
3779 I915_WRITE(intel_dp->output_reg, *DP);
3780 POSTING_READ(intel_dp->output_reg);
3782 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3783 intel_dp->train_set, intel_dp->lane_count);
/* drm_dp_dpcd_write() returns bytes written on success. */
3785 return ret == intel_dp->lane_count;
/*
 * Switch the DDI transport to idle-pattern transmission and, except on
 * the PORT_A eDP case described below, wait for the idle-done status.
 * Guard conditions before the register write are absent from this
 * sampled listing.
 */
3788 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3791 struct drm_device *dev = intel_dig_port->base.base.dev;
3792 struct drm_i915_private *dev_priv = dev->dev_private;
3793 enum port port = intel_dig_port->port;
3799 val = I915_READ(DP_TP_CTL(port));
3800 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3801 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3802 I915_WRITE(DP_TP_CTL(port), val);
3805 * On PORT_A we can have only eDP in SST mode. There the only reason
3806 * we need to set idle transmission mode is to work around a HW issue
3807 * where we enable the pipe while not in idle link-training mode.
3808 * In this case there is requirement to wait for a minimum number of
3809 * idle patterns to be sent.
3814 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3816 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3819 /* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training (DP spec 3.5.1.2.2):
 * write the link configuration to the sink, start TP1 with scrambling
 * disabled, then loop reading link status and adjusting vswing /
 * pre-emphasis. Gives up after 5 full-swing retries or 5 identical
 * voltage retries. Loop-control lines (loop head, break/continue,
 * counters) are absent from this sampled listing.
 */
3821 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3823 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3824 struct drm_device *dev = encoder->dev;
3827 int voltage_tries, loop_tries;
3828 uint32_t DP = intel_dp->DP;
3829 uint8_t link_config[2];
3830 uint8_t link_bw, rate_select;
3833 intel_ddi_prepare_link_retrain(encoder);
3835 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3836 &link_bw, &rate_select);
3838 /* Write the link configuration data */
3839 link_config[0] = link_bw;
3840 link_config[1] = intel_dp->lane_count;
3841 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3842 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3843 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
/* eDP 1.4 sinks select the rate via DP_LINK_RATE_SET instead. */
3844 if (intel_dp->num_sink_rates)
3845 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3849 link_config[1] = DP_SET_ANSI_8B10B;
3850 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3854 /* clock recovery */
3855 if (!intel_dp_reset_link_train(intel_dp, &DP,
3856 DP_TRAINING_PATTERN_1 |
3857 DP_LINK_SCRAMBLING_DISABLE)) {
3858 DRM_ERROR("failed to enable link training\n");
/* Adjust loop (loop header absent from listing). */
3866 uint8_t link_status[DP_LINK_STATUS_SIZE];
3868 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3869 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3870 DRM_ERROR("failed to get link status\n");
3874 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3875 DRM_DEBUG_KMS("clock recovery OK\n");
3880 * if we used previously trained voltage and pre-emphasis values
3881 * and we don't get clock recovery, reset link training values
3883 if (intel_dp->train_set_valid) {
3884 DRM_DEBUG_KMS("clock recovery not ok, reset");
3885 /* clear the flag as we are not reusing train set */
3886 intel_dp->train_set_valid = false;
3887 if (!intel_dp_reset_link_train(intel_dp, &DP,
3888 DP_TRAINING_PATTERN_1 |
3889 DP_LINK_SCRAMBLING_DISABLE)) {
3890 DRM_ERROR("failed to enable link training\n");
3896 /* Check to see if we've tried the max voltage */
3897 for (i = 0; i < intel_dp->lane_count; i++)
3898 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3900 if (i == intel_dp->lane_count) {
3902 if (loop_tries == 5) {
3903 DRM_ERROR("too many full retries, give up\n");
3906 intel_dp_reset_link_train(intel_dp, &DP,
3907 DP_TRAINING_PATTERN_1 |
3908 DP_LINK_SCRAMBLING_DISABLE);
3913 /* Check to see if we've tried the same voltage 5 times */
3914 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3916 if (voltage_tries == 5) {
3917 DRM_ERROR("too many voltage retries, give up\n");
3922 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3924 /* Update training set as requested by target */
3925 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3926 DRM_ERROR("failed to update link training\n");
/*
 * Channel-equalization phase: select TP2 or TP3 (TPS3 when both source
 * and sink support it — required for HBR2), then loop checking EQ
 * status, restarting clock recovery if CR is lost, and retrying up to
 * 5 times. On success marks train_set_valid so the values can be
 * reused. Loop header / counters are absent from this sampled listing.
 */
3935 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3937 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3938 struct drm_device *dev = dig_port->base.base.dev;
3939 bool channel_eq = false;
3940 int tries, cr_tries;
3941 uint32_t DP = intel_dp->DP;
3942 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3945 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
3947 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3948 * also mandatory for downstream devices that support HBR2.
3950 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
3951 * supported but still not enabled.
3953 if (intel_dp_source_supports_hbr2(dev) &&
3954 drm_dp_tps3_supported(intel_dp->dpcd))
3955 training_pattern = DP_TRAINING_PATTERN_3;
3956 else if (intel_dp->link_rate == 540000)
3957 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3959 /* channel equalization */
3960 if (!intel_dp_set_link_train(intel_dp, &DP,
3962 DP_LINK_SCRAMBLING_DISABLE)) {
3963 DRM_ERROR("failed to start channel equalization\n");
/* EQ retry loop (loop header absent from listing). */
3971 uint8_t link_status[DP_LINK_STATUS_SIZE];
3974 DRM_ERROR("failed to train DP, aborting\n");
3978 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3979 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3980 DRM_ERROR("failed to get link status\n");
3984 /* Make sure clock is still ok */
3985 if (!drm_dp_clock_recovery_ok(link_status,
3986 intel_dp->lane_count)) {
3987 intel_dp->train_set_valid = false;
3988 intel_dp_link_training_clock_recovery(intel_dp);
3989 intel_dp_set_link_train(intel_dp, &DP,
3991 DP_LINK_SCRAMBLING_DISABLE);
3996 if (drm_dp_channel_eq_ok(link_status,
3997 intel_dp->lane_count)) {
4002 /* Try 5 times, then try clock recovery if that fails */
4004 intel_dp->train_set_valid = false;
4005 intel_dp_link_training_clock_recovery(intel_dp);
4006 intel_dp_set_link_train(intel_dp, &DP,
4008 DP_LINK_SCRAMBLING_DISABLE);
4014 /* Update training set as requested by target */
4015 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
4016 DRM_ERROR("failed to update link training\n");
/* Switch the DDI to idle transmission once EQ is done. */
4022 intel_dp_set_idle_link_train(intel_dp);
4027 intel_dp->train_set_valid = true;
4028 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End training: tell source+sink to transmit normal (non-training) data. */
4032 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
4034 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
4035 DP_TRAINING_PATTERN_DISABLE);
/* Full link training: clock recovery (TP1) then channel EQ (TP2/TP3). */
4039 intel_dp_start_link_train(struct intel_dp *intel_dp)
4041 intel_dp_link_training_clock_recovery(intel_dp);
4042 intel_dp_link_training_channel_equalization(intel_dp);
/*
 * Tear the (non-DDI) DP port down: put the link into idle pattern,
 * disable the port and audio, apply the IBX transcoder-A workaround,
 * then wait out the panel power-down delay. WARNs if called on DDI
 * hardware or with the port already disabled.
 */
4046 intel_dp_link_down(struct intel_dp *intel_dp)
4048 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4049 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
4050 enum port port = intel_dig_port->port;
4051 struct drm_device *dev = intel_dig_port->base.base.dev;
4052 struct drm_i915_private *dev_priv = dev->dev_private;
4053 uint32_t DP = intel_dp->DP;
4055 if (WARN_ON(HAS_DDI(dev)))
4058 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4061 DRM_DEBUG_KMS("\n");
/* CPT/gen7-A use different link-train field encodings than gmch DP. */
4063 if ((IS_GEN7(dev) && port == PORT_A) ||
4064 (HAS_PCH_CPT(dev) && port != PORT_A)) {
4065 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4066 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4068 if (IS_CHERRYVIEW(dev))
4069 DP &= ~DP_LINK_TRAIN_MASK_CHV;
4071 DP &= ~DP_LINK_TRAIN_MASK;
4072 DP |= DP_LINK_TRAIN_PAT_IDLE;
4074 I915_WRITE(intel_dp->output_reg, DP);
4075 POSTING_READ(intel_dp->output_reg);
4077 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4078 I915_WRITE(intel_dp->output_reg, DP);
4079 POSTING_READ(intel_dp->output_reg);
4082 * HW workaround for IBX, we need to move the port
4083 * to transcoder A after disabling it to allow the
4084 * matching HDMI port to be enabled on transcoder A.
4086 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
4087 /* always enable with pattern 1 (as per spec) */
4088 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
4089 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
4090 I915_WRITE(intel_dp->output_reg, DP);
4091 POSTING_READ(intel_dp->output_reg);
/* NOTE(review): the disable-again step between these writes is sampled out. */
4094 I915_WRITE(intel_dp->output_reg, DP);
4095 POSTING_READ(intel_dp->output_reg);
4098 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capabilities. Also probes
 * PSR/PSR2 support (eDP), logs TPS3 capability, reads the eDP 1.4
 * intermediate link-rate table, and fetches downstream-port info.
 * Returns false on AUX failure, missing DPCD, or failed downstream
 * read; true for a usable sink.
 */
4102 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4104 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4105 struct drm_device *dev = dig_port->base.base.dev;
4106 struct drm_i915_private *dev_priv = dev->dev_private;
4109 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
4110 sizeof(intel_dp->dpcd)) < 0)
4111 return false; /* aux transfer failed */
/* DragonFly lacks the %*ph printk extension, hence the hex helper. */
4113 #ifdef __DragonFly__
4114 char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
4115 DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
4116 dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
4118 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4121 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4122 return false; /* DPCD not present */
4124 /* Check if the panel supports PSR */
4125 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4126 if (is_edp(intel_dp)) {
4127 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4129 sizeof(intel_dp->psr_dpcd));
4130 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4131 dev_priv->psr.sink_support = true;
4132 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
/* PSR2 (gen9+) additionally requires AUX frame sync support. */
4135 if (INTEL_INFO(dev)->gen >= 9 &&
4136 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4137 uint8_t frame_sync_cap;
4139 dev_priv->psr.sink_support = true;
4140 intel_dp_dpcd_read_wake(&intel_dp->aux,
4141 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4142 &frame_sync_cap, 1);
4143 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4144 /* PSR2 needs frame sync as well */
4145 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4146 DRM_DEBUG_KMS("PSR2 %s on sink",
4147 dev_priv->psr.psr2_support ? "supported" : "not supported");
4151 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4152 yesno(intel_dp_source_supports_hbr2(dev)),
4153 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4155 /* Intermediate frequency support */
4156 if (is_edp(intel_dp) &&
4157 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4158 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4159 (rev >= 0x03)) { /* eDp v1.4 or higher */
4160 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4163 intel_dp_dpcd_read_wake(&intel_dp->aux,
4164 DP_SUPPORTED_LINK_RATES,
4166 sizeof(sink_rates));
4168 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4169 int val = le16_to_cpu(sink_rates[i]);
4174 /* Value read is in kHz while drm clock is saved in deca-kHz */
4175 intel_dp->sink_rates[i] = (val * 200) / 10;
4177 intel_dp->num_sink_rates = i;
4180 intel_dp_print_rates(intel_dp);
4182 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4183 DP_DWN_STRM_PORT_PRESENT))
4184 return true; /* native DP sink */
4186 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4187 return true; /* no per-port downstream info */
4189 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4190 intel_dp->downstream_ports,
4191 DP_MAX_DOWNSTREAM_PORTS) < 0)
4192 return false; /* downstream port status fetch failed */
/*
 * Debug helper: read and log the sink and branch IEEE OUIs when the
 * sink advertises OUI support. Read failures are silently ignored.
 */
4198 intel_dp_probe_oui(struct intel_dp *intel_dp)
4202 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4205 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4206 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4207 buf[0], buf[1], buf[2]);
4209 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4210 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4211 buf[0], buf[1], buf[2]);
/*
 * Detect MST capability (requires driver can_mst and DPCD rev >= 1.2),
 * update intel_dp->is_mst accordingly, propagate the state to the MST
 * topology manager, and return it.
 */
4215 intel_dp_probe_mst(struct intel_dp *intel_dp)
4219 if (!intel_dp->can_mst)
4222 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4225 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4226 if (buf[0] & DP_MST_CAP) {
4227 DRM_DEBUG_KMS("Sink is MST capable\n");
4228 intel_dp->is_mst = true;
4230 DRM_DEBUG_KMS("Sink is not MST capable\n");
4231 intel_dp->is_mst = false;
4236 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4237 return intel_dp->is_mst;
/*
 * Stop sink CRC generation: clear DP_TEST_SINK_START in the sink's
 * TEST_SINK register, mark the local state stopped, and re-enable IPS
 * (which sink_crc_start disabled). Error-return lines are absent from
 * this sampled listing.
 */
4243 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4245 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4246 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4250 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4251 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4256 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4257 buf & ~DP_TEST_SINK_START) < 0) {
4258 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4263 intel_dp->sink_crc.started = false;
4265 hsw_enable_ips(intel_crtc);
/*
 * Start sink CRC generation: stop any previous run, verify the sink
 * supports TEST_CRC, record the current count, disable IPS (it would
 * perturb the CRC), and set DP_TEST_SINK_START. IPS is restored if the
 * enabling write fails.
 */
4269 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4271 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4272 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4276 if (intel_dp->sink_crc.started) {
4277 ret = intel_dp_sink_crc_stop(intel_dp);
4282 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4285 if (!(buf & DP_TEST_CRC_SUPPORTED))
4288 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4290 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4293 hsw_disable_ips(intel_crtc);
4295 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4296 buf | DP_TEST_SINK_START) < 0) {
4297 hsw_enable_ips(intel_crtc);
4301 intel_dp->sink_crc.started = true;
/*
 * Read one fresh 6-byte CRC from the sink: start CRC generation, then
 * wait up to ~6 vblanks for the sink's CRC counter to advance and the
 * CRC bytes to differ from the previous read; reports unreliable or
 * absent CRCs. Always stops CRC generation before returning (loop and
 * cleanup labels are absent from this sampled listing).
 */
4305 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4307 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4308 struct drm_device *dev = dig_port->base.base.dev;
4309 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4315 ret = intel_dp_sink_crc_start(intel_dp);
4320 intel_wait_for_vblank(dev, intel_crtc->pipe);
4322 if (drm_dp_dpcd_readb(&intel_dp->aux,
4323 DP_TEST_SINK_MISC, &buf) < 0) {
4327 count = buf & DP_TEST_COUNT_MASK;
4330 * Count might be reset during the loop. In this case
4331 * last known count needs to be reset as well.
4334 intel_dp->sink_crc.last_count = 0;
4336 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4341 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4342 !memcmp(intel_dp->sink_crc.last_crc, crc,
4345 } while (--attempts && (count == 0 || old_equal_new));
/* Remember this CRC so the next call can detect a stuck counter. */
4347 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4348 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4350 if (attempts == 0) {
4351 if (old_equal_new) {
4352 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4354 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4361 intel_dp_sink_crc_stop(intel_dp);
/* Read the 1-byte SST service-IRQ vector; true iff exactly 1 byte read. */
4366 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4368 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4369 DP_DEVICE_SERVICE_IRQ_VECTOR,
4370 sink_irq_vector, 1) == 1;
/*
 * Read the 14-byte MST ESI (event status indicator) block. The return
 * statement is absent from this sampled listing; presumably it checks
 * ret == 14 — TODO confirm.
 */
4375 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4379 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4381 sink_irq_vector, 14);
/* Compliance-test stub: link-training autotest, currently always ACKs. */
4389 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4391 uint8_t test_result = DP_TEST_ACK;
/* Compliance-test stub: video-pattern autotest, currently always NAKs. */
4395 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4397 uint8_t test_result = DP_TEST_NAK;
/*
 * DP CTS EDID-read autotest: if the cached EDID read failed (NACKs,
 * DEFERs, corruption) request failsafe-resolution mode; otherwise
 * write the last EDID block's checksum back to the sink and ACK with
 * the checksum-written bit. Sets compliance_test_active so userspace
 * doesn't disturb the test.
 */
4401 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4403 uint8_t test_result = DP_TEST_NAK;
4404 struct intel_connector *intel_connector = intel_dp->attached_connector;
4405 struct drm_connector *connector = &intel_connector->base;
4407 if (intel_connector->detect_edid == NULL ||
4408 connector->edid_corrupt ||
4409 intel_dp->aux.i2c_defer_count > 6) {
4410 /* Check EDID read for NACKs, DEFERs and corruption
4411 * (DP CTS 1.2 Core r1.1)
4412 * 4.2.2.4 : Failed EDID read, I2C_NAK
4413 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4414 * 4.2.2.6 : EDID corruption detected
4415 * Use failsafe mode for all cases
4417 if (intel_dp->aux.i2c_nack_count > 0 ||
4418 intel_dp->aux.i2c_defer_count > 0)
4419 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4420 intel_dp->aux.i2c_nack_count,
4421 intel_dp->aux.i2c_defer_count);
4422 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4424 struct edid *block = intel_connector->detect_edid;
4426 /* We have to write the checksum
4427 * of the last block read
4429 block += intel_connector->detect_edid->extensions;
4431 if (!drm_dp_dpcd_write(&intel_dp->aux,
4432 DP_TEST_EDID_CHECKSUM,
4435 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4437 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4438 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4441 /* Set test active flag here so userspace doesn't interrupt things */
4442 intel_dp->compliance_test_active = 1;
/* Compliance-test stub: PHY-pattern autotest, currently always NAKs. */
4447 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4449 uint8_t test_result = DP_TEST_NAK;
/*
 * Dispatch a sink-initiated DP compliance test: reset compliance and
 * AUX i2c-error state, read DP_TEST_REQUEST, route to the matching
 * autotest handler, and write the ACK/NAK response back to the sink.
 * switch/break lines are absent from this sampled listing.
 */
4453 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4455 uint8_t response = DP_TEST_NAK;
4459 intel_dp->compliance_test_active = 0;
4460 intel_dp->compliance_test_type = 0;
4461 intel_dp->compliance_test_data = 0;
/* Counters feed the EDID autotest's NACK/DEFER bookkeeping. */
4463 intel_dp->aux.i2c_nack_count = 0;
4464 intel_dp->aux.i2c_defer_count = 0;
4466 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4468 DRM_DEBUG_KMS("Could not read test request from sink\n");
4473 case DP_TEST_LINK_TRAINING:
4474 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4475 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4476 response = intel_dp_autotest_link_training(intel_dp);
4478 case DP_TEST_LINK_VIDEO_PATTERN:
4479 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4480 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4481 response = intel_dp_autotest_video_pattern(intel_dp);
4483 case DP_TEST_LINK_EDID_READ:
4484 DRM_DEBUG_KMS("EDID test requested\n");
4485 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4486 response = intel_dp_autotest_edid(intel_dp);
4488 case DP_TEST_LINK_PHY_TEST_PATTERN:
4489 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4490 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4491 response = intel_dp_autotest_phy_pattern(intel_dp);
4494 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4499 status = drm_dp_dpcd_write(&intel_dp->aux,
4503 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * MST HPD/IRQ service loop: read the ESI block, retrain if channel EQ
 * dropped while links are active, hand the ESI to the topology manager,
 * ack the handled bits (up to 3 write retries), and re-read while more
 * events are pending. If the ESI read fails the sink is assumed gone
 * and MST is torn down with a hotplug event.
 */
4508 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4512 if (intel_dp->is_mst) {
4517 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4521 /* check link status - esi[10] = 0x200c */
4522 if (intel_dp->active_mst_links &&
4523 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4524 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4525 intel_dp_start_link_train(intel_dp);
4526 intel_dp_stop_link_train(intel_dp);
4529 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4530 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4533 for (retry = 0; retry < 3; retry++) {
4535 wret = drm_dp_dpcd_write(&intel_dp->aux,
4536 DP_SINK_COUNT_ESI+1,
4543 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4545 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
/* ESI read failure path: give up on MST and notify userspace. */
4553 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4554 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4555 intel_dp->is_mst = false;
4556 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4557 /* send a hotplug event */
4558 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4566 * According to DP spec
4569 * 2. Configure link according to Receiver Capabilities
4570 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4571 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-pulse/HPD link check for SST: bail out early if the encoder has
 * no active CRTC or status reads fail; clear and log any sink service
 * IRQs; retrain when channel EQ is no longer OK. Caller must hold
 * connection_mutex (asserted below).
 */
4574 intel_dp_check_link_status(struct intel_dp *intel_dp)
4576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4577 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4579 u8 link_status[DP_LINK_STATUS_SIZE];
4581 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4583 if (!intel_encoder->base.crtc)
4586 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4589 /* Try to read receiver status if the link appears to be up */
4590 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4594 /* Now read the DPCD to see if it's actually running */
4595 if (!intel_dp_get_dpcd(intel_dp)) {
4599 /* Try to read the source of the interrupt */
4600 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4601 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4602 /* Clear interrupt source */
4603 drm_dp_dpcd_writeb(&intel_dp->aux,
4604 DP_DEVICE_SERVICE_IRQ_VECTOR,
4607 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4608 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n")
4609 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4610 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4613 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4614 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4615 intel_encoder->base.name);
4616 intel_dp_start_link_train(intel_dp);
4617 intel_dp_stop_link_train(intel_dp);
4621 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from DPCD alone: connected for a native
 * sink; for branch devices consult SINK_COUNT (HPD-capable, rev>=1.1),
 * else probe DDC, else report unknown for VGA/NON_EDID downstream
 * types; anything out of spec is treated as disconnected.
 */
4622 static enum drm_connector_status
4623 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4625 uint8_t *dpcd = intel_dp->dpcd;
4628 if (!intel_dp_get_dpcd(intel_dp))
4629 return connector_status_disconnected;
4631 /* if there's no downstream port, we're done */
4632 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4633 return connector_status_connected;
4635 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4636 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4637 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4640 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4642 return connector_status_unknown;
4644 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4645 : connector_status_disconnected;
4648 /* If no HPD, poke DDC gently */
4649 if (drm_probe_ddc(intel_dp->aux.ddc))
4650 return connector_status_connected;
4652 /* Well we tried, say unknown for unreliable port types */
4653 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4654 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4655 if (type == DP_DS_PORT_TYPE_VGA ||
4656 type == DP_DS_PORT_TYPE_NON_EDID)
4657 return connector_status_unknown;
/* Pre-1.1 DPCD: fall back to the coarse downstream-type field. */
4659 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4660 DP_DWN_STRM_PORT_TYPE_MASK;
4661 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4662 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4663 return connector_status_unknown;
4666 /* Anything else is out of spec, warn and ignore */
4667 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4668 return connector_status_disconnected;
/*
 * eDP detect: panels can't be unplugged, so defer to the lid/panel
 * check and treat "unknown" as connected.
 */
4671 static enum drm_connector_status
4672 edp_detect(struct intel_dp *intel_dp)
4674 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4675 enum drm_connector_status status;
4677 status = intel_panel_detect(dev);
4678 if (status == connector_status_unknown)
4679 status = connector_status_connected;
/*
 * IBX PCH live-state check: map the port to its SDEISR hotplug bit and
 * test it. Case labels preceding each bit assignment are absent from
 * this sampled listing.
 */
4684 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4685 struct intel_digital_port *port)
4689 switch (port->port) {
4693 bit = SDE_PORTB_HOTPLUG;
4696 bit = SDE_PORTC_HOTPLUG;
4699 bit = SDE_PORTD_HOTPLUG;
4702 MISSING_CASE(port->port);
4706 return I915_READ(SDEISR) & bit;
/*
 * CPT/SPT PCH live-state check: same idea as IBX but with the CPT bit
 * layout, plus port E on SPT.
 */
4709 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4710 struct intel_digital_port *port)
4714 switch (port->port) {
4718 bit = SDE_PORTB_HOTPLUG_CPT;
4721 bit = SDE_PORTC_HOTPLUG_CPT;
4724 bit = SDE_PORTD_HOTPLUG_CPT;
4727 bit = SDE_PORTE_HOTPLUG_SPT;
4730 MISSING_CASE(port->port);
4734 return I915_READ(SDEISR) & bit;
/* G4x live-state check via PORT_HOTPLUG_STAT live-status bits. */
4737 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4738 struct intel_digital_port *port)
4742 switch (port->port) {
4744 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4747 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4750 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4753 MISSING_CASE(port->port);
4757 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* VLV live-state check; same register as G4x, VLV-specific bit layout. */
4760 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4761 struct intel_digital_port *port)
4765 switch (port->port) {
4767 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4770 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4773 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4776 MISSING_CASE(port->port);
4780 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/*
 * Broxton live-state check: resolve the HPD pin back to a port, then
 * test the matching DDI bit in GEN8_DE_PORT_ISR.
 */
4783 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4784 struct intel_digital_port *intel_dig_port)
4786 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4790 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4793 bit = BXT_DE_PORT_HP_DDIA;
4796 bit = BXT_DE_PORT_HP_DDIB;
4799 bit = BXT_DE_PORT_HP_DDIC;
4806 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4810 * intel_digital_port_connected - is the specified port connected?
4811 * @dev_priv: i915 private structure
4812 * @port: the port to test
4814 * Return %true if @port is connected, %false otherwise.
/* Platform dispatcher for the live-state helpers above. Note: IBX is
 * checked before the generic PCH-split test because HAS_PCH_SPLIT()
 * also matches IBX. */
4816 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4817 struct intel_digital_port *port)
4819 if (HAS_PCH_IBX(dev_priv))
4820 return ibx_digital_port_connected(dev_priv, port);
4821 if (HAS_PCH_SPLIT(dev_priv))
4822 return cpt_digital_port_connected(dev_priv, port);
4823 else if (IS_BROXTON(dev_priv))
4824 return bxt_digital_port_connected(dev_priv, port);
4825 else if (IS_VALLEYVIEW(dev_priv))
4826 return vlv_digital_port_connected(dev_priv, port);
4828 return g4x_digital_port_connected(dev_priv, port);
/*
 * Detect a DP sink on PCH-split platforms: first consult the live
 * hot-plug status, and only if the port reports connected go on to probe
 * the sink's DPCD.
 */
4831 static enum drm_connector_status
4832 ironlake_dp_detect(struct intel_dp *intel_dp)
4834 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4835 struct drm_i915_private *dev_priv = dev->dev_private;
4836 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4838 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4839 return connector_status_disconnected;
4841 return intel_dp_detect_dpcd(intel_dp);
/*
 * Detect a DP sink on G4x-class platforms. eDP panels cannot really be
 * unplugged, so for eDP the lid/panel state from intel_panel_detect() is
 * used (treating "unknown" as connected). For external DP, check live
 * hot-plug status and then probe the DPCD.
 * NOTE(review): the return statement of the eDP branch is missing from
 * this extraction.
 */
4844 static enum drm_connector_status
4845 g4x_dp_detect(struct intel_dp *intel_dp)
4847 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4848 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4850 /* Can't disconnect eDP, but you can close the lid... */
4851 if (is_edp(intel_dp)) {
4852 enum drm_connector_status status;
4854 status = intel_panel_detect(dev);
4855 if (status == connector_status_unknown)
4856 status = connector_status_connected;
4860 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4861 return connector_status_disconnected;
4863 return intel_dp_detect_dpcd(intel_dp);
/*
 * Return the sink's EDID: duplicate the connector's cached EDID when one
 * exists (a cached IS_ERR value means EDID reads are known to fail),
 * otherwise read it fresh over the AUX/DDC channel.
 */
4866 static struct edid *
4867 intel_dp_get_edid(struct intel_dp *intel_dp)
4869 struct intel_connector *intel_connector = intel_dp->attached_connector;
4871 /* use cached edid if we have one */
4872 if (intel_connector->edid) {
4874 if (IS_ERR(intel_connector->edid))
4877 return drm_edid_duplicate(intel_connector->edid);
4879 return drm_get_edid(&intel_connector->base,
/*
 * Fetch and cache the EDID for this detect cycle, then derive has_audio:
 * a user-forced audio property overrides what the EDID reports.
 */
4884 intel_dp_set_edid(struct intel_dp *intel_dp)
4886 struct intel_connector *intel_connector = intel_dp->attached_connector;
4889 edid = intel_dp_get_edid(intel_dp);
4890 intel_connector->detect_edid = edid;
4892 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4893 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4895 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/*
 * Drop the per-detect EDID cache and reset the derived audio state.
 * kfree(NULL) is a no-op, so no NULL check is needed.
 */
4899 intel_dp_unset_edid(struct intel_dp *intel_dp)
4901 struct intel_connector *intel_connector = intel_dp->attached_connector;
4903 kfree(intel_connector->detect_edid);
4904 intel_connector->detect_edid = NULL;
4906 intel_dp->has_audio = false;
/*
 * drm_connector_funcs.detect callback. Holds the port's AUX power domain
 * while probing, invalidates the cached EDID, then runs the per-platform
 * detect path (eDP / PCH-split / G4x). On a connected sink it probes the
 * OUI and MST capability — an MST-capable sink is reported disconnected
 * here since MST connectors are handled separately — reads the EDID, and
 * services any pending sink IRQs (automated test requests, CP IRQ).
 * NOTE(review): several lines (goto labels, variable declarations,
 * the final return) are missing from this extraction.
 */
4909 static enum drm_connector_status
4910 intel_dp_detect(struct drm_connector *connector, bool force)
4912 struct intel_dp *intel_dp = intel_attached_dp(connector);
4913 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4914 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4915 struct drm_device *dev = connector->dev;
4916 enum drm_connector_status status;
4917 enum intel_display_power_domain power_domain;
4921 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4922 connector->base.id, connector->name);
4923 intel_dp_unset_edid(intel_dp);
4925 if (intel_dp->is_mst) {
4926 /* MST devices are disconnected from a monitor POV */
4927 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4928 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4929 return connector_status_disconnected;
4932 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4933 intel_display_power_get(to_i915(dev), power_domain);
4935 /* Can't disconnect eDP, but you can close the lid... */
4936 if (is_edp(intel_dp))
4937 status = edp_detect(intel_dp);
4938 else if (HAS_PCH_SPLIT(dev))
4939 status = ironlake_dp_detect(intel_dp);
4941 status = g4x_dp_detect(intel_dp);
4942 if (status != connector_status_connected)
4945 intel_dp_probe_oui(intel_dp);
4947 ret = intel_dp_probe_mst(intel_dp);
4949 /* if we are in MST mode then this connector
4950 won't appear connected or have anything with EDID on it */
4951 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4952 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4953 status = connector_status_disconnected;
4957 intel_dp_set_edid(intel_dp);
4959 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4960 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4961 status = connector_status_connected;
4963 /* Try to read the source of the interrupt */
4964 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4965 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4966 /* Clear interrupt source */
4967 drm_dp_dpcd_writeb(&intel_dp->aux,
4968 DP_DEVICE_SERVICE_IRQ_VECTOR,
4971 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4972 intel_dp_handle_test_request(intel_dp);
4973 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4974 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4978 intel_display_power_put(to_i915(dev), power_domain);
/*
 * drm_connector_funcs.force callback: refresh the cached EDID when the
 * connector status has been forced by the user. Only re-reads the EDID
 * (under the AUX power domain) if the connector is currently connected.
 */
4983 intel_dp_force(struct drm_connector *connector)
4985 struct intel_dp *intel_dp = intel_attached_dp(connector);
4986 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4987 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4988 enum intel_display_power_domain power_domain;
4990 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4991 connector->base.id, connector->name);
4992 intel_dp_unset_edid(intel_dp);
4994 if (connector->status != connector_status_connected)
4997 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4998 intel_display_power_get(dev_priv, power_domain);
5000 intel_dp_set_edid(intel_dp);
5002 intel_display_power_put(dev_priv, power_domain);
5004 if (intel_encoder->type != INTEL_OUTPUT_EDP)
5005 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes callback: populate the connector's
 * mode list from the EDID cached at detect time. If an eDP panel exposed
 * no EDID, fall back to the fixed panel mode read at init.
 * NOTE(review): some lines (return statements, NULL checks) are missing
 * from this extraction.
 */
5008 static int intel_dp_get_modes(struct drm_connector *connector)
5010 struct intel_connector *intel_connector = to_intel_connector(connector);
5013 edid = intel_connector->detect_edid;
5015 int ret = intel_connector_update_modes(connector, edid);
5020 /* if eDP has no EDID, fall back to fixed mode */
5021 if (is_edp(intel_attached_dp(connector)) &&
5022 intel_connector->panel.fixed_mode) {
5023 struct drm_display_mode *mode;
5025 mode = drm_mode_duplicate(connector->dev,
5026 intel_connector->panel.fixed_mode);
5028 drm_mode_probed_add(connector, mode);
/*
 * Report whether the sink supports audio, based on the EDID cached at
 * detect time (false when no EDID is cached).
 */
5037 intel_dp_detect_audio(struct drm_connector *connector)
5039 bool has_audio = false;
5042 edid = to_intel_connector(connector)->detect_edid;
5044 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property callback. Handles three properties:
 *  - force_audio: override/auto-detect sink audio capability;
 *  - broadcast_rgb: select auto/full/limited RGB color range;
 *  - scaling_mode (eDP only): panel fitter mode; SCALE_NONE is rejected.
 * When a property actually changed and the encoder has an active CRTC, a
 * modeset restore is triggered so the new value takes effect.
 * NOTE(review): several lines (early returns, switch/case labels, goto
 * targets) are missing from this extraction.
 */
5050 intel_dp_set_property(struct drm_connector *connector,
5051 struct drm_property *property,
5054 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5055 struct intel_connector *intel_connector = to_intel_connector(connector);
5056 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
5057 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5060 ret = drm_object_property_set_value(&connector->base, property, val);
5064 if (property == dev_priv->force_audio_property) {
5068 if (i == intel_dp->force_audio)
5071 intel_dp->force_audio = i;
5073 if (i == HDMI_AUDIO_AUTO)
5074 has_audio = intel_dp_detect_audio(connector);
5076 has_audio = (i == HDMI_AUDIO_ON);
5078 if (has_audio == intel_dp->has_audio)
5081 intel_dp->has_audio = has_audio;
5085 if (property == dev_priv->broadcast_rgb_property) {
5086 bool old_auto = intel_dp->color_range_auto;
5087 bool old_range = intel_dp->limited_color_range;
5090 case INTEL_BROADCAST_RGB_AUTO:
5091 intel_dp->color_range_auto = true;
5093 case INTEL_BROADCAST_RGB_FULL:
5094 intel_dp->color_range_auto = false;
5095 intel_dp->limited_color_range = false;
5097 case INTEL_BROADCAST_RGB_LIMITED:
5098 intel_dp->color_range_auto = false;
5099 intel_dp->limited_color_range = true;
5105 if (old_auto == intel_dp->color_range_auto &&
5106 old_range == intel_dp->limited_color_range)
5112 if (is_edp(intel_dp) &&
5113 property == connector->dev->mode_config.scaling_mode_property) {
5114 if (val == DRM_MODE_SCALE_NONE) {
5115 DRM_DEBUG_KMS("no scaling not supported\n");
5119 if (intel_connector->panel.fitting_mode == val) {
5120 /* the eDP scaling property is not changed */
5123 intel_connector->panel.fitting_mode = val;
5131 if (intel_encoder->base.crtc)
5132 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy callback: free the detect-time EDID cache,
 * the long-lived EDID (which may be an ERR_PTR sentinel, hence the
 * IS_ERR_OR_NULL guard), tear down eDP panel state, and release the base
 * connector.
 */
5138 intel_dp_connector_destroy(struct drm_connector *connector)
5140 struct intel_connector *intel_connector = to_intel_connector(connector);
5142 kfree(intel_connector->detect_edid);
5144 if (!IS_ERR_OR_NULL(intel_connector->edid))
5145 kfree(intel_connector->edid);
5147 /* Can't call is_edp() since the encoder may have been destroyed
5149 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5150 intel_panel_fini(&intel_connector->panel);
5152 drm_connector_cleanup(connector);
/*
 * drm_encoder_funcs.destroy callback: unregister the AUX channel, clean
 * up MST state, and for eDP cancel the delayed VDD-off work and force
 * VDD off synchronously before freeing the digital port.
 */
5156 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5158 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5159 struct intel_dp *intel_dp = &intel_dig_port->dp;
5161 drm_dp_aux_unregister(&intel_dp->aux);
5162 intel_dp_mst_encoder_cleanup(intel_dig_port);
5163 if (is_edp(intel_dp)) {
5164 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5166 * vdd might still be enabled due to the delayed vdd off.
5167 * Make sure vdd is actually turned off here.
5170 edp_panel_vdd_off_sync(intel_dp);
5171 pps_unlock(intel_dp);
5174 if (intel_dp->edp_notifier.notifier_call) {
5175 unregister_reboot_notifier(&intel_dp->edp_notifier);
5176 intel_dp->edp_notifier.notifier_call = NULL;
5180 drm_encoder_cleanup(encoder);
5181 kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, cancel the pending delayed VDD-off work and
 * synchronously turn panel VDD off so we don't suspend with it enabled.
 * No-op for non-eDP encoders.
 */
5184 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5186 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5188 if (!is_edp(intel_dp))
5192 * vdd might still be enabled due to the delayed vdd off.
5193 * Make sure vdd is actually turned off here.
5195 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5197 edp_panel_vdd_off_sync(intel_dp);
5198 pps_unlock(intel_dp);
/*
 * If the BIOS left panel VDD enabled at boot/resume, adopt that state:
 * take the matching AUX power-domain reference our state tracking
 * expects, and schedule the usual delayed VDD off so the reference is
 * eventually dropped. Caller must hold pps_mutex (asserted below).
 */
5201 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5204 struct drm_device *dev = intel_dig_port->base.base.dev;
5205 struct drm_i915_private *dev_priv = dev->dev_private;
5206 enum intel_display_power_domain power_domain;
5208 lockdep_assert_held(&dev_priv->pps_mutex);
5210 if (!edp_have_panel_vdd(intel_dp))
5214 * The VDD bit needs a power domain reference, so if the bit is
5215 * already enabled when we boot or resume, grab this reference and
5216 * schedule a vdd off, so we don't hold on to the reference
5219 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5220 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5221 intel_display_power_get(dev_priv, power_domain);
5223 edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset callback (eDP only): re-read the power
 * sequencer assignment the BIOS may have changed (VLV), then sanitize
 * any VDD state left enabled, all under the PPS lock.
 */
5226 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5228 struct intel_dp *intel_dp;
5230 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5233 intel_dp = enc_to_intel_dp(encoder);
5238 * Read out the current power sequencer assignment,
5239 * in case the BIOS did something with it.
5241 if (IS_VALLEYVIEW(encoder->dev))
5242 vlv_initial_power_sequencer_setup(intel_dp);
5244 intel_edp_panel_vdd_sanitize(intel_dp);
5246 pps_unlock(intel_dp);
/* Connector vtable: atomic-helper based, with DP-specific detect/force/
 * property/destroy implementations defined above. */
5249 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5250 .dpms = drm_atomic_helper_connector_dpms,
5251 .detect = intel_dp_detect,
5252 .force = intel_dp_force,
5253 .fill_modes = drm_helper_probe_single_connector_modes,
5254 .set_property = intel_dp_set_property,
5255 .atomic_get_property = intel_connector_atomic_get_property,
5256 .destroy = intel_dp_connector_destroy,
5257 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5258 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe-helper vtable: mode enumeration/validation and encoder lookup. */
5261 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5262 .get_modes = intel_dp_get_modes,
5263 .mode_valid = intel_dp_mode_valid,
5264 .best_encoder = intel_best_encoder,
/* Encoder vtable: reset on resume, teardown on destroy. */
5267 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5268 .reset = intel_dp_encoder_reset,
5269 .destroy = intel_dp_encoder_destroy,
/*
 * Hot-plug interrupt handler for a DP digital port. Long pulses on eDP
 * are ignored (VDD toggling can generate them, which would otherwise
 * loop endlessly — see comment below). Otherwise, under the AUX power
 * domain: mark link training stale, re-check live status and DPCD, probe
 * OUI and MST, and either service MST events or re-validate the SST
 * link status under the connection mutex. If an MST sink vanished,
 * tear down the MST topology manager state.
 * NOTE(review): goto labels, some branch bodies and the return paths are
 * missing from this extraction.
 */
5273 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5275 struct intel_dp *intel_dp = &intel_dig_port->dp;
5276 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5277 struct drm_device *dev = intel_dig_port->base.base.dev;
5278 struct drm_i915_private *dev_priv = dev->dev_private;
5279 enum intel_display_power_domain power_domain;
5282 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5283 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5284 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5286 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5288 * vdd off can generate a long pulse on eDP which
5289 * would require vdd on to handle it, and thus we
5290 * would end up in an endless cycle of
5291 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5293 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5294 port_name(intel_dig_port->port));
5298 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5299 port_name(intel_dig_port->port),
5300 long_hpd ? "long" : "short");
5302 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5303 intel_display_power_get(dev_priv, power_domain);
5306 /* indicate that we need to restart link training */
5307 intel_dp->train_set_valid = false;
5309 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5312 if (!intel_dp_get_dpcd(intel_dp)) {
5316 intel_dp_probe_oui(intel_dp);
5318 if (!intel_dp_probe_mst(intel_dp)) {
5322 if (intel_dp->is_mst) {
5324 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5329 if (!intel_dp->is_mst) {
5330 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5331 intel_dp_check_link_status(intel_dp);
5332 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5340 /* if we were in MST mode, and device is not there get out of MST mode */
5341 if (intel_dp->is_mst) {
5342 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5343 intel_dp->is_mst = false;
5345 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5349 intel_display_power_put(dev_priv, power_domain);
5354 /* Return which DP Port should be selected for Transcoder DP control */
/*
 * Walk the encoders attached to @crtc and return the output register of
 * the first DP/eDP encoder found (used to program transcoder DP control).
 * NOTE(review): the fallback return value for "no DP encoder on this
 * crtc" is missing from this extraction.
 */
5356 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5358 struct drm_device *dev = crtc->dev;
5359 struct intel_encoder *intel_encoder;
5360 struct intel_dp *intel_dp;
5362 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5363 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5365 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5366 intel_encoder->type == INTEL_OUTPUT_EDP)
5367 return intel_dp->output_reg;
5373 /* check the VBT to see whether the eDP is on another port */
/*
 * Returns true when the VBT child-device table marks @port as an eDP
 * port. Bails out early on gen < 5 (eDP not supported there) and when
 * the VBT lists no child devices. The port_mapping table translates the
 * driver port enum into the VBT DVO port encoding.
 * NOTE(review): the loop's closing return statements are missing from
 * this extraction.
 */
5374 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5376 struct drm_i915_private *dev_priv = dev->dev_private;
5377 union child_device_config *p_child;
5379 static const short port_mapping[] = {
5380 [PORT_B] = DVO_PORT_DPB,
5381 [PORT_C] = DVO_PORT_DPC,
5382 [PORT_D] = DVO_PORT_DPD,
5383 [PORT_E] = DVO_PORT_DPE,
5387 * eDP not supported on g4x. so bail out early just
5388 * for a bit extra safety in case the VBT is bonkers.
5390 if (INTEL_INFO(dev)->gen < 5)
5396 if (!dev_priv->vbt.child_dev_num)
5399 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5400 p_child = dev_priv->vbt.child_dev + i;
5402 if (p_child->common.dvo_port == port_mapping[port] &&
5403 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5404 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the standard DP connector properties: force-audio and
 * broadcast-RGB (color range defaults to auto), plus — for eDP — the
 * scaling-mode property, initialized to aspect-preserving panel fitting.
 */
5411 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5413 struct intel_connector *intel_connector = to_intel_connector(connector);
5415 intel_attach_force_audio_property(connector);
5416 intel_attach_broadcast_rgb_property(connector);
5417 intel_dp->color_range_auto = true;
5419 if (is_edp(intel_dp)) {
5420 drm_mode_create_scaling_mode_property(connector->dev);
5421 drm_object_attach_property(
5423 connector->dev->mode_config.scaling_mode_property,
5424 DRM_MODE_SCALE_ASPECT);
5425 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel power-sequencing timestamps to "now" so the delay
 * bookkeeping starts from a consistent state at init.
 */
5429 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5431 intel_dp->last_power_cycle = jiffies;
5432 intel_dp->last_power_on = jiffies;
5433 intel_dp->last_backlight_off = jiffies;
/*
 * Read back the eDP panel power sequencer (PPS) delays from hardware,
 * compare against the VBT-provided values, and store the safe maximum of
 * the two in intel_dp->pps_delays (falling back to eDP 1.3 spec limits
 * when both are zero). Also derives the millisecond delay fields used by
 * the wait helpers. Register layout differs per platform: BXT keeps the
 * power-cycle delay in PP_CONTROL (no divisor register), PCH and VLV use
 * a separate PP_DIVISOR. Caller must hold pps_mutex.
 * NOTE(review): some lines (early return, a few assign_final invocations,
 * #undef lines) are missing from this extraction.
 */
5437 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5438 struct intel_dp *intel_dp)
5440 struct drm_i915_private *dev_priv = dev->dev_private;
5441 struct edp_power_seq cur, vbt, spec,
5442 *final = &intel_dp->pps_delays;
5443 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5444 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5446 lockdep_assert_held(&dev_priv->pps_mutex);
5448 /* already initialized? */
5449 if (final->t11_t12 != 0)
5452 if (IS_BROXTON(dev)) {
5454 * TODO: BXT has 2 sets of PPS registers.
5455 * Correct Register for Broxton need to be identified
5456 * using VBT. hardcoding for now
5458 pp_ctrl_reg = BXT_PP_CONTROL(0);
5459 pp_on_reg = BXT_PP_ON_DELAYS(0);
5460 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5461 } else if (HAS_PCH_SPLIT(dev)) {
5462 pp_ctrl_reg = PCH_PP_CONTROL;
5463 pp_on_reg = PCH_PP_ON_DELAYS;
5464 pp_off_reg = PCH_PP_OFF_DELAYS;
5465 pp_div_reg = PCH_PP_DIVISOR;
5467 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5469 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5470 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5471 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5472 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5475 /* Workaround: Need to write PP_CONTROL with the unlock key as
5476 * the very first thing. */
5477 pp_ctl = ironlake_get_pp_control(intel_dp);
5479 pp_on = I915_READ(pp_on_reg);
5480 pp_off = I915_READ(pp_off_reg);
5481 if (!IS_BROXTON(dev)) {
5482 I915_WRITE(pp_ctrl_reg, pp_ctl);
5483 pp_div = I915_READ(pp_div_reg);
5486 /* Pull timing values out of registers */
5487 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5488 PANEL_POWER_UP_DELAY_SHIFT;
5490 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5491 PANEL_LIGHT_ON_DELAY_SHIFT;
5493 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5494 PANEL_LIGHT_OFF_DELAY_SHIFT;
5496 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5497 PANEL_POWER_DOWN_DELAY_SHIFT;
5499 if (IS_BROXTON(dev)) {
5500 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5501 BXT_POWER_CYCLE_DELAY_SHIFT;
5503 cur.t11_t12 = (tmp - 1) * 1000;
5507 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5508 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5511 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5512 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5514 vbt = dev_priv->vbt.edp_pps;
5516 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5517 * our hw here, which are all in 100usec. */
5518 spec.t1_t3 = 210 * 10;
5519 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5520 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5521 spec.t10 = 500 * 10;
5522 /* This one is special and actually in units of 100ms, but zero
5523 * based in the hw (so we need to add 100 ms). But the sw vbt
5524 * table multiplies it with 1000 to make it in units of 100usec,
5526 spec.t11_t12 = (510 + 100) * 10;
5528 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5529 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5531 /* Use the max of the register settings and vbt. If both are
5532 * unset, fall back to the spec limits. */
5533 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5535 max(cur.field, vbt.field))
5536 assign_final(t1_t3);
5540 assign_final(t11_t12);
5543 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5544 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5545 intel_dp->backlight_on_delay = get_delay(t8);
5546 intel_dp->backlight_off_delay = get_delay(t9);
5547 intel_dp->panel_power_down_delay = get_delay(t10);
5548 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5551 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5552 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5553 intel_dp->panel_power_cycle_delay);
5555 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5556 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the resolved PPS delays back into the panel power sequencer
 * registers. Backlight delays are written as 1 because the driver does
 * manual waits for T8/T9 (see comment below). The power-cycle divisor is
 * computed from the raw clock on non-BXT; BXT stores the cycle delay in
 * PP_CONTROL instead of a divisor register. Also selects which port the
 * PPS drives where the hardware has a port-select field (not on
 * Haswell+). Caller must hold pps_mutex.
 * NOTE(review): a few lines (e.g. the port==PORT_A conditional for
 * PANEL_PORT_SELECT_DPA and the final WARN/return) are missing from this
 * extraction — verify the port_sel logic against the full source.
 */
5560 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5561 struct intel_dp *intel_dp)
5563 struct drm_i915_private *dev_priv = dev->dev_private;
5564 u32 pp_on, pp_off, pp_div, port_sel = 0;
5565 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5566 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5567 enum port port = dp_to_dig_port(intel_dp)->port;
5568 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5570 lockdep_assert_held(&dev_priv->pps_mutex);
5572 if (IS_BROXTON(dev)) {
5574 * TODO: BXT has 2 sets of PPS registers.
5575 * Correct Register for Broxton need to be identified
5576 * using VBT. hardcoding for now
5578 pp_ctrl_reg = BXT_PP_CONTROL(0);
5579 pp_on_reg = BXT_PP_ON_DELAYS(0);
5580 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5582 } else if (HAS_PCH_SPLIT(dev)) {
5583 pp_on_reg = PCH_PP_ON_DELAYS;
5584 pp_off_reg = PCH_PP_OFF_DELAYS;
5585 pp_div_reg = PCH_PP_DIVISOR;
5587 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5589 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5590 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5591 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5595 * And finally store the new values in the power sequencer. The
5596 * backlight delays are set to 1 because we do manual waits on them. For
5597 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5598 * we'll end up waiting for the backlight off delay twice: once when we
5599 * do the manual sleep, and once when we disable the panel and wait for
5600 * the PP_STATUS bit to become zero.
5602 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5603 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5604 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5605 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5606 /* Compute the divisor for the pp clock, simply match the Bspec
5608 if (IS_BROXTON(dev)) {
5609 pp_div = I915_READ(pp_ctrl_reg);
5610 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5611 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5612 << BXT_POWER_CYCLE_DELAY_SHIFT);
5614 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5615 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5616 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5619 /* Haswell doesn't have any port selection bits for the panel
5620 * power sequencer any more. */
5621 if (IS_VALLEYVIEW(dev)) {
5622 port_sel = PANEL_PORT_SELECT_VLV(port);
5623 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5625 port_sel = PANEL_PORT_SELECT_DPA;
5627 port_sel = PANEL_PORT_SELECT_DPD;
5632 I915_WRITE(pp_on_reg, pp_on);
5633 I915_WRITE(pp_off_reg, pp_off);
5634 if (IS_BROXTON(dev))
5635 I915_WRITE(pp_ctrl_reg, pp_div);
5637 I915_WRITE(pp_div_reg, pp_div);
5639 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5640 I915_READ(pp_on_reg),
5641 I915_READ(pp_off_reg),
5643 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5644 I915_READ(pp_div_reg));
5648 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5650 * @refresh_rate: RR to be programmed
5652 * This function gets called when refresh rate (RR) has to be changed from
5653 * one frequency to another. Switches can be between high and low RR
5654 * supported by the panel or to any other RR based on media playback (in
5655 * this case, RR value needs to be passed from user space).
5657 * The caller of this function needs to take a lock on dev_priv->drrs.
/*
 * Implementation notes: validates the requested rate and the DRRS
 * state (seamless support, active CRTC), maps the rate to HIGH/LOW RR,
 * then programs it — on gen8+ (except CHV) by switching the M/N values,
 * on gen7+ via the PIPECONF eDP RR mode-switch bit (VLV has its own bit).
 * NOTE(review): several early-return lines are missing from this
 * extraction.
 */
5659 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5661 struct drm_i915_private *dev_priv = dev->dev_private;
5662 struct intel_encoder *encoder;
5663 struct intel_digital_port *dig_port = NULL;
5664 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5665 struct intel_crtc_state *config = NULL;
5666 struct intel_crtc *intel_crtc = NULL;
5667 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5669 if (refresh_rate <= 0) {
5670 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5674 if (intel_dp == NULL) {
5675 DRM_DEBUG_KMS("DRRS not supported.\n");
5680 * FIXME: This needs proper synchronization with psr state for some
5681 * platforms that cannot have PSR and DRRS enabled at the same time.
5684 dig_port = dp_to_dig_port(intel_dp);
5685 encoder = &dig_port->base;
5686 intel_crtc = to_intel_crtc(encoder->base.crtc);
5689 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5693 config = intel_crtc->config;
5695 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5696 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5700 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5702 index = DRRS_LOW_RR;
5704 if (index == dev_priv->drrs.refresh_rate_type) {
5706 "DRRS requested for previously set RR...ignoring\n");
5710 if (!intel_crtc->active) {
5711 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5715 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5718 intel_dp_set_m_n(intel_crtc, M1_N1);
5721 intel_dp_set_m_n(intel_crtc, M2_N2);
5725 DRM_ERROR("Unsupported refreshrate type\n");
5727 } else if (INTEL_INFO(dev)->gen > 6) {
5728 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5731 val = I915_READ(reg);
5732 if (index > DRRS_HIGH_RR) {
5733 if (IS_VALLEYVIEW(dev))
5734 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5736 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5738 if (IS_VALLEYVIEW(dev))
5739 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5741 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5743 I915_WRITE(reg, val);
5746 dev_priv->drrs.refresh_rate_type = index;
5748 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5752 * intel_edp_drrs_enable - init drrs struct if supported
5753 * @intel_dp: DP struct
5755 * Initializes frontbuffer_bits and drrs.dp
/*
 * Registers @intel_dp as the active DRRS DP (under drrs.mutex), clearing
 * the busy-frontbuffer tracking. Bails out if the panel's current config
 * has no DRRS support, and warns if DRRS is already enabled.
 */
5757 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5759 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5760 struct drm_i915_private *dev_priv = dev->dev_private;
5761 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5762 struct drm_crtc *crtc = dig_port->base.base.crtc;
5763 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5765 if (!intel_crtc->config->has_drrs) {
5766 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5770 mutex_lock(&dev_priv->drrs.mutex);
5771 if (WARN_ON(dev_priv->drrs.dp)) {
5772 DRM_ERROR("DRRS already enabled\n");
5776 dev_priv->drrs.busy_frontbuffer_bits = 0;
5778 dev_priv->drrs.dp = intel_dp;
5781 mutex_unlock(&dev_priv->drrs.mutex);
5785 * intel_edp_drrs_disable - Disable DRRS
5786 * @intel_dp: DP struct
/*
 * Tears down DRRS: if currently in low refresh rate, switch back to the
 * panel's fixed (high) rate first, then clear drrs.dp and cancel the
 * pending downclock work. No-op when DRRS isn't active on this config.
 */
5789 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5791 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5792 struct drm_i915_private *dev_priv = dev->dev_private;
5793 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5794 struct drm_crtc *crtc = dig_port->base.base.crtc;
5795 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5797 if (!intel_crtc->config->has_drrs)
5800 mutex_lock(&dev_priv->drrs.mutex);
5801 if (!dev_priv->drrs.dp) {
5802 mutex_unlock(&dev_priv->drrs.mutex);
5806 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5807 intel_dp_set_drrs_state(dev_priv->dev,
5808 intel_dp->attached_connector->panel.
5809 fixed_mode->vrefresh);
5811 dev_priv->drrs.dp = NULL;
5812 mutex_unlock(&dev_priv->drrs.mutex);
5814 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed-work handler that performs the idleness downclock: after the
 * idle timeout, switch to the panel's downclock (low) refresh rate —
 * unless frontbuffer activity was recorded in the meantime (the work can
 * race with an invalidate, hence the busy_frontbuffer_bits re-check
 * under drrs.mutex).
 */
5817 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5819 struct drm_i915_private *dev_priv =
5820 container_of(work, typeof(*dev_priv), drrs.work.work);
5821 struct intel_dp *intel_dp;
5823 mutex_lock(&dev_priv->drrs.mutex);
5825 intel_dp = dev_priv->drrs.dp;
5831 * The delayed work can race with an invalidate hence we need to
5835 if (dev_priv->drrs.busy_frontbuffer_bits)
5838 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5839 intel_dp_set_drrs_state(dev_priv->dev,
5840 intel_dp->attached_connector->panel.
5841 downclock_mode->vrefresh);
5844 mutex_unlock(&dev_priv->drrs.mutex);
5848 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5850 * @frontbuffer_bits: frontbuffer plane tracking bits
5852 * This function gets called every time rendering on the given planes start.
5853 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5855 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Cancels any pending downclock work, records the dirty bits for the
 * DRRS pipe, and upclocks to the fixed refresh rate if currently in
 * low-RR mode.
 */
5857 void intel_edp_drrs_invalidate(struct drm_device *dev,
5858 unsigned frontbuffer_bits)
5860 struct drm_i915_private *dev_priv = dev->dev_private;
5861 struct drm_crtc *crtc;
5862 enum i915_pipe pipe;
5864 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5867 cancel_delayed_work(&dev_priv->drrs.work);
5869 mutex_lock(&dev_priv->drrs.mutex);
5870 if (!dev_priv->drrs.dp) {
5871 mutex_unlock(&dev_priv->drrs.mutex);
5875 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5876 pipe = to_intel_crtc(crtc)->pipe;
5878 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5879 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5881 /* invalidate means busy screen hence upclock */
5882 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5883 intel_dp_set_drrs_state(dev_priv->dev,
5884 dev_priv->drrs.dp->attached_connector->panel.
5885 fixed_mode->vrefresh);
5887 mutex_unlock(&dev_priv->drrs.mutex);
5891 * intel_edp_drrs_flush - Restart Idleness DRRS
5893 * @frontbuffer_bits: frontbuffer plane tracking bits
5895 * This function gets called every time rendering on the given planes has
5896 * completed or flip on a crtc is completed. So DRRS should be upclocked
5897 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5898 * if no other planes are dirty.
5900 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Clears the flushed planes' busy bits, upclocks if still in low-RR, and
 * re-arms the 1 s idleness downclock timer once no planes remain dirty.
 */
5902 void intel_edp_drrs_flush(struct drm_device *dev,
5903 unsigned frontbuffer_bits)
5905 struct drm_i915_private *dev_priv = dev->dev_private;
5906 struct drm_crtc *crtc;
5907 enum i915_pipe pipe;
5909 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5912 cancel_delayed_work(&dev_priv->drrs.work);
5914 mutex_lock(&dev_priv->drrs.mutex);
5915 if (!dev_priv->drrs.dp) {
5916 mutex_unlock(&dev_priv->drrs.mutex);
5920 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5921 pipe = to_intel_crtc(crtc)->pipe;
5923 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5924 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5926 /* flush means busy screen hence upclock */
5927 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5928 intel_dp_set_drrs_state(dev_priv->dev,
5929 dev_priv->drrs.dp->attached_connector->panel.
5930 fixed_mode->vrefresh);
5933 * flush also means no more activity hence schedule downclock, if all
5934 * other fbs are quiescent too
5936 if (!dev_priv->drrs.busy_frontbuffer_bits)
5937 schedule_delayed_work(&dev_priv->drrs.work,
5938 msecs_to_jiffies(1000));
5939 mutex_unlock(&dev_priv->drrs.mutex);
5943 * DOC: Display Refresh Rate Switching (DRRS)
5945 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5946 * which enables switching between low and high refresh rates,
5947 * dynamically, based on the usage scenario. This feature is applicable
5948 * for internal panels.
5950 * Indication that the panel supports DRRS is given by the panel EDID, which
5951 * would list multiple refresh rates for one resolution.
5953 * DRRS is of 2 types - static and seamless.
5954 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5955 * (may appear as a blink on screen) and is used in dock-undock scenario.
5956 * Seamless DRRS involves changing RR without any visual effect to the user
5957 * and can be used during normal system usage. This is done by programming
5958 * certain registers.
5960 * Support for static/seamless DRRS may be indicated in the VBT based on
5961 * inputs from the panel spec.
5963 * DRRS saves power by switching to low RR based on usage scenarios.
5966 * The implementation is based on frontbuffer tracking implementation.
5967 * When there is a disturbance on the screen triggered by user activity or a
5968 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5969 * When there is no movement on screen, after a timeout of 1 second, a switch
5970 * to low RR is made.
5971 * For integration with frontbuffer tracking code,
5972 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5974 * DRRS can be further extended to support other internal panels and also
5975 * the scenario of video playback wherein RR is set based on the rate
5976 * requested by userspace.
5980 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5981 * @intel_connector: eDP connector
5982 * @fixed_mode: preferred mode of panel
5984 * This function is called only once at driver load to initialize basic
5988 * Downclock mode if panel supports it, else return NULL.
5989 * DRRS support is determined by the presence of downclock mode (apart
5990 * from VBT setting).
5992 static struct drm_display_mode *
5993 intel_dp_drrs_init(struct intel_connector *intel_connector,
5994 struct drm_display_mode *fixed_mode)
5996 struct drm_connector *connector = &intel_connector->base;
5997 struct drm_device *dev = connector->dev;
5998 struct drm_i915_private *dev_priv = dev->dev_private;
5999 struct drm_display_mode *downclock_mode = NULL;
6001 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6002 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
6004 if (INTEL_INFO(dev)->gen <= 6) {
6005 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
6009 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6010 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6014 downclock_mode = intel_find_panel_downclock
6015 (dev, fixed_mode, connector);
6017 if (!downclock_mode) {
6018 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6022 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6024 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6025 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6026 return downclock_mode;
6029 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6030 struct intel_connector *intel_connector)
6032 struct drm_connector *connector = &intel_connector->base;
6033 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6034 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6035 struct drm_device *dev = intel_encoder->base.dev;
6036 struct drm_i915_private *dev_priv = dev->dev_private;
6037 struct drm_display_mode *fixed_mode = NULL;
6038 struct drm_display_mode *downclock_mode = NULL;
6040 struct drm_display_mode *scan;
6042 enum i915_pipe pipe = INVALID_PIPE;
6044 if (!is_edp(intel_dp))
6048 intel_edp_panel_vdd_sanitize(intel_dp);
6049 pps_unlock(intel_dp);
6051 /* Cache DPCD and EDID for edp. */
6052 has_dpcd = intel_dp_get_dpcd(intel_dp);
6055 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
6056 dev_priv->no_aux_handshake =
6057 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
6058 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
6060 /* if this fails, presume the device is a ghost */
6061 DRM_INFO("failed to retrieve link info, disabling eDP\n");
6065 /* We now know it's not a ghost, init power sequence regs. */
6067 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
6068 pps_unlock(intel_dp);
6070 mutex_lock(&dev->mode_config.mutex);
6071 edid = drm_get_edid(connector, intel_dp->aux.ddc);
6073 if (drm_add_edid_modes(connector, edid)) {
6074 drm_mode_connector_update_edid_property(connector,
6076 drm_edid_to_eld(connector, edid);
6079 edid = ERR_PTR(-EINVAL);
6082 edid = ERR_PTR(-ENOENT);
6084 intel_connector->edid = edid;
6086 /* prefer fixed mode from EDID if available */
6087 list_for_each_entry(scan, &connector->probed_modes, head) {
6088 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
6089 fixed_mode = drm_mode_duplicate(dev, scan);
6090 downclock_mode = intel_dp_drrs_init(
6091 intel_connector, fixed_mode);
6096 /* fallback to VBT if available for eDP */
6097 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
6098 fixed_mode = drm_mode_duplicate(dev,
6099 dev_priv->vbt.lfp_lvds_vbt_mode);
6101 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
6103 mutex_unlock(&dev->mode_config.mutex);
6105 if (IS_VALLEYVIEW(dev)) {
6107 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
6108 register_reboot_notifier(&intel_dp->edp_notifier);
6112 * Figure out the current pipe for the initial backlight setup.
6113 * If the current pipe isn't valid, try the PPS pipe, and if that
6114 * fails just assume pipe A.
6116 if (IS_CHERRYVIEW(dev))
6117 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
6119 pipe = PORT_TO_PIPE(intel_dp->DP);
6121 if (pipe != PIPE_A && pipe != PIPE_B)
6122 pipe = intel_dp->pps_pipe;
6124 if (pipe != PIPE_A && pipe != PIPE_B)
6127 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
6131 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6132 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6133 intel_panel_setup_backlight(connector, pipe);
6139 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6140 struct intel_connector *intel_connector)
6142 struct drm_connector *connector = &intel_connector->base;
6143 struct intel_dp *intel_dp = &intel_dig_port->dp;
6144 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6145 struct drm_device *dev = intel_encoder->base.dev;
6146 struct drm_i915_private *dev_priv = dev->dev_private;
6147 enum port port = intel_dig_port->port;
6150 intel_dp->pps_pipe = INVALID_PIPE;
6152 /* intel_dp vfuncs */
6153 if (INTEL_INFO(dev)->gen >= 9)
6154 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6155 else if (IS_VALLEYVIEW(dev))
6156 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6157 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6158 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6159 else if (HAS_PCH_SPLIT(dev))
6160 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6162 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6164 if (INTEL_INFO(dev)->gen >= 9)
6165 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6167 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6169 /* Preserve the current hw state. */
6170 intel_dp->DP = I915_READ(intel_dp->output_reg);
6171 intel_dp->attached_connector = intel_connector;
6173 if (intel_dp_is_edp(dev, port))
6174 type = DRM_MODE_CONNECTOR_eDP;
6176 type = DRM_MODE_CONNECTOR_DisplayPort;
6179 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6180 * for DP the encoder type can be set by the caller to
6181 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6183 if (type == DRM_MODE_CONNECTOR_eDP)
6184 intel_encoder->type = INTEL_OUTPUT_EDP;
6186 /* eDP only on port B and/or C on vlv/chv */
6187 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6188 port != PORT_B && port != PORT_C))
6191 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6192 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6195 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6196 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6198 connector->interlace_allowed = true;
6199 connector->doublescan_allowed = 0;
6201 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6202 edp_panel_vdd_work);
6204 intel_connector_attach_encoder(intel_connector, intel_encoder);
6205 drm_connector_register(connector);
6208 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6210 intel_connector->get_hw_state = intel_connector_get_hw_state;
6211 intel_connector->unregister = intel_dp_connector_unregister;
6213 /* Set up the hotplug pin. */
6216 intel_encoder->hpd_pin = HPD_PORT_A;
6219 intel_encoder->hpd_pin = HPD_PORT_B;
6220 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6221 intel_encoder->hpd_pin = HPD_PORT_A;
6224 intel_encoder->hpd_pin = HPD_PORT_C;
6227 intel_encoder->hpd_pin = HPD_PORT_D;
6230 intel_encoder->hpd_pin = HPD_PORT_E;
6236 if (is_edp(intel_dp)) {
6238 intel_dp_init_panel_power_timestamps(intel_dp);
6239 if (IS_VALLEYVIEW(dev))
6240 vlv_initial_power_sequencer_setup(intel_dp);
6242 intel_dp_init_panel_power_sequencer(dev, intel_dp);
6243 pps_unlock(intel_dp);
6246 intel_dp_aux_init(intel_dp, intel_connector);
6248 /* init MST on ports that can support it */
6249 if (HAS_DP_MST(dev) &&
6250 (port == PORT_B || port == PORT_C || port == PORT_D))
6251 intel_dp_mst_encoder_init(intel_dig_port,
6252 intel_connector->base.base.id);
6254 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6255 drm_dp_aux_unregister(&intel_dp->aux);
6256 if (is_edp(intel_dp)) {
6257 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6259 * vdd might still be enabled do to the delayed vdd off.
6260 * Make sure vdd is actually turned off here.
6263 edp_panel_vdd_off_sync(intel_dp);
6264 pps_unlock(intel_dp);
6266 drm_connector_unregister(connector);
6267 drm_connector_cleanup(connector);
6271 intel_dp_add_properties(intel_dp, connector);
6273 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6274 * 0xd. Failure to do so will result in spurious interrupts being
6275 * generated on the port when a cable is not attached.
6277 if (IS_G4X(dev) && !IS_GM45(dev)) {
6278 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6279 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6282 i915_debugfs_connector_add(connector);
6288 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6290 struct drm_i915_private *dev_priv = dev->dev_private;
6291 struct intel_digital_port *intel_dig_port;
6292 struct intel_encoder *intel_encoder;
6293 struct drm_encoder *encoder;
6294 struct intel_connector *intel_connector;
6296 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6297 if (!intel_dig_port)
6300 intel_connector = intel_connector_alloc();
6301 if (!intel_connector)
6302 goto err_connector_alloc;
6304 intel_encoder = &intel_dig_port->base;
6305 encoder = &intel_encoder->base;
6307 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6308 DRM_MODE_ENCODER_TMDS);
6310 intel_encoder->compute_config = intel_dp_compute_config;
6311 intel_encoder->disable = intel_disable_dp;
6312 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6313 intel_encoder->get_config = intel_dp_get_config;
6314 intel_encoder->suspend = intel_dp_encoder_suspend;
6315 if (IS_CHERRYVIEW(dev)) {
6316 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6317 intel_encoder->pre_enable = chv_pre_enable_dp;
6318 intel_encoder->enable = vlv_enable_dp;
6319 intel_encoder->post_disable = chv_post_disable_dp;
6320 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6321 } else if (IS_VALLEYVIEW(dev)) {
6322 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6323 intel_encoder->pre_enable = vlv_pre_enable_dp;
6324 intel_encoder->enable = vlv_enable_dp;
6325 intel_encoder->post_disable = vlv_post_disable_dp;
6327 intel_encoder->pre_enable = g4x_pre_enable_dp;
6328 intel_encoder->enable = g4x_enable_dp;
6329 if (INTEL_INFO(dev)->gen >= 5)
6330 intel_encoder->post_disable = ilk_post_disable_dp;
6333 intel_dig_port->port = port;
6334 intel_dig_port->dp.output_reg = output_reg;
6336 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6337 if (IS_CHERRYVIEW(dev)) {
6339 intel_encoder->crtc_mask = 1 << 2;
6341 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6343 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6345 intel_encoder->cloneable = 0;
6347 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6348 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6350 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6351 goto err_init_connector;
6356 drm_encoder_cleanup(encoder);
6357 kfree(intel_connector);
6358 err_connector_alloc:
6359 kfree(intel_dig_port);
6365 void intel_dp_mst_suspend(struct drm_device *dev)
6367 struct drm_i915_private *dev_priv = dev->dev_private;
6371 for (i = 0; i < I915_MAX_PORTS; i++) {
6372 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6373 if (!intel_dig_port)
6376 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6377 if (!intel_dig_port->dp.can_mst)
6379 if (intel_dig_port->dp.is_mst)
6380 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6386 void intel_dp_mst_resume(struct drm_device *dev)
6388 struct drm_i915_private *dev_priv = dev->dev_private;
6391 for (i = 0; i < I915_MAX_PORTS; i++) {
6392 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6393 if (!intel_dig_port)
6395 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6399 if (!intel_dig_port->dp.can_mst)
6402 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6404 intel_dp_check_mst_status(&intel_dig_port->dp);