2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/export.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
40 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
42 static int disable_aux_irq = 0;
43 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45 /* Compliance test status bits */
46 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
47 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
56 static const struct dp_link_dpll gen4_dpll[] = {
58 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
63 static const struct dp_link_dpll pch_dpll[] = {
65 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
70 static const struct dp_link_dpll vlv_dpll[] = {
72 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
78 * CHV supports eDP 1.4 that have more link rates.
79 * Below only provides the fixed rate but exclude variable rate.
81 static const struct dp_link_dpll chv_dpll[] = {
83 * CHV requires to program fractional division for m2.
84 * m2 is stored in fixed point format using formula below
85 * (m2_int << 22) | m2_fraction
87 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
88 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
89 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
90 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
91 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
92 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
95 static const int skl_rates[] = { 162000, 216000, 270000,
96 324000, 432000, 540000 };
97 static const int default_rates[] = { 162000, 270000, 540000 };
100 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
101 * @intel_dp: DP struct
103 * If a CPU or PCH DP output is attached to an eDP panel, this function
104 * will return true, and false otherwise.
106 static bool is_edp(struct intel_dp *intel_dp)
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
113 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117 return intel_dig_port->base.base.dev;
120 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
125 static void intel_dp_link_down(struct intel_dp *intel_dp);
126 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
127 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
128 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
129 static void vlv_steal_power_sequencer(struct drm_device *dev,
130 enum i915_pipe pipe);
133 intel_dp_max_link_bw(struct intel_dp *intel_dp)
135 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
137 switch (max_link_bw) {
138 case DP_LINK_BW_1_62:
143 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
145 max_link_bw = DP_LINK_BW_1_62;
151 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
153 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
154 struct drm_device *dev = intel_dig_port->base.base.dev;
155 u8 source_max, sink_max;
158 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
159 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164 return min(source_max, sink_max);
168 * The units on the numbers in the next two are... bizarre. Examples will
169 * make it clearer; this one parallels an example in the eDP spec.
171 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 * 270000 * 1 * 8 / 10 == 216000
175 * The actual data capacity of that configuration is 2.16Gbit/s, so the
176 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
177 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
178 * 119000. At 18bpp that's 2142000 kilobits per second.
180 * Thus the strange-looking division by 10 in intel_dp_link_required, to
181 * get the result in decakilobits instead of kilobits.
185 intel_dp_link_required(int pixel_clock, int bpp)
187 return (pixel_clock * bpp + 9) / 10;
191 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
193 return (max_link_clock * max_lanes * 8) / 10;
196 static enum drm_mode_status
197 intel_dp_mode_valid(struct drm_connector *connector,
198 struct drm_display_mode *mode)
200 struct intel_dp *intel_dp = intel_attached_dp(connector);
201 struct intel_connector *intel_connector = to_intel_connector(connector);
202 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
203 int target_clock = mode->clock;
204 int max_rate, mode_rate, max_lanes, max_link_clock;
206 if (is_edp(intel_dp) && fixed_mode) {
207 if (mode->hdisplay > fixed_mode->hdisplay)
210 if (mode->vdisplay > fixed_mode->vdisplay)
213 target_clock = fixed_mode->clock;
216 max_link_clock = intel_dp_max_link_rate(intel_dp);
217 max_lanes = intel_dp_max_lane_count(intel_dp);
219 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
220 mode_rate = intel_dp_link_required(target_clock, 18);
222 if (mode_rate > max_rate)
223 return MODE_CLOCK_HIGH;
225 if (mode->clock < 10000)
226 return MODE_CLOCK_LOW;
228 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
229 return MODE_H_ILLEGAL;
234 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
241 for (i = 0; i < src_bytes; i++)
242 v |= ((uint32_t) src[i]) << ((3-i) * 8);
246 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
251 for (i = 0; i < dst_bytes; i++)
252 dst[i] = src >> ((3-i) * 8);
255 /* hrawclock is 1/4 the FSB frequency */
257 intel_hrawclk(struct drm_device *dev)
259 struct drm_i915_private *dev_priv = dev->dev_private;
262 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
263 if (IS_VALLEYVIEW(dev))
266 clkcfg = I915_READ(CLKCFG);
267 switch (clkcfg & CLKCFG_FSB_MASK) {
276 case CLKCFG_FSB_1067:
278 case CLKCFG_FSB_1333:
280 /* these two are just a guess; one of them might be right */
281 case CLKCFG_FSB_1600:
282 case CLKCFG_FSB_1600_ALT:
290 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
291 struct intel_dp *intel_dp);
293 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
294 struct intel_dp *intel_dp);
296 static void pps_lock(struct intel_dp *intel_dp)
298 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
299 struct intel_encoder *encoder = &intel_dig_port->base;
300 struct drm_device *dev = encoder->base.dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 enum intel_display_power_domain power_domain;
305 * See vlv_power_sequencer_reset() why we need
306 * a power domain reference here.
308 power_domain = intel_display_port_power_domain(encoder);
309 intel_display_power_get(dev_priv, power_domain);
311 mutex_lock(&dev_priv->pps_mutex);
314 static void pps_unlock(struct intel_dp *intel_dp)
316 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
317 struct intel_encoder *encoder = &intel_dig_port->base;
318 struct drm_device *dev = encoder->base.dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 enum intel_display_power_domain power_domain;
322 mutex_unlock(&dev_priv->pps_mutex);
324 power_domain = intel_display_port_power_domain(encoder);
325 intel_display_power_put(dev_priv, power_domain);
329 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
331 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
332 struct drm_device *dev = intel_dig_port->base.base.dev;
333 struct drm_i915_private *dev_priv = dev->dev_private;
334 enum i915_pipe pipe = intel_dp->pps_pipe;
338 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
339 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
340 pipe_name(pipe), port_name(intel_dig_port->port)))
343 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
344 pipe_name(pipe), port_name(intel_dig_port->port));
346 /* Preserve the BIOS-computed detected bit. This is
347 * supposed to be read-only.
349 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
350 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
351 DP |= DP_PORT_WIDTH(1);
352 DP |= DP_LINK_TRAIN_PAT_1;
354 if (IS_CHERRYVIEW(dev))
355 DP |= DP_PIPE_SELECT_CHV(pipe);
356 else if (pipe == PIPE_B)
357 DP |= DP_PIPEB_SELECT;
359 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362 * The DPLL for the pipe must be enabled for this to work.
363 * So enable temporarily it if it's not already enabled.
366 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
367 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370 * Similar magic as in intel_dp_enable_port().
371 * We _must_ do this port enable + disable trick
372 * to make this power seqeuencer lock onto the port.
373 * Otherwise even VDD force bit won't work.
375 I915_WRITE(intel_dp->output_reg, DP);
376 POSTING_READ(intel_dp->output_reg);
378 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
379 POSTING_READ(intel_dp->output_reg);
381 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
382 POSTING_READ(intel_dp->output_reg);
385 vlv_force_pll_off(dev, pipe);
388 static enum i915_pipe
389 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
391 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
392 struct drm_device *dev = intel_dig_port->base.base.dev;
393 struct drm_i915_private *dev_priv = dev->dev_private;
394 struct intel_encoder *encoder;
395 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
398 lockdep_assert_held(&dev_priv->pps_mutex);
400 /* We should never land here with regular DP ports */
401 WARN_ON(!is_edp(intel_dp));
403 if (intel_dp->pps_pipe != INVALID_PIPE)
404 return intel_dp->pps_pipe;
407 * We don't have power sequencer currently.
408 * Pick one that's not used by other ports.
410 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
412 struct intel_dp *tmp;
414 if (encoder->type != INTEL_OUTPUT_EDP)
417 tmp = enc_to_intel_dp(&encoder->base);
419 if (tmp->pps_pipe != INVALID_PIPE)
420 pipes &= ~(1 << tmp->pps_pipe);
424 * Didn't find one. This should not happen since there
425 * are two power sequencers and up to two eDP ports.
427 if (WARN_ON(pipes == 0))
430 pipe = ffs(pipes) - 1;
432 vlv_steal_power_sequencer(dev, pipe);
433 intel_dp->pps_pipe = pipe;
435 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
436 pipe_name(intel_dp->pps_pipe),
437 port_name(intel_dig_port->port));
439 /* init power sequencer on this pipe and port */
440 intel_dp_init_panel_power_sequencer(dev, intel_dp);
441 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
444 * Even vdd force doesn't work until we've made
445 * the power sequencer lock in on the port.
447 vlv_power_sequencer_kick(intel_dp);
449 return intel_dp->pps_pipe;
452 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
453 enum i915_pipe pipe);
455 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
473 static enum i915_pipe
474 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
476 vlv_pipe_check pipe_check)
480 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
481 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
482 PANEL_PORT_SELECT_MASK;
484 if (port_sel != PANEL_PORT_SELECT_VLV(port))
487 if (!pipe_check(dev_priv, pipe))
497 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
499 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
500 struct drm_device *dev = intel_dig_port->base.base.dev;
501 struct drm_i915_private *dev_priv = dev->dev_private;
502 enum port port = intel_dig_port->port;
504 lockdep_assert_held(&dev_priv->pps_mutex);
506 /* try to find a pipe with this port selected */
507 /* first pick one where the panel is on */
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 /* didn't find one? pick one where vdd is on */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_has_vdd_on);
514 /* didn't find one? pick one with just the correct port */
515 if (intel_dp->pps_pipe == INVALID_PIPE)
516 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
519 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
520 if (intel_dp->pps_pipe == INVALID_PIPE) {
521 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
526 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
527 port_name(port), pipe_name(intel_dp->pps_pipe));
529 intel_dp_init_panel_power_sequencer(dev, intel_dp);
530 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
533 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
535 struct drm_device *dev = dev_priv->dev;
536 struct intel_encoder *encoder;
538 if (WARN_ON(!IS_VALLEYVIEW(dev)))
542 * We can't grab pps_mutex here due to deadlock with power_domain
543 * mutex when power_domain functions are called while holding pps_mutex.
544 * That also means that in order to use pps_pipe the code needs to
545 * hold both a power domain reference and pps_mutex, and the power domain
546 * reference get/put must be done while _not_ holding pps_mutex.
547 * pps_{lock,unlock}() do these steps in the correct order, so one
548 * should use them always.
551 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
552 struct intel_dp *intel_dp;
554 if (encoder->type != INTEL_OUTPUT_EDP)
557 intel_dp = enc_to_intel_dp(&encoder->base);
558 intel_dp->pps_pipe = INVALID_PIPE;
562 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
564 struct drm_device *dev = intel_dp_to_dev(intel_dp);
566 if (HAS_PCH_SPLIT(dev))
567 return PCH_PP_CONTROL;
569 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
572 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
574 struct drm_device *dev = intel_dp_to_dev(intel_dp);
576 if (HAS_PCH_SPLIT(dev))
577 return PCH_PP_STATUS;
579 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
582 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
583 This function only applicable when panel PM state is not to be tracked */
585 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
588 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
590 struct drm_device *dev = intel_dp_to_dev(intel_dp);
591 struct drm_i915_private *dev_priv = dev->dev_private;
593 u32 pp_ctrl_reg, pp_div_reg;
595 if (!is_edp(intel_dp) || code != SYS_RESTART)
600 if (IS_VALLEYVIEW(dev)) {
601 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
603 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
604 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
605 pp_div = I915_READ(pp_div_reg);
606 pp_div &= PP_REFERENCE_DIVIDER_MASK;
608 /* 0x1F write to PP_DIV_REG sets max cycle delay */
609 I915_WRITE(pp_div_reg, pp_div | 0x1F);
610 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
611 msleep(intel_dp->panel_power_cycle_delay);
614 pps_unlock(intel_dp);
620 static bool edp_have_panel_power(struct intel_dp *intel_dp)
622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
623 struct drm_i915_private *dev_priv = dev->dev_private;
625 lockdep_assert_held(&dev_priv->pps_mutex);
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
631 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
634 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
637 struct drm_i915_private *dev_priv = dev->dev_private;
639 lockdep_assert_held(&dev_priv->pps_mutex);
641 if (IS_VALLEYVIEW(dev) &&
642 intel_dp->pps_pipe == INVALID_PIPE)
645 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
649 intel_dp_check_edp(struct intel_dp *intel_dp)
651 struct drm_device *dev = intel_dp_to_dev(intel_dp);
652 struct drm_i915_private *dev_priv = dev->dev_private;
654 if (!is_edp(intel_dp))
657 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
658 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
660 I915_READ(_pp_stat_reg(intel_dp)),
661 I915_READ(_pp_ctrl_reg(intel_dp)));
666 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
669 struct drm_device *dev = intel_dig_port->base.base.dev;
670 struct drm_i915_private *dev_priv = dev->dev_private;
671 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
675 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
677 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
678 msecs_to_jiffies_timeout(10));
680 done = wait_for_atomic(C, 10) == 0;
682 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
689 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
695 * The clock divider is based off the hrawclk, and would like to run at
696 * 2MHz. So, take the hrawclk value and divide by 2 and use that
698 return index ? 0 : intel_hrawclk(dev) / 2;
701 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
705 struct drm_i915_private *dev_priv = dev->dev_private;
710 if (intel_dig_port->port == PORT_A) {
711 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
713 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
717 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720 struct drm_device *dev = intel_dig_port->base.base.dev;
721 struct drm_i915_private *dev_priv = dev->dev_private;
723 if (intel_dig_port->port == PORT_A) {
726 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
727 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728 /* Workaround for non-ULT HSW */
735 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
739 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741 return index ? 0 : 100;
744 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
747 * SKL doesn't need us to program the AUX clock divider (Hardware will
748 * derive the clock from CDCLK automatically). We still implement the
749 * get_aux_clock_divider vfunc to plug-in into the existing code.
751 return index ? 0 : 1;
754 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
757 uint32_t aux_clock_divider)
759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760 struct drm_device *dev = intel_dig_port->base.base.dev;
761 uint32_t precharge, timeout;
768 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
771 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
773 return DP_AUX_CH_CTL_SEND_BUSY |
775 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
776 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_RECEIVE_ERROR |
779 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
781 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
784 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
789 return DP_AUX_CH_CTL_SEND_BUSY |
791 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792 DP_AUX_CH_CTL_TIME_OUT_ERROR |
793 DP_AUX_CH_CTL_TIME_OUT_1600us |
794 DP_AUX_CH_CTL_RECEIVE_ERROR |
795 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
800 intel_dp_aux_ch(struct intel_dp *intel_dp,
801 const uint8_t *send, int send_bytes,
802 uint8_t *recv, int recv_size)
804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805 struct drm_device *dev = intel_dig_port->base.base.dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
807 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808 uint32_t ch_data = ch_ctl + 4;
809 uint32_t aux_clock_divider;
810 int i, ret, recv_bytes;
813 bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
819 * We will be called with VDD already enabled for dpcd/edid/oui reads.
820 * In such cases we want to leave VDD enabled and it's up to upper layers
821 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
824 vdd = edp_panel_vdd_on(intel_dp);
826 /* dp aux is extremely sensitive to irq latency, hence request the
827 * lowest possible wakeup latency and so prevent the cpu from going into
830 pm_qos_update_request(&dev_priv->pm_qos, 0);
832 intel_dp_check_edp(intel_dp);
834 intel_aux_display_runtime_get(dev_priv);
836 /* Try to wait for any previous AUX channel activity */
837 for (try = 0; try < 3; try++) {
838 status = I915_READ_NOTRACE(ch_ctl);
839 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
845 WARN(1, "dp_aux_ch not started status 0x%08x\n",
851 /* Only 5 data registers! */
852 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
857 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
858 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
863 /* Must try at least 3 times according to DP spec */
864 for (try = 0; try < 5; try++) {
865 /* Load the send data into the aux channel data registers */
866 for (i = 0; i < send_bytes; i += 4)
867 I915_WRITE(ch_data + i,
868 intel_dp_pack_aux(send + i,
871 /* Send the command and wait for it to complete */
872 I915_WRITE(ch_ctl, send_ctl);
874 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
876 /* Clear done status and any errors */
880 DP_AUX_CH_CTL_TIME_OUT_ERROR |
881 DP_AUX_CH_CTL_RECEIVE_ERROR);
883 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
886 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887 * 400us delay required for errors and timeouts
888 * Timeout errors from the HW already meet this
889 * requirement so skip to next iteration
891 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892 usleep_range(400, 500);
895 if (status & DP_AUX_CH_CTL_DONE)
900 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
901 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
907 /* Check for timeout or receive error.
908 * Timeouts occur when the sink is not connected
910 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
911 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
916 /* Timeouts occur when the device isn't connected, so they're
917 * "normal" -- don't fill the kernel log with these */
918 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
919 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
924 /* Unload any bytes sent back from the other side */
925 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
926 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
927 if (recv_bytes > recv_size)
928 recv_bytes = recv_size;
930 for (i = 0; i < recv_bytes; i += 4)
931 intel_dp_unpack_aux(I915_READ(ch_data + i),
932 recv + i, recv_bytes - i);
936 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
937 intel_aux_display_runtime_put(dev_priv);
940 edp_panel_vdd_off(intel_dp, false);
942 pps_unlock(intel_dp);
947 #define BARE_ADDRESS_SIZE 3
948 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
950 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
952 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
953 uint8_t txbuf[20], rxbuf[20];
954 size_t txsize, rxsize;
957 txbuf[0] = (msg->request << 4) |
958 ((msg->address >> 16) & 0xf);
959 txbuf[1] = (msg->address >> 8) & 0xff;
960 txbuf[2] = msg->address & 0xff;
961 txbuf[3] = msg->size - 1;
963 switch (msg->request & ~DP_AUX_I2C_MOT) {
964 case DP_AUX_NATIVE_WRITE:
965 case DP_AUX_I2C_WRITE:
966 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
967 rxsize = 2; /* 0 or 1 data bytes */
969 if (WARN_ON(txsize > 20))
972 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
974 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 msg->reply = rxbuf[0] >> 4;
979 /* Number of bytes written in a short write. */
980 ret = clamp_t(int, rxbuf[1], 0, msg->size);
982 /* Return payload size. */
988 case DP_AUX_NATIVE_READ:
989 case DP_AUX_I2C_READ:
990 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
991 rxsize = msg->size + 1;
993 if (WARN_ON(rxsize > 20))
996 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
998 msg->reply = rxbuf[0] >> 4;
1000 * Assume happy day, and copy the data. The caller is
1001 * expected to check msg->reply before touching it.
1003 * Return payload size.
1006 memcpy(msg->buffer, rxbuf + 1, ret);
1019 intel_dp_i2c_aux_ch(struct device *adapter, int mode,
1020 uint8_t write_byte, uint8_t *read_byte)
1022 struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1023 struct intel_dp *intel_dp = data->priv;
1024 uint16_t address = data->address;
1032 intel_edp_panel_vdd_on(intel_dp);
1033 intel_dp_check_edp(intel_dp);
1034 /* Set up the command byte */
1035 if (mode & MODE_I2C_READ)
1036 msg[0] = DP_AUX_I2C_READ << 4;
1038 msg[0] = DP_AUX_I2C_WRITE << 4;
1040 if (!(mode & MODE_I2C_STOP))
1041 msg[0] |= DP_AUX_I2C_MOT << 4;
1043 msg[1] = address >> 8;
1047 case MODE_I2C_WRITE:
1049 msg[4] = write_byte;
1065 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1066 * required to retry at least seven times upon receiving AUX_DEFER
1067 * before giving up the AUX transaction.
1069 for (retry = 0; retry < 7; retry++) {
1070 ret = intel_dp_aux_ch(intel_dp,
1072 reply, reply_bytes);
1074 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
1078 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1079 case DP_AUX_NATIVE_REPLY_ACK:
1080 /* I2C-over-AUX Reply field is only valid
1081 * when paired with AUX ACK.
1084 case DP_AUX_NATIVE_REPLY_NACK:
1085 DRM_DEBUG_KMS("aux_ch native nack\n");
1088 case DP_AUX_NATIVE_REPLY_DEFER:
1090 * For now, just give more slack to branch devices. We
1091 * could check the DPCD for I2C bit rate capabilities,
1092 * and if available, adjust the interval. We could also
1093 * be more careful with DP-to-Legacy adapters where a
1094 * long legacy cable may force very low I2C bit rates.
1096 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1097 DP_DWN_STRM_PORT_PRESENT)
1098 usleep_range(500, 600);
1100 usleep_range(300, 400);
1103 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
1109 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1110 case DP_AUX_I2C_REPLY_ACK:
1111 if (mode == MODE_I2C_READ) {
1112 *read_byte = reply[1];
1114 ret = 0; /* reply_bytes - 1 */
1116 case DP_AUX_I2C_REPLY_NACK:
1117 DRM_DEBUG_KMS("aux_i2c nack\n");
1120 case DP_AUX_I2C_REPLY_DEFER:
1121 DRM_DEBUG_KMS("aux_i2c defer\n");
1125 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1131 DRM_ERROR("too many retries, giving up\n");
1139 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1141 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1142 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1143 enum port port = intel_dig_port->port;
1144 const char *name = NULL;
1149 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1153 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1157 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1161 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1169 * The AUX_CTL register is usually DP_CTL + 0x10.
1171 * On Haswell and Broadwell though:
1172 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1173 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1175 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1177 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1178 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1180 intel_dp->aux.name = name;
1181 intel_dp->aux.dev = dev->dev;
1182 intel_dp->aux.transfer = intel_dp_aux_transfer;
1184 DRM_DEBUG_KMS("i2c_init %s\n", name);
1185 ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
1186 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1187 &intel_dp->aux.ddc);
1188 WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1189 ret, port_name(port));
1194 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1196 intel_connector_unregister(intel_connector);
1201 intel_dp_i2c_init(struct intel_dp *intel_dp,
1202 struct intel_connector *intel_connector, const char *name)
1206 DRM_DEBUG_KMS("i2c_init %s\n", name);
1208 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
1209 intel_dp->adapter.owner = THIS_MODULE;
1210 intel_dp->adapter.class = I2C_CLASS_DDC;
1211 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
1212 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
1213 intel_dp->adapter.algo_data = &intel_dp->algo;
1214 intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
1216 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
1220 ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
1221 &intel_dp->adapter.dev.kobj,
1222 intel_dp->adapter.dev.kobj.name);
1224 ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
1225 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1226 &intel_dp->adapter);
1233 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1237 memset(&pipe_config->dpll_hw_state, 0,
1238 sizeof(pipe_config->dpll_hw_state));
1240 pipe_config->ddi_pll_sel = SKL_DPLL0;
1241 pipe_config->dpll_hw_state.cfgcr1 = 0;
1242 pipe_config->dpll_hw_state.cfgcr2 = 0;
1244 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1245 switch (link_clock / 2) {
1247 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1251 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1255 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1259 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1262 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1263 results in CDCLK change. Need to handle the change of CDCLK by
1264 disabling pipes and re-enabling them */
1266 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1270 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1275 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1279 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1281 memset(&pipe_config->dpll_hw_state, 0,
1282 sizeof(pipe_config->dpll_hw_state));
1285 case DP_LINK_BW_1_62:
1286 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1288 case DP_LINK_BW_2_7:
1289 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1291 case DP_LINK_BW_5_4:
1292 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1298 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1300 if (intel_dp->num_sink_rates) {
1301 *sink_rates = intel_dp->sink_rates;
1302 return intel_dp->num_sink_rates;
1305 *sink_rates = default_rates;
1307 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1310 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1312 /* WaDisableHBR2:skl */
1313 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1316 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1317 (INTEL_INFO(dev)->gen >= 9))
1324 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1326 if (IS_SKYLAKE(dev)) {
1327 *source_rates = skl_rates;
1328 return ARRAY_SIZE(skl_rates);
1331 *source_rates = default_rates;
1333 /* This depends on the fact that 5.4 is last value in the array */
1334 if (intel_dp_source_supports_hbr2(dev))
1335 return (DP_LINK_BW_5_4 >> 3) + 1;
1337 return (DP_LINK_BW_2_7 >> 3) + 1;
1341 intel_dp_set_clock(struct intel_encoder *encoder,
1342 struct intel_crtc_state *pipe_config, int link_bw)
1344 struct drm_device *dev = encoder->base.dev;
1345 const struct dp_link_dpll *divisor = NULL;
1349 divisor = gen4_dpll;
1350 count = ARRAY_SIZE(gen4_dpll);
1351 } else if (HAS_PCH_SPLIT(dev)) {
1353 count = ARRAY_SIZE(pch_dpll);
1354 } else if (IS_CHERRYVIEW(dev)) {
1356 count = ARRAY_SIZE(chv_dpll);
1357 } else if (IS_VALLEYVIEW(dev)) {
1359 count = ARRAY_SIZE(vlv_dpll);
1362 if (divisor && count) {
1363 for (i = 0; i < count; i++) {
1364 if (link_bw == divisor[i].link_bw) {
1365 pipe_config->dpll = divisor[i].dpll;
1366 pipe_config->clock_set = true;
1373 static int intersect_rates(const int *source_rates, int source_len,
1374 const int *sink_rates, int sink_len,
1377 int i = 0, j = 0, k = 0;
1379 while (i < source_len && j < sink_len) {
1380 if (source_rates[i] == sink_rates[j]) {
1381 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1383 common_rates[k] = source_rates[i];
1387 } else if (source_rates[i] < sink_rates[j]) {
1396 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1399 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1400 const int *source_rates, *sink_rates;
1401 int source_len, sink_len;
1403 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1404 source_len = intel_dp_source_rates(dev, &source_rates);
1406 return intersect_rates(source_rates, source_len,
1407 sink_rates, sink_len,
1411 static void snprintf_int_array(char *str, size_t len,
1412 const int *array, int nelem)
1418 for (i = 0; i < nelem; i++) {
1419 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1427 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1429 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1430 const int *source_rates, *sink_rates;
1431 int source_len, sink_len, common_len;
1432 int common_rates[DP_MAX_SUPPORTED_RATES];
1433 char str[128]; /* FIXME: too big for stack? */
1435 if ((drm_debug & DRM_UT_KMS) == 0)
1438 source_len = intel_dp_source_rates(dev, &source_rates);
1439 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1440 DRM_DEBUG_KMS("source rates: %s\n", str);
1442 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1443 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1444 DRM_DEBUG_KMS("sink rates: %s\n", str);
1446 common_len = intel_dp_common_rates(intel_dp, common_rates);
1447 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1448 DRM_DEBUG_KMS("common rates: %s\n", str);
1451 static int rate_to_index(int find, const int *rates)
1455 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1456 if (find == rates[i])
1463 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1465 int rates[DP_MAX_SUPPORTED_RATES] = {};
1468 len = intel_dp_common_rates(intel_dp, rates);
1469 if (WARN_ON(len <= 0))
1472 return rates[rate_to_index(0, rates) - 1];
1475 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1477 return rate_to_index(rate, intel_dp->sink_rates);
1481 intel_dp_compute_config(struct intel_encoder *encoder,
1482 struct intel_crtc_state *pipe_config)
1484 struct drm_device *dev = encoder->base.dev;
1485 struct drm_i915_private *dev_priv = dev->dev_private;
1486 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1487 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1488 enum port port = dp_to_dig_port(intel_dp)->port;
1489 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1490 struct intel_connector *intel_connector = intel_dp->attached_connector;
1491 int lane_count, clock;
1492 int min_lane_count = 1;
1493 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1494 /* Conveniently, the link BW constants become indices with a shift...*/
1498 int link_avail, link_clock;
1499 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1502 common_len = intel_dp_common_rates(intel_dp, common_rates);
1504 /* No common link rates between source and sink */
1505 WARN_ON(common_len <= 0);
1507 max_clock = common_len - 1;
1509 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1510 pipe_config->has_pch_encoder = true;
1512 pipe_config->has_dp_encoder = true;
1513 pipe_config->has_drrs = false;
1514 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1516 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1517 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1520 if (INTEL_INFO(dev)->gen >= 9) {
1522 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1527 if (!HAS_PCH_SPLIT(dev))
1528 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1529 intel_connector->panel.fitting_mode);
1531 intel_pch_panel_fitting(intel_crtc, pipe_config,
1532 intel_connector->panel.fitting_mode);
1535 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1538 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1539 "max bw %d pixel clock %iKHz\n",
1540 max_lane_count, common_rates[max_clock],
1541 adjusted_mode->crtc_clock);
1543 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1544 * bpc in between. */
1545 bpp = pipe_config->pipe_bpp;
1546 if (is_edp(intel_dp)) {
1547 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1548 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1549 dev_priv->vbt.edp_bpp);
1550 bpp = dev_priv->vbt.edp_bpp;
1554 * Use the maximum clock and number of lanes the eDP panel
1555 * advertizes being capable of. The panels are generally
1556 * designed to support only a single clock and lane
1557 * configuration, and typically these values correspond to the
1558 * native resolution of the panel.
1560 min_lane_count = max_lane_count;
1561 min_clock = max_clock;
1564 for (; bpp >= 6*3; bpp -= 2*3) {
1565 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1568 for (clock = min_clock; clock <= max_clock; clock++) {
1569 for (lane_count = min_lane_count;
1570 lane_count <= max_lane_count;
1573 link_clock = common_rates[clock];
1574 link_avail = intel_dp_max_data_rate(link_clock,
1577 if (mode_rate <= link_avail) {
1587 if (intel_dp->color_range_auto) {
1590 * CEA-861-E - 5.1 Default Encoding Parameters
1591 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1593 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1594 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1596 intel_dp->color_range = 0;
1599 if (intel_dp->color_range)
1600 pipe_config->limited_color_range = true;
1602 intel_dp->lane_count = lane_count;
1604 if (intel_dp->num_sink_rates) {
1605 intel_dp->link_bw = 0;
1606 intel_dp->rate_select =
1607 intel_dp_rate_select(intel_dp, common_rates[clock]);
1610 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1611 intel_dp->rate_select = 0;
1614 pipe_config->pipe_bpp = bpp;
1615 pipe_config->port_clock = common_rates[clock];
1617 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1618 intel_dp->link_bw, intel_dp->lane_count,
1619 pipe_config->port_clock, bpp);
1620 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1621 mode_rate, link_avail);
1623 intel_link_compute_m_n(bpp, lane_count,
1624 adjusted_mode->crtc_clock,
1625 pipe_config->port_clock,
1626 &pipe_config->dp_m_n);
1628 if (intel_connector->panel.downclock_mode != NULL &&
1629 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1630 pipe_config->has_drrs = true;
1631 intel_link_compute_m_n(bpp, lane_count,
1632 intel_connector->panel.downclock_mode->clock,
1633 pipe_config->port_clock,
1634 &pipe_config->dp_m2_n2);
1637 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1638 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1639 else if (IS_BROXTON(dev))
1640 /* handled in ddi */;
1641 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1642 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1644 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1649 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1651 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1652 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1653 struct drm_device *dev = crtc->base.dev;
1654 struct drm_i915_private *dev_priv = dev->dev_private;
1657 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1658 crtc->config->port_clock);
1659 dpa_ctl = I915_READ(DP_A);
1660 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1662 if (crtc->config->port_clock == 162000) {
1663 /* For a long time we've carried around a ILK-DevA w/a for the
1664 * 160MHz clock. If we're really unlucky, it's still required.
1666 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1667 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1668 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1670 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1671 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1674 I915_WRITE(DP_A, dpa_ctl);
1680 static void intel_dp_prepare(struct intel_encoder *encoder)
1682 struct drm_device *dev = encoder->base.dev;
1683 struct drm_i915_private *dev_priv = dev->dev_private;
1684 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1685 enum port port = dp_to_dig_port(intel_dp)->port;
1686 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1687 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1690 * There are four kinds of DP registers:
1697 * IBX PCH and CPU are the same for almost everything,
1698 * except that the CPU DP PLL is configured in this
1701 * CPT PCH is quite different, having many bits moved
1702 * to the TRANS_DP_CTL register instead. That
1703 * configuration happens (oddly) in ironlake_pch_enable
1706 /* Preserve the BIOS-computed detected bit. This is
1707 * supposed to be read-only.
1709 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1711 /* Handle DP bits in common between all three register formats */
1712 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1713 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1715 if (crtc->config->has_audio)
1716 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1718 /* Split out the IBX/CPU vs CPT settings */
1720 if (IS_GEN7(dev) && port == PORT_A) {
1721 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1722 intel_dp->DP |= DP_SYNC_HS_HIGH;
1723 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1724 intel_dp->DP |= DP_SYNC_VS_HIGH;
1725 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1727 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1728 intel_dp->DP |= DP_ENHANCED_FRAMING;
1730 intel_dp->DP |= crtc->pipe << 29;
1731 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1734 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1736 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1737 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1738 trans_dp |= TRANS_DP_ENH_FRAMING;
1740 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1741 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1743 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1744 intel_dp->DP |= intel_dp->color_range;
1746 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1747 intel_dp->DP |= DP_SYNC_HS_HIGH;
1748 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1749 intel_dp->DP |= DP_SYNC_VS_HIGH;
1750 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1752 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1753 intel_dp->DP |= DP_ENHANCED_FRAMING;
1755 if (IS_CHERRYVIEW(dev))
1756 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1757 else if (crtc->pipe == PIPE_B)
1758 intel_dp->DP |= DP_PIPEB_SELECT;
1762 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1763 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1765 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1766 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1768 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1769 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1771 static void wait_panel_status(struct intel_dp *intel_dp,
1775 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1776 struct drm_i915_private *dev_priv = dev->dev_private;
1777 u32 pp_stat_reg, pp_ctrl_reg;
1779 lockdep_assert_held(&dev_priv->pps_mutex);
1781 pp_stat_reg = _pp_stat_reg(intel_dp);
1782 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1784 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1786 I915_READ(pp_stat_reg),
1787 I915_READ(pp_ctrl_reg));
1789 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1790 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1791 I915_READ(pp_stat_reg),
1792 I915_READ(pp_ctrl_reg));
1795 DRM_DEBUG_KMS("Wait complete\n");
1798 static void wait_panel_on(struct intel_dp *intel_dp)
1800 DRM_DEBUG_KMS("Wait for panel power on\n");
1801 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1804 static void wait_panel_off(struct intel_dp *intel_dp)
1806 DRM_DEBUG_KMS("Wait for panel power off time\n");
1807 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1810 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1812 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1814 /* When we disable the VDD override bit last we have to do the manual
1816 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1817 intel_dp->panel_power_cycle_delay);
1819 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1822 static void wait_backlight_on(struct intel_dp *intel_dp)
1824 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1825 intel_dp->backlight_on_delay);
1828 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1830 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1831 intel_dp->backlight_off_delay);
1834 /* Read the current pp_control value, unlocking the register if it
1838 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1840 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1841 struct drm_i915_private *dev_priv = dev->dev_private;
1844 lockdep_assert_held(&dev_priv->pps_mutex);
1846 control = I915_READ(_pp_ctrl_reg(intel_dp));
1847 control &= ~PANEL_UNLOCK_MASK;
1848 control |= PANEL_UNLOCK_REGS;
1853 * Must be paired with edp_panel_vdd_off().
1854 * Must hold pps_mutex around the whole on/off sequence.
1855 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1857 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1860 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1861 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1862 struct drm_i915_private *dev_priv = dev->dev_private;
1863 enum intel_display_power_domain power_domain;
1865 u32 pp_stat_reg, pp_ctrl_reg;
1866 bool need_to_disable = !intel_dp->want_panel_vdd;
1868 lockdep_assert_held(&dev_priv->pps_mutex);
1870 if (!is_edp(intel_dp))
1873 cancel_delayed_work(&intel_dp->panel_vdd_work);
1874 intel_dp->want_panel_vdd = true;
1876 if (edp_have_panel_vdd(intel_dp))
1877 return need_to_disable;
1879 power_domain = intel_display_port_power_domain(intel_encoder);
1880 intel_display_power_get(dev_priv, power_domain);
1882 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1883 port_name(intel_dig_port->port));
1885 if (!edp_have_panel_power(intel_dp))
1886 wait_panel_power_cycle(intel_dp);
1888 pp = ironlake_get_pp_control(intel_dp);
1889 pp |= EDP_FORCE_VDD;
1891 pp_stat_reg = _pp_stat_reg(intel_dp);
1892 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1894 I915_WRITE(pp_ctrl_reg, pp);
1895 POSTING_READ(pp_ctrl_reg);
1896 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1897 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1899 * If the panel wasn't on, delay before accessing aux channel
1901 if (!edp_have_panel_power(intel_dp)) {
1902 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1903 port_name(intel_dig_port->port));
1904 msleep(intel_dp->panel_power_up_delay);
1907 return need_to_disable;
1911 * Must be paired with intel_edp_panel_vdd_off() or
1912 * intel_edp_panel_off().
1913 * Nested calls to these functions are not allowed since
1914 * we drop the lock. Caller must use some higher level
1915 * locking to prevent nested calls from other threads.
1917 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1921 if (!is_edp(intel_dp))
1925 vdd = edp_panel_vdd_on(intel_dp);
1926 pps_unlock(intel_dp);
1928 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1929 port_name(dp_to_dig_port(intel_dp)->port));
1932 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1934 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1935 struct drm_i915_private *dev_priv = dev->dev_private;
1936 struct intel_digital_port *intel_dig_port =
1937 dp_to_dig_port(intel_dp);
1938 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1939 enum intel_display_power_domain power_domain;
1941 u32 pp_stat_reg, pp_ctrl_reg;
1943 lockdep_assert_held(&dev_priv->pps_mutex);
1945 WARN_ON(intel_dp->want_panel_vdd);
1947 if (!edp_have_panel_vdd(intel_dp))
1950 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1951 port_name(intel_dig_port->port));
1953 pp = ironlake_get_pp_control(intel_dp);
1954 pp &= ~EDP_FORCE_VDD;
1956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1957 pp_stat_reg = _pp_stat_reg(intel_dp);
1959 I915_WRITE(pp_ctrl_reg, pp);
1960 POSTING_READ(pp_ctrl_reg);
1962 /* Make sure sequencer is idle before allowing subsequent activity */
1963 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1964 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1966 if ((pp & POWER_TARGET_ON) == 0)
1967 intel_dp->last_power_cycle = jiffies;
1969 power_domain = intel_display_port_power_domain(intel_encoder);
1970 intel_display_power_put(dev_priv, power_domain);
1973 static void edp_panel_vdd_work(struct work_struct *__work)
1975 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1976 struct intel_dp, panel_vdd_work);
1979 if (!intel_dp->want_panel_vdd)
1980 edp_panel_vdd_off_sync(intel_dp);
1981 pps_unlock(intel_dp);
1984 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1986 unsigned long delay;
1989 * Queue the timer to fire a long time from now (relative to the power
1990 * down delay) to keep the panel power up across a sequence of
1993 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1994 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1998 * Must be paired with edp_panel_vdd_on().
1999 * Must hold pps_mutex around the whole on/off sequence.
2000 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2002 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2004 struct drm_i915_private *dev_priv =
2005 intel_dp_to_dev(intel_dp)->dev_private;
2007 lockdep_assert_held(&dev_priv->pps_mutex);
2009 if (!is_edp(intel_dp))
2012 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2013 port_name(dp_to_dig_port(intel_dp)->port));
2015 intel_dp->want_panel_vdd = false;
2018 edp_panel_vdd_off_sync(intel_dp);
2020 edp_panel_vdd_schedule_off(intel_dp);
2023 static void edp_panel_on(struct intel_dp *intel_dp)
2025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2026 struct drm_i915_private *dev_priv = dev->dev_private;
2030 lockdep_assert_held(&dev_priv->pps_mutex);
2032 if (!is_edp(intel_dp))
2035 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2036 port_name(dp_to_dig_port(intel_dp)->port));
2038 if (WARN(edp_have_panel_power(intel_dp),
2039 "eDP port %c panel power already on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port)))
2043 wait_panel_power_cycle(intel_dp);
2045 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2046 pp = ironlake_get_pp_control(intel_dp);
2048 /* ILK workaround: disable reset around power sequence */
2049 pp &= ~PANEL_POWER_RESET;
2050 I915_WRITE(pp_ctrl_reg, pp);
2051 POSTING_READ(pp_ctrl_reg);
2054 pp |= POWER_TARGET_ON;
2056 pp |= PANEL_POWER_RESET;
2058 I915_WRITE(pp_ctrl_reg, pp);
2059 POSTING_READ(pp_ctrl_reg);
2061 wait_panel_on(intel_dp);
2062 intel_dp->last_power_on = jiffies;
2065 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2066 I915_WRITE(pp_ctrl_reg, pp);
2067 POSTING_READ(pp_ctrl_reg);
2071 void intel_edp_panel_on(struct intel_dp *intel_dp)
2073 if (!is_edp(intel_dp))
2077 edp_panel_on(intel_dp);
2078 pps_unlock(intel_dp);
2082 static void edp_panel_off(struct intel_dp *intel_dp)
2084 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2085 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2087 struct drm_i915_private *dev_priv = dev->dev_private;
2088 enum intel_display_power_domain power_domain;
2092 lockdep_assert_held(&dev_priv->pps_mutex);
2094 if (!is_edp(intel_dp))
2097 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2098 port_name(dp_to_dig_port(intel_dp)->port));
2100 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2101 port_name(dp_to_dig_port(intel_dp)->port));
2103 pp = ironlake_get_pp_control(intel_dp);
2104 /* We need to switch off panel power _and_ force vdd, for otherwise some
2105 * panels get very unhappy and cease to work. */
2106 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2109 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2111 intel_dp->want_panel_vdd = false;
2113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
2116 intel_dp->last_power_cycle = jiffies;
2117 wait_panel_off(intel_dp);
2119 /* We got a reference when we enabled the VDD. */
2120 power_domain = intel_display_port_power_domain(intel_encoder);
2121 intel_display_power_put(dev_priv, power_domain);
2124 void intel_edp_panel_off(struct intel_dp *intel_dp)
2126 if (!is_edp(intel_dp))
2130 edp_panel_off(intel_dp);
2131 pps_unlock(intel_dp);
2134 /* Enable backlight in the panel power control. */
2135 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2137 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2138 struct drm_device *dev = intel_dig_port->base.base.dev;
2139 struct drm_i915_private *dev_priv = dev->dev_private;
2144 * If we enable the backlight right away following a panel power
2145 * on, we may see slight flicker as the panel syncs with the eDP
2146 * link. So delay a bit to make sure the image is solid before
2147 * allowing it to appear.
2149 wait_backlight_on(intel_dp);
2153 pp = ironlake_get_pp_control(intel_dp);
2154 pp |= EDP_BLC_ENABLE;
2156 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2158 I915_WRITE(pp_ctrl_reg, pp);
2159 POSTING_READ(pp_ctrl_reg);
2161 pps_unlock(intel_dp);
2164 /* Enable backlight PWM and backlight PP control. */
2165 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2167 if (!is_edp(intel_dp))
2170 DRM_DEBUG_KMS("\n");
2172 intel_panel_enable_backlight(intel_dp->attached_connector);
2173 _intel_edp_backlight_on(intel_dp);
2176 /* Disable backlight in the panel power control. */
2177 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2184 if (!is_edp(intel_dp))
2189 pp = ironlake_get_pp_control(intel_dp);
2190 pp &= ~EDP_BLC_ENABLE;
2192 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2194 I915_WRITE(pp_ctrl_reg, pp);
2195 POSTING_READ(pp_ctrl_reg);
2197 pps_unlock(intel_dp);
2199 intel_dp->last_backlight_off = jiffies;
2200 edp_wait_backlight_off(intel_dp);
2203 /* Disable backlight PP control and backlight PWM. */
2204 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2206 if (!is_edp(intel_dp))
2209 DRM_DEBUG_KMS("\n");
2211 _intel_edp_backlight_off(intel_dp);
2212 intel_panel_disable_backlight(intel_dp->attached_connector);
2216 * Hook for controlling the panel power control backlight through the bl_power
2217 * sysfs attribute. Take care to handle multiple calls.
2219 static void intel_edp_backlight_power(struct intel_connector *connector,
2222 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2226 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2227 pps_unlock(intel_dp);
2229 if (is_enabled == enable)
2232 DRM_DEBUG_KMS("panel power control backlight %s\n",
2233 enable ? "enable" : "disable");
2236 _intel_edp_backlight_on(intel_dp);
2238 _intel_edp_backlight_off(intel_dp);
2241 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2243 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2244 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2245 struct drm_device *dev = crtc->dev;
2246 struct drm_i915_private *dev_priv = dev->dev_private;
2249 assert_pipe_disabled(dev_priv,
2250 to_intel_crtc(crtc)->pipe);
2252 DRM_DEBUG_KMS("\n");
2253 dpa_ctl = I915_READ(DP_A);
2254 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2255 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2257 /* We don't adjust intel_dp->DP while tearing down the link, to
2258 * facilitate link retraining (e.g. after hotplug). Hence clear all
2259 * enable bits here to ensure that we don't enable too much. */
2260 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2261 intel_dp->DP |= DP_PLL_ENABLE;
2262 I915_WRITE(DP_A, intel_dp->DP);
2267 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2269 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2270 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2271 struct drm_device *dev = crtc->dev;
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2275 assert_pipe_disabled(dev_priv,
2276 to_intel_crtc(crtc)->pipe);
2278 dpa_ctl = I915_READ(DP_A);
2279 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2280 "dp pll off, should be on\n");
2281 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2283 /* We can't rely on the value tracked for the DP register in
2284 * intel_dp->DP because link_down must not change that (otherwise link
2285 * re-training will fail. */
2286 dpa_ctl &= ~DP_PLL_ENABLE;
2287 I915_WRITE(DP_A, dpa_ctl);
2292 /* If the sink supports it, try to set the power state appropriately */
2293 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2297 /* Should have a valid DPCD by this point */
2298 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2301 if (mode != DRM_MODE_DPMS_ON) {
2302 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2306 * When turning on, we need to retry for 1ms to give the sink
2309 for (i = 0; i < 3; i++) {
2310 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2319 DRM_DEBUG_KMS("failed to %s sink power state\n",
2320 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2323 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2324 enum i915_pipe *pipe)
2326 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2327 enum port port = dp_to_dig_port(intel_dp)->port;
2328 struct drm_device *dev = encoder->base.dev;
2329 struct drm_i915_private *dev_priv = dev->dev_private;
2330 enum intel_display_power_domain power_domain;
2333 power_domain = intel_display_port_power_domain(encoder);
2334 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2337 tmp = I915_READ(intel_dp->output_reg);
2339 if (!(tmp & DP_PORT_EN))
2342 if (IS_GEN7(dev) && port == PORT_A) {
2343 *pipe = PORT_TO_PIPE_CPT(tmp);
2344 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2347 for_each_pipe(dev_priv, p) {
2348 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2349 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2355 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2356 intel_dp->output_reg);
2357 } else if (IS_CHERRYVIEW(dev)) {
2358 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2360 *pipe = PORT_TO_PIPE(tmp);
2366 static void intel_dp_get_config(struct intel_encoder *encoder,
2367 struct intel_crtc_state *pipe_config)
2369 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2371 struct drm_device *dev = encoder->base.dev;
2372 struct drm_i915_private *dev_priv = dev->dev_private;
2373 enum port port = dp_to_dig_port(intel_dp)->port;
2374 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2377 tmp = I915_READ(intel_dp->output_reg);
2379 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2381 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2382 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2383 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2384 flags |= DRM_MODE_FLAG_PHSYNC;
2386 flags |= DRM_MODE_FLAG_NHSYNC;
2388 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2389 flags |= DRM_MODE_FLAG_PVSYNC;
2391 flags |= DRM_MODE_FLAG_NVSYNC;
2393 if (tmp & DP_SYNC_HS_HIGH)
2394 flags |= DRM_MODE_FLAG_PHSYNC;
2396 flags |= DRM_MODE_FLAG_NHSYNC;
2398 if (tmp & DP_SYNC_VS_HIGH)
2399 flags |= DRM_MODE_FLAG_PVSYNC;
2401 flags |= DRM_MODE_FLAG_NVSYNC;
2404 pipe_config->base.adjusted_mode.flags |= flags;
2406 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2407 tmp & DP_COLOR_RANGE_16_235)
2408 pipe_config->limited_color_range = true;
2410 pipe_config->has_dp_encoder = true;
2412 intel_dp_get_m_n(crtc, pipe_config);
2414 if (port == PORT_A) {
2415 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2416 pipe_config->port_clock = 162000;
2418 pipe_config->port_clock = 270000;
2421 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2422 &pipe_config->dp_m_n);
2424 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2425 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2427 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2429 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2430 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2432 * This is a big fat ugly hack.
2434 * Some machines in UEFI boot mode provide us a VBT that has 18
2435 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2436 * unknown we fail to light up. Yet the same BIOS boots up with
2437 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2438 * max, not what it tells us to use.
2440 * Note: This will still be broken if the eDP panel is not lit
2441 * up by the BIOS, and thus we can't get the mode at module
2444 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2445 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2446 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2450 static void intel_disable_dp(struct intel_encoder *encoder)
2452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2453 struct drm_device *dev = encoder->base.dev;
2454 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2456 if (crtc->config->has_audio)
2457 intel_audio_codec_disable(encoder);
2459 if (HAS_PSR(dev) && !HAS_DDI(dev))
2460 intel_psr_disable(intel_dp);
2462 /* Make sure the panel is off before trying to change the mode. But also
2463 * ensure that we have vdd while we switch off the panel. */
2464 intel_edp_panel_vdd_on(intel_dp);
2465 intel_edp_backlight_off(intel_dp);
2466 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2467 intel_edp_panel_off(intel_dp);
2469 /* disable the port before the pipe on g4x */
2470 if (INTEL_INFO(dev)->gen < 5)
2471 intel_dp_link_down(intel_dp);
2474 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2476 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2477 enum port port = dp_to_dig_port(intel_dp)->port;
2479 intel_dp_link_down(intel_dp);
2481 ironlake_edp_pll_off(intel_dp);
2484 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2486 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2488 intel_dp_link_down(intel_dp);
2491 static void chv_post_disable_dp(struct intel_encoder *encoder)
2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2494 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2495 struct drm_device *dev = encoder->base.dev;
2496 struct drm_i915_private *dev_priv = dev->dev_private;
2497 struct intel_crtc *intel_crtc =
2498 to_intel_crtc(encoder->base.crtc);
2499 enum dpio_channel ch = vlv_dport_to_channel(dport);
2500 enum i915_pipe pipe = intel_crtc->pipe;
2503 intel_dp_link_down(intel_dp);
2505 mutex_lock(&dev_priv->sb_lock);
2507 /* Propagate soft reset to data lane reset */
2508 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2509 val |= CHV_PCS_REQ_SOFTRESET_EN;
2510 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2512 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2513 val |= CHV_PCS_REQ_SOFTRESET_EN;
2514 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2516 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2517 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2518 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2520 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2521 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2522 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2524 mutex_unlock(&dev_priv->sb_lock);
2528 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2530 uint8_t dp_train_pat)
2532 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2533 struct drm_device *dev = intel_dig_port->base.base.dev;
2534 struct drm_i915_private *dev_priv = dev->dev_private;
2535 enum port port = intel_dig_port->port;
2538 uint32_t temp = I915_READ(DP_TP_CTL(port));
2540 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2541 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2543 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2545 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2546 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2547 case DP_TRAINING_PATTERN_DISABLE:
2548 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2551 case DP_TRAINING_PATTERN_1:
2552 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2554 case DP_TRAINING_PATTERN_2:
2555 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2557 case DP_TRAINING_PATTERN_3:
2558 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2561 I915_WRITE(DP_TP_CTL(port), temp);
2563 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2564 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2565 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2567 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2568 case DP_TRAINING_PATTERN_DISABLE:
2569 *DP |= DP_LINK_TRAIN_OFF_CPT;
2571 case DP_TRAINING_PATTERN_1:
2572 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2574 case DP_TRAINING_PATTERN_2:
2575 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2577 case DP_TRAINING_PATTERN_3:
2578 DRM_ERROR("DP training pattern 3 not supported\n");
2579 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2584 if (IS_CHERRYVIEW(dev))
2585 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2587 *DP &= ~DP_LINK_TRAIN_MASK;
2589 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2590 case DP_TRAINING_PATTERN_DISABLE:
2591 *DP |= DP_LINK_TRAIN_OFF;
2593 case DP_TRAINING_PATTERN_1:
2594 *DP |= DP_LINK_TRAIN_PAT_1;
2596 case DP_TRAINING_PATTERN_2:
2597 *DP |= DP_LINK_TRAIN_PAT_2;
2599 case DP_TRAINING_PATTERN_3:
2600 if (IS_CHERRYVIEW(dev)) {
2601 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2603 DRM_ERROR("DP training pattern 3 not supported\n");
2604 *DP |= DP_LINK_TRAIN_PAT_2;
2611 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2613 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2614 struct drm_i915_private *dev_priv = dev->dev_private;
2616 /* enable with pattern 1 (as per spec) */
2617 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2618 DP_TRAINING_PATTERN_1);
2620 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2621 POSTING_READ(intel_dp->output_reg);
2624 * Magic for VLV/CHV. We _must_ first set up the register
2625 * without actually enabling the port, and then do another
2626 * write to enable the port. Otherwise link training will
2627 * fail when the power sequencer is freshly used for this port.
2629 intel_dp->DP |= DP_PORT_EN;
2631 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2632 POSTING_READ(intel_dp->output_reg);
2635 static void intel_enable_dp(struct intel_encoder *encoder)
2637 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2638 struct drm_device *dev = encoder->base.dev;
2639 struct drm_i915_private *dev_priv = dev->dev_private;
2640 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2641 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2642 unsigned int lane_mask = 0x0;
2644 if (WARN_ON(dp_reg & DP_PORT_EN))
2649 if (IS_VALLEYVIEW(dev))
2650 vlv_init_panel_power_sequencer(intel_dp);
2652 intel_dp_enable_port(intel_dp);
2654 edp_panel_vdd_on(intel_dp);
2655 edp_panel_on(intel_dp);
2656 edp_panel_vdd_off(intel_dp, true);
2658 pps_unlock(intel_dp);
2660 if (IS_VALLEYVIEW(dev))
2661 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2664 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2665 intel_dp_start_link_train(intel_dp);
2666 intel_dp_complete_link_train(intel_dp);
2667 intel_dp_stop_link_train(intel_dp);
2669 if (crtc->config->has_audio) {
2670 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2671 pipe_name(crtc->pipe));
2672 intel_audio_codec_enable(encoder);
2676 static void g4x_enable_dp(struct intel_encoder *encoder)
2678 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2680 intel_enable_dp(encoder);
2681 intel_edp_backlight_on(intel_dp);
2684 static void vlv_enable_dp(struct intel_encoder *encoder)
2686 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2688 intel_edp_backlight_on(intel_dp);
2689 intel_psr_enable(intel_dp);
2692 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2694 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2695 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2697 intel_dp_prepare(encoder);
2699 /* Only ilk+ has port A */
2700 if (dport->port == PORT_A) {
2701 ironlake_set_pll_cpu_edp(intel_dp);
2702 ironlake_edp_pll_on(intel_dp);
2706 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2710 enum i915_pipe pipe = intel_dp->pps_pipe;
2711 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2713 edp_panel_vdd_off_sync(intel_dp);
2716 * VLV seems to get confused when multiple power seqeuencers
2717 * have the same port selected (even if only one has power/vdd
2718 * enabled). The failure manifests as vlv_wait_port_ready() failing
2719 * CHV on the other hand doesn't seem to mind having the same port
2720 * selected in multiple power seqeuencers, but let's clear the
2721 * port select always when logically disconnecting a power sequencer
2724 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2725 pipe_name(pipe), port_name(intel_dig_port->port));
2726 I915_WRITE(pp_on_reg, 0);
2727 POSTING_READ(pp_on_reg);
2729 intel_dp->pps_pipe = INVALID_PIPE;
2732 static void vlv_steal_power_sequencer(struct drm_device *dev,
2733 enum i915_pipe pipe)
2735 struct drm_i915_private *dev_priv = dev->dev_private;
2736 struct intel_encoder *encoder;
2738 lockdep_assert_held(&dev_priv->pps_mutex);
2740 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2743 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2745 struct intel_dp *intel_dp;
2748 if (encoder->type != INTEL_OUTPUT_EDP)
2751 intel_dp = enc_to_intel_dp(&encoder->base);
2752 port = dp_to_dig_port(intel_dp)->port;
2754 if (intel_dp->pps_pipe != pipe)
2757 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2758 pipe_name(pipe), port_name(port));
2760 WARN(encoder->connectors_active,
2761 "stealing pipe %c power sequencer from active eDP port %c\n",
2762 pipe_name(pipe), port_name(port));
2764 /* make sure vdd is off before we steal it */
2765 vlv_detach_power_sequencer(intel_dp);
2769 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2772 struct intel_encoder *encoder = &intel_dig_port->base;
2773 struct drm_device *dev = encoder->base.dev;
2774 struct drm_i915_private *dev_priv = dev->dev_private;
2775 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2777 lockdep_assert_held(&dev_priv->pps_mutex);
2779 if (!is_edp(intel_dp))
2782 if (intel_dp->pps_pipe == crtc->pipe)
2786 * If another power sequencer was being used on this
2787 * port previously make sure to turn off vdd there while
2788 * we still have control of it.
2790 if (intel_dp->pps_pipe != INVALID_PIPE)
2791 vlv_detach_power_sequencer(intel_dp);
2794 * We may be stealing the power
2795 * sequencer from another port.
2797 vlv_steal_power_sequencer(dev, crtc->pipe);
2799 /* now it's all ours */
2800 intel_dp->pps_pipe = crtc->pipe;
2802 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2803 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2805 /* init power sequencer on this pipe and port */
2806 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2807 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2810 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2812 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2813 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2814 struct drm_device *dev = encoder->base.dev;
2815 struct drm_i915_private *dev_priv = dev->dev_private;
2816 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2817 enum dpio_channel port = vlv_dport_to_channel(dport);
2818 int pipe = intel_crtc->pipe;
2821 mutex_lock(&dev_priv->sb_lock);
2823 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2830 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2831 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2834 mutex_unlock(&dev_priv->sb_lock);
2836 intel_enable_dp(encoder);
2839 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2841 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2842 struct drm_device *dev = encoder->base.dev;
2843 struct drm_i915_private *dev_priv = dev->dev_private;
2844 struct intel_crtc *intel_crtc =
2845 to_intel_crtc(encoder->base.crtc);
2846 enum dpio_channel port = vlv_dport_to_channel(dport);
2847 int pipe = intel_crtc->pipe;
2849 intel_dp_prepare(encoder);
2851 /* Program Tx lane resets to default */
2852 mutex_lock(&dev_priv->sb_lock);
2853 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2854 DPIO_PCS_TX_LANE2_RESET |
2855 DPIO_PCS_TX_LANE1_RESET);
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2857 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2858 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2859 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2860 DPIO_PCS_CLK_SOFT_RESET);
2862 /* Fix up inter-pair skew failure */
2863 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2864 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2865 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2866 mutex_unlock(&dev_priv->sb_lock);
2869 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2871 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2872 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2873 struct drm_device *dev = encoder->base.dev;
2874 struct drm_i915_private *dev_priv = dev->dev_private;
2875 struct intel_crtc *intel_crtc =
2876 to_intel_crtc(encoder->base.crtc);
2877 enum dpio_channel ch = vlv_dport_to_channel(dport);
2878 int pipe = intel_crtc->pipe;
2879 int data, i, stagger;
2882 mutex_lock(&dev_priv->sb_lock);
2884 /* allow hardware to manage TX FIFO reset source */
2885 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2886 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2887 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2889 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2890 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2891 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2893 /* Deassert soft data lane reset*/
2894 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2895 val |= CHV_PCS_REQ_SOFTRESET_EN;
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2898 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2899 val |= CHV_PCS_REQ_SOFTRESET_EN;
2900 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2902 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2903 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2904 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2906 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2907 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2908 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2910 /* Program Tx lane latency optimal setting*/
2911 for (i = 0; i < 4; i++) {
2912 /* Set the upar bit */
2913 data = (i == 1) ? 0x0 : 0x1;
2914 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2915 data << DPIO_UPAR_SHIFT);
2918 /* Data lane stagger programming */
2919 if (intel_crtc->config->port_clock > 270000)
2921 else if (intel_crtc->config->port_clock > 135000)
2923 else if (intel_crtc->config->port_clock > 67500)
2925 else if (intel_crtc->config->port_clock > 33750)
2930 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2931 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2932 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2934 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2935 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2936 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2939 DPIO_LANESTAGGER_STRAP(stagger) |
2940 DPIO_LANESTAGGER_STRAP_OVRD |
2941 DPIO_TX1_STAGGER_MASK(0x1f) |
2942 DPIO_TX1_STAGGER_MULT(6) |
2943 DPIO_TX2_STAGGER_MULT(0));
2945 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2946 DPIO_LANESTAGGER_STRAP(stagger) |
2947 DPIO_LANESTAGGER_STRAP_OVRD |
2948 DPIO_TX1_STAGGER_MASK(0x1f) |
2949 DPIO_TX1_STAGGER_MULT(7) |
2950 DPIO_TX2_STAGGER_MULT(5));
2952 mutex_unlock(&dev_priv->sb_lock);
2954 intel_enable_dp(encoder);
2957 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2959 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2960 struct drm_device *dev = encoder->base.dev;
2961 struct drm_i915_private *dev_priv = dev->dev_private;
2962 struct intel_crtc *intel_crtc =
2963 to_intel_crtc(encoder->base.crtc);
2964 enum dpio_channel ch = vlv_dport_to_channel(dport);
2965 enum i915_pipe pipe = intel_crtc->pipe;
2968 intel_dp_prepare(encoder);
2970 mutex_lock(&dev_priv->sb_lock);
2972 /* program left/right clock distribution */
2973 if (pipe != PIPE_B) {
2974 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2975 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2977 val |= CHV_BUFLEFTENA1_FORCE;
2979 val |= CHV_BUFRIGHTENA1_FORCE;
2980 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2982 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2983 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2985 val |= CHV_BUFLEFTENA2_FORCE;
2987 val |= CHV_BUFRIGHTENA2_FORCE;
2988 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2991 /* program clock channel usage */
2992 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2993 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2995 val &= ~CHV_PCS_USEDCLKCHANNEL;
2997 val |= CHV_PCS_USEDCLKCHANNEL;
2998 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3000 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3001 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3003 val &= ~CHV_PCS_USEDCLKCHANNEL;
3005 val |= CHV_PCS_USEDCLKCHANNEL;
3006 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3009 * This a a bit weird since generally CL
3010 * matches the pipe, but here we need to
3011 * pick the CL based on the port.
3013 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3015 val &= ~CHV_CMN_USEDCLKCHANNEL;
3017 val |= CHV_CMN_USEDCLKCHANNEL;
3018 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3020 mutex_unlock(&dev_priv->sb_lock);
3024 * Native read with retry for link status and receiver capability reads for
3025 * cases where the sink may still be asleep.
3027 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3028 * supposed to retry 3 times per the spec.
3031 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3032 void *buffer, size_t size)
3038 * Sometime we just get the same incorrect byte repeated
3039 * over the entire buffer. Doing just one throw away read
3040 * initially seems to "solve" it.
3042 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3044 for (i = 0; i < 3; i++) {
3045 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3055 * Fetch AUX CH registers 0x202 - 0x207 which contain
3056 * link status information
3059 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3061 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3064 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3067 /* These are source-specific values. */
3069 intel_dp_voltage_max(struct intel_dp *intel_dp)
3071 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3072 struct drm_i915_private *dev_priv = dev->dev_private;
3073 enum port port = dp_to_dig_port(intel_dp)->port;
3075 if (IS_BROXTON(dev))
3076 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3077 else if (INTEL_INFO(dev)->gen >= 9) {
3078 if (dev_priv->edp_low_vswing && port == PORT_A)
3079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3080 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3081 } else if (IS_VALLEYVIEW(dev))
3082 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3083 else if (IS_GEN7(dev) && port == PORT_A)
3084 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3085 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3086 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3088 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3092 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3094 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3095 enum port port = dp_to_dig_port(intel_dp)->port;
3097 if (INTEL_INFO(dev)->gen >= 9) {
3098 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3100 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3102 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3104 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3106 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3108 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3110 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3111 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3112 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3113 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3115 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3117 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3122 } else if (IS_VALLEYVIEW(dev)) {
3123 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3125 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3127 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3129 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3132 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3134 } else if (IS_GEN7(dev) && port == PORT_A) {
3135 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3137 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3142 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3145 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3147 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3149 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3151 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3154 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3159 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3161 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3162 struct drm_i915_private *dev_priv = dev->dev_private;
3163 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3164 struct intel_crtc *intel_crtc =
3165 to_intel_crtc(dport->base.base.crtc);
3166 unsigned long demph_reg_value, preemph_reg_value,
3167 uniqtranscale_reg_value;
3168 uint8_t train_set = intel_dp->train_set[0];
3169 enum dpio_channel port = vlv_dport_to_channel(dport);
3170 int pipe = intel_crtc->pipe;
3172 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3173 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3174 preemph_reg_value = 0x0004000;
3175 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3176 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3177 demph_reg_value = 0x2B405555;
3178 uniqtranscale_reg_value = 0x552AB83A;
3180 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3181 demph_reg_value = 0x2B404040;
3182 uniqtranscale_reg_value = 0x5548B83A;
3184 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3185 demph_reg_value = 0x2B245555;
3186 uniqtranscale_reg_value = 0x5560B83A;
3188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3189 demph_reg_value = 0x2B405555;
3190 uniqtranscale_reg_value = 0x5598DA3A;
3196 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3197 preemph_reg_value = 0x0002000;
3198 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3199 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3200 demph_reg_value = 0x2B404040;
3201 uniqtranscale_reg_value = 0x5552B83A;
3203 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3204 demph_reg_value = 0x2B404848;
3205 uniqtranscale_reg_value = 0x5580B83A;
3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3208 demph_reg_value = 0x2B404040;
3209 uniqtranscale_reg_value = 0x55ADDA3A;
3215 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3216 preemph_reg_value = 0x0000000;
3217 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3218 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3219 demph_reg_value = 0x2B305555;
3220 uniqtranscale_reg_value = 0x5570B83A;
3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3223 demph_reg_value = 0x2B2B4040;
3224 uniqtranscale_reg_value = 0x55ADDA3A;
3230 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3231 preemph_reg_value = 0x0006000;
3232 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3234 demph_reg_value = 0x1B405555;
3235 uniqtranscale_reg_value = 0x55ADDA3A;
3245 mutex_lock(&dev_priv->sb_lock);
3246 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3247 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3248 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3249 uniqtranscale_reg_value);
3250 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3251 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3252 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3253 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3254 mutex_unlock(&dev_priv->sb_lock);
3259 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3261 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3262 struct drm_i915_private *dev_priv = dev->dev_private;
3263 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3264 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3265 u32 deemph_reg_value, margin_reg_value, val;
3266 uint8_t train_set = intel_dp->train_set[0];
3267 enum dpio_channel ch = vlv_dport_to_channel(dport);
3268 enum i915_pipe pipe = intel_crtc->pipe;
3271 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3272 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3273 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3275 deemph_reg_value = 128;
3276 margin_reg_value = 52;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3279 deemph_reg_value = 128;
3280 margin_reg_value = 77;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283 deemph_reg_value = 128;
3284 margin_reg_value = 102;
3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3287 deemph_reg_value = 128;
3288 margin_reg_value = 154;
3289 /* FIXME extra to set for 1200 */
3295 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3296 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3298 deemph_reg_value = 85;
3299 margin_reg_value = 78;
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3302 deemph_reg_value = 85;
3303 margin_reg_value = 116;
3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3306 deemph_reg_value = 85;
3307 margin_reg_value = 154;
3313 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3314 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3316 deemph_reg_value = 64;
3317 margin_reg_value = 104;
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3320 deemph_reg_value = 64;
3321 margin_reg_value = 154;
3327 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3328 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3330 deemph_reg_value = 43;
3331 margin_reg_value = 154;
3341 mutex_lock(&dev_priv->sb_lock);
3343 /* Clear calc init */
3344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3345 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3346 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3347 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3348 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3350 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3351 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3352 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3353 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3354 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3357 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3358 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3359 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3362 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3363 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3366 /* Program swing deemph */
3367 for (i = 0; i < 4; i++) {
3368 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3369 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3370 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3371 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3374 /* Program swing margin */
3375 for (i = 0; i < 4; i++) {
3376 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3377 val &= ~DPIO_SWING_MARGIN000_MASK;
3378 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3379 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3382 /* Disable unique transition scale */
3383 for (i = 0; i < 4; i++) {
3384 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3385 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3386 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3389 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3390 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3391 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3392 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3395 * The document said it needs to set bit 27 for ch0 and bit 26
3396 * for ch1. Might be a typo in the doc.
3397 * For now, for this unique transition scale selection, set bit
3398 * 27 for ch0 and ch1.
3400 for (i = 0; i < 4; i++) {
3401 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3402 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3403 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3406 for (i = 0; i < 4; i++) {
3407 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3408 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3409 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3410 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3414 /* Start swing calculation */
3415 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3416 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3417 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3420 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3421 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3424 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3425 val |= DPIO_LRC_BYPASS;
3426 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3428 mutex_unlock(&dev_priv->sb_lock);
3434 intel_get_adjust_train(struct intel_dp *intel_dp,
3435 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3440 uint8_t voltage_max;
3441 uint8_t preemph_max;
3443 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3444 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3445 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3453 voltage_max = intel_dp_voltage_max(intel_dp);
3454 if (v >= voltage_max)
3455 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3457 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3458 if (p >= preemph_max)
3459 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3461 for (lane = 0; lane < 4; lane++)
3462 intel_dp->train_set[lane] = v | p;
3466 gen4_signal_levels(uint8_t train_set)
3468 uint32_t signal_levels = 0;
3470 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3473 signal_levels |= DP_VOLTAGE_0_4;
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3476 signal_levels |= DP_VOLTAGE_0_6;
3478 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3479 signal_levels |= DP_VOLTAGE_0_8;
3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3482 signal_levels |= DP_VOLTAGE_1_2;
3485 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3486 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3488 signal_levels |= DP_PRE_EMPHASIS_0;
3490 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3491 signal_levels |= DP_PRE_EMPHASIS_3_5;
3493 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3494 signal_levels |= DP_PRE_EMPHASIS_6;
3496 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3497 signal_levels |= DP_PRE_EMPHASIS_9_5;
3500 return signal_levels;
3503 /* Gen6's DP voltage swing and pre-emphasis control */
3505 gen6_edp_signal_levels(uint8_t train_set)
3507 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3508 DP_TRAIN_PRE_EMPHASIS_MASK);
3509 switch (signal_levels) {
3510 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3513 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3514 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3515 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3516 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3517 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3518 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3519 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3520 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3521 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3522 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3523 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3525 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3526 "0x%x\n", signal_levels);
3527 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3531 /* Gen7's DP voltage swing and pre-emphasis control */
3533 gen7_edp_signal_levels(uint8_t train_set)
3535 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3536 DP_TRAIN_PRE_EMPHASIS_MASK);
3537 switch (signal_levels) {
3538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3539 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3541 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3543 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3545 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3546 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3547 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3548 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3550 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3551 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3552 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3553 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3556 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3557 "0x%x\n", signal_levels);
3558 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3562 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3564 hsw_signal_levels(uint8_t train_set)
3566 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3567 DP_TRAIN_PRE_EMPHASIS_MASK);
3568 switch (signal_levels) {
3569 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3570 return DDI_BUF_TRANS_SELECT(0);
3571 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3572 return DDI_BUF_TRANS_SELECT(1);
3573 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3574 return DDI_BUF_TRANS_SELECT(2);
3575 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3576 return DDI_BUF_TRANS_SELECT(3);
3578 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3579 return DDI_BUF_TRANS_SELECT(4);
3580 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3581 return DDI_BUF_TRANS_SELECT(5);
3582 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3583 return DDI_BUF_TRANS_SELECT(6);
3585 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3586 return DDI_BUF_TRANS_SELECT(7);
3587 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3588 return DDI_BUF_TRANS_SELECT(8);
3590 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3591 return DDI_BUF_TRANS_SELECT(9);
3593 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3594 "0x%x\n", signal_levels);
3595 return DDI_BUF_TRANS_SELECT(0);
3599 static void bxt_signal_levels(struct intel_dp *intel_dp)
3601 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3602 enum port port = dport->port;
3603 struct drm_device *dev = dport->base.base.dev;
3604 struct intel_encoder *encoder = &dport->base;
3605 uint8_t train_set = intel_dp->train_set[0];
3608 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3609 DP_TRAIN_PRE_EMPHASIS_MASK);
3610 switch (signal_levels) {
3612 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3613 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3616 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3619 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3625 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3628 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3631 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3634 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3645 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3648 /* Properly updates "DP" with the correct signal levels. */
3650 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3653 enum port port = intel_dig_port->port;
3654 struct drm_device *dev = intel_dig_port->base.base.dev;
3655 uint32_t signal_levels, mask;
3656 uint8_t train_set = intel_dp->train_set[0];
3658 if (IS_BROXTON(dev)) {
3660 bxt_signal_levels(intel_dp);
3662 } else if (HAS_DDI(dev)) {
3663 signal_levels = hsw_signal_levels(train_set);
3664 mask = DDI_BUF_EMP_MASK;
3665 } else if (IS_CHERRYVIEW(dev)) {
3666 signal_levels = chv_signal_levels(intel_dp);
3668 } else if (IS_VALLEYVIEW(dev)) {
3669 signal_levels = vlv_signal_levels(intel_dp);
3671 } else if (IS_GEN7(dev) && port == PORT_A) {
3672 signal_levels = gen7_edp_signal_levels(train_set);
3673 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3674 } else if (IS_GEN6(dev) && port == PORT_A) {
3675 signal_levels = gen6_edp_signal_levels(train_set);
3676 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3678 signal_levels = gen4_signal_levels(train_set);
3679 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3683 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3685 DRM_DEBUG_KMS("Using vswing level %d\n",
3686 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3687 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3688 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3689 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3691 *DP = (*DP & ~mask) | signal_levels;
3695 intel_dp_set_link_train(struct intel_dp *intel_dp,
3697 uint8_t dp_train_pat)
3699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3700 struct drm_device *dev = intel_dig_port->base.base.dev;
3701 struct drm_i915_private *dev_priv = dev->dev_private;
3702 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3705 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3707 I915_WRITE(intel_dp->output_reg, *DP);
3708 POSTING_READ(intel_dp->output_reg);
3710 buf[0] = dp_train_pat;
3711 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3712 DP_TRAINING_PATTERN_DISABLE) {
3713 /* don't write DP_TRAINING_LANEx_SET on disable */
3716 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3717 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3718 len = intel_dp->lane_count + 1;
3721 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3728 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3729 uint8_t dp_train_pat)
3731 if (!intel_dp->train_set_valid)
3732 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3733 intel_dp_set_signal_levels(intel_dp, DP);
3734 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3738 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3739 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3741 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3742 struct drm_device *dev = intel_dig_port->base.base.dev;
3743 struct drm_i915_private *dev_priv = dev->dev_private;
3746 intel_get_adjust_train(intel_dp, link_status);
3747 intel_dp_set_signal_levels(intel_dp, DP);
3749 I915_WRITE(intel_dp->output_reg, *DP);
3750 POSTING_READ(intel_dp->output_reg);
3752 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3753 intel_dp->train_set, intel_dp->lane_count);
3755 return ret == intel_dp->lane_count;
3758 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3760 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3761 struct drm_device *dev = intel_dig_port->base.base.dev;
3762 struct drm_i915_private *dev_priv = dev->dev_private;
3763 enum port port = intel_dig_port->port;
3769 val = I915_READ(DP_TP_CTL(port));
3770 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3771 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3772 I915_WRITE(DP_TP_CTL(port), val);
3775 * On PORT_A we can have only eDP in SST mode. There the only reason
3776 * we need to set idle transmission mode is to work around a HW issue
3777 * where we enable the pipe while not in idle link-training mode.
3778 * In this case there is requirement to wait for a minimum number of
3779 * idle patterns to be sent.
3784 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3786 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3789 /* Enable corresponding port and start training pattern 1 */
3791 intel_dp_start_link_train(struct intel_dp *intel_dp)
3793 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3794 struct drm_device *dev = encoder->dev;
3797 int voltage_tries, loop_tries;
3798 uint32_t DP = intel_dp->DP;
3799 uint8_t link_config[2];
3802 intel_ddi_prepare_link_retrain(encoder);
3804 /* Write the link configuration data */
3805 link_config[0] = intel_dp->link_bw;
3806 link_config[1] = intel_dp->lane_count;
3807 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3808 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3809 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3810 if (intel_dp->num_sink_rates)
3811 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3812 &intel_dp->rate_select, 1);
3815 link_config[1] = DP_SET_ANSI_8B10B;
3816 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3820 /* clock recovery */
3821 if (!intel_dp_reset_link_train(intel_dp, &DP,
3822 DP_TRAINING_PATTERN_1 |
3823 DP_LINK_SCRAMBLING_DISABLE)) {
3824 DRM_ERROR("failed to enable link training\n");
3832 uint8_t link_status[DP_LINK_STATUS_SIZE];
3834 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3835 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3836 DRM_ERROR("failed to get link status\n");
3840 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3841 DRM_DEBUG_KMS("clock recovery OK\n");
3846 * if we used previously trained voltage and pre-emphasis values
3847 * and we don't get clock recovery, reset link training values
3849 if (intel_dp->train_set_valid) {
3850 DRM_DEBUG_KMS("clock recovery not ok, reset");
3851 /* clear the flag as we are not reusing train set */
3852 intel_dp->train_set_valid = false;
3853 if (!intel_dp_reset_link_train(intel_dp, &DP,
3854 DP_TRAINING_PATTERN_1 |
3855 DP_LINK_SCRAMBLING_DISABLE)) {
3856 DRM_ERROR("failed to enable link training\n");
3862 /* Check to see if we've tried the max voltage */
3863 for (i = 0; i < intel_dp->lane_count; i++)
3864 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3866 if (i == intel_dp->lane_count) {
3868 if (loop_tries == 5) {
3869 DRM_ERROR("too many full retries, give up\n");
3872 intel_dp_reset_link_train(intel_dp, &DP,
3873 DP_TRAINING_PATTERN_1 |
3874 DP_LINK_SCRAMBLING_DISABLE);
3879 /* Check to see if we've tried the same voltage 5 times */
3880 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3882 if (voltage_tries == 5) {
3883 DRM_ERROR("too many voltage retries, give up\n");
3888 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3890 /* Update training set as requested by target */
3891 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3892 DRM_ERROR("failed to update link training\n");
3901 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3903 bool channel_eq = false;
3904 int tries, cr_tries;
3905 uint32_t DP = intel_dp->DP;
3906 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3908 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3909 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3910 training_pattern = DP_TRAINING_PATTERN_3;
3912 /* channel equalization */
3913 if (!intel_dp_set_link_train(intel_dp, &DP,
3915 DP_LINK_SCRAMBLING_DISABLE)) {
3916 DRM_ERROR("failed to start channel equalization\n");
3924 uint8_t link_status[DP_LINK_STATUS_SIZE];
3927 DRM_ERROR("failed to train DP, aborting\n");
3931 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3932 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3933 DRM_ERROR("failed to get link status\n");
3937 /* Make sure clock is still ok */
3938 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3939 intel_dp->train_set_valid = false;
3940 intel_dp_start_link_train(intel_dp);
3941 intel_dp_set_link_train(intel_dp, &DP,
3943 DP_LINK_SCRAMBLING_DISABLE);
3948 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3953 /* Try 5 times, then try clock recovery if that fails */
3955 intel_dp->train_set_valid = false;
3956 intel_dp_start_link_train(intel_dp);
3957 intel_dp_set_link_train(intel_dp, &DP,
3959 DP_LINK_SCRAMBLING_DISABLE);
3965 /* Update training set as requested by target */
3966 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3967 DRM_ERROR("failed to update link training\n");
3973 intel_dp_set_idle_link_train(intel_dp);
3978 intel_dp->train_set_valid = true;
3979 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3983 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3985 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3986 DP_TRAINING_PATTERN_DISABLE);
3990 intel_dp_link_down(struct intel_dp *intel_dp)
3992 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3993 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3994 enum port port = intel_dig_port->port;
3995 struct drm_device *dev = intel_dig_port->base.base.dev;
3996 struct drm_i915_private *dev_priv = dev->dev_private;
3997 uint32_t DP = intel_dp->DP;
3999 if (WARN_ON(HAS_DDI(dev)))
4002 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4005 DRM_DEBUG_KMS("\n");
4007 if ((IS_GEN7(dev) && port == PORT_A) ||
4008 (HAS_PCH_CPT(dev) && port != PORT_A)) {
4009 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4010 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4012 if (IS_CHERRYVIEW(dev))
4013 DP &= ~DP_LINK_TRAIN_MASK_CHV;
4015 DP &= ~DP_LINK_TRAIN_MASK;
4016 DP |= DP_LINK_TRAIN_PAT_IDLE;
4018 I915_WRITE(intel_dp->output_reg, DP);
4019 POSTING_READ(intel_dp->output_reg);
4021 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4022 I915_WRITE(intel_dp->output_reg, DP);
4023 POSTING_READ(intel_dp->output_reg);
4026 * HW workaround for IBX, we need to move the port
4027 * to transcoder A after disabling it to allow the
4028 * matching HDMI port to be enabled on transcoder A.
4030 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
4031 /* always enable with pattern 1 (as per spec) */
4032 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
4033 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
4034 I915_WRITE(intel_dp->output_reg, DP);
4035 POSTING_READ(intel_dp->output_reg);
4038 I915_WRITE(intel_dp->output_reg, DP);
4039 POSTING_READ(intel_dp->output_reg);
4042 msleep(intel_dp->panel_power_down_delay);
4046 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4048 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4049 struct drm_device *dev = dig_port->base.base.dev;
4050 struct drm_i915_private *dev_priv = dev->dev_private;
4053 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
4054 sizeof(intel_dp->dpcd)) < 0)
4055 return false; /* aux transfer failed */
4057 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4059 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4060 return false; /* DPCD not present */
4062 /* Check if the panel supports PSR */
4063 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4064 if (is_edp(intel_dp)) {
4065 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4067 sizeof(intel_dp->psr_dpcd));
4068 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4069 dev_priv->psr.sink_support = true;
4070 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4073 if (INTEL_INFO(dev)->gen >= 9 &&
4074 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4075 uint8_t frame_sync_cap;
4077 dev_priv->psr.sink_support = true;
4078 intel_dp_dpcd_read_wake(&intel_dp->aux,
4079 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4080 &frame_sync_cap, 1);
4081 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4082 /* PSR2 needs frame sync as well */
4083 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4084 DRM_DEBUG_KMS("PSR2 %s on sink",
4085 dev_priv->psr.psr2_support ? "supported" : "not supported");
4089 /* Training Pattern 3 support, Intel platforms that support HBR2 alone
4090 * have support for TP3 hence that check is used along with dpcd check
4091 * to ensure TP3 can be enabled.
4092 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
4093 * supported but still not enabled.
4095 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
4096 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
4097 intel_dp_source_supports_hbr2(dev)) {
4098 intel_dp->use_tps3 = true;
4099 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
4101 intel_dp->use_tps3 = false;
4103 /* Intermediate frequency support */
4104 if (is_edp(intel_dp) &&
4105 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4106 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4107 (rev >= 0x03)) { /* eDp v1.4 or higher */
4108 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4111 intel_dp_dpcd_read_wake(&intel_dp->aux,
4112 DP_SUPPORTED_LINK_RATES,
4114 sizeof(sink_rates));
4116 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4117 int val = le16_to_cpu(sink_rates[i]);
4122 /* Value read is in kHz while drm clock is saved in deca-kHz */
4123 intel_dp->sink_rates[i] = (val * 200) / 10;
4125 intel_dp->num_sink_rates = i;
4128 intel_dp_print_rates(intel_dp);
4130 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4131 DP_DWN_STRM_PORT_PRESENT))
4132 return true; /* native DP sink */
4134 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4135 return true; /* no per-port downstream info */
4137 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4138 intel_dp->downstream_ports,
4139 DP_MAX_DOWNSTREAM_PORTS) < 0)
4140 return false; /* downstream port status fetch failed */
4146 intel_dp_probe_oui(struct intel_dp *intel_dp)
4150 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4153 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4154 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4155 buf[0], buf[1], buf[2]);
4157 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4158 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4159 buf[0], buf[1], buf[2]);
4163 intel_dp_probe_mst(struct intel_dp *intel_dp)
4167 if (!intel_dp->can_mst)
4170 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4173 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4174 if (buf[0] & DP_MST_CAP) {
4175 DRM_DEBUG_KMS("Sink is MST capable\n");
4176 intel_dp->is_mst = true;
4178 DRM_DEBUG_KMS("Sink is not MST capable\n");
4179 intel_dp->is_mst = false;
4184 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4185 return intel_dp->is_mst;
4191 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4193 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4194 struct drm_device *dev = intel_dig_port->base.base.dev;
4195 struct intel_crtc *intel_crtc =
4196 to_intel_crtc(intel_dig_port->base.base.crtc);
4202 hsw_disable_ips(intel_crtc);
4204 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4209 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4214 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4219 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4220 buf | DP_TEST_SINK_START) < 0) {
4225 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4230 test_crc_count = buf & DP_TEST_COUNT_MASK;
4233 if (drm_dp_dpcd_readb(&intel_dp->aux,
4234 DP_TEST_SINK_MISC, &buf) < 0) {
4238 intel_wait_for_vblank(dev, intel_crtc->pipe);
4239 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4241 if (attempts == 0) {
4242 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4247 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4252 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4256 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4257 buf & ~DP_TEST_SINK_START) < 0) {
4262 hsw_enable_ips(intel_crtc);
4267 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4269 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4270 DP_DEVICE_SERVICE_IRQ_VECTOR,
4271 sink_irq_vector, 1) == 1;
4274 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4276 uint8_t test_result = DP_TEST_ACK;
4280 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4282 uint8_t test_result = DP_TEST_NAK;
4286 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4288 uint8_t test_result = DP_TEST_NAK;
4289 struct intel_connector *intel_connector = intel_dp->attached_connector;
4290 struct drm_connector *connector = &intel_connector->base;
4292 if (intel_connector->detect_edid == NULL ||
4293 connector->edid_corrupt ||
4294 intel_dp->aux.i2c_defer_count > 6) {
4295 /* Check EDID read for NACKs, DEFERs and corruption
4296 * (DP CTS 1.2 Core r1.1)
4297 * 4.2.2.4 : Failed EDID read, I2C_NAK
4298 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4299 * 4.2.2.6 : EDID corruption detected
4300 * Use failsafe mode for all cases
4302 if (intel_dp->aux.i2c_nack_count > 0 ||
4303 intel_dp->aux.i2c_defer_count > 0)
4304 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4305 intel_dp->aux.i2c_nack_count,
4306 intel_dp->aux.i2c_defer_count);
4307 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4309 if (!drm_dp_dpcd_write(&intel_dp->aux,
4310 DP_TEST_EDID_CHECKSUM,
4311 &intel_connector->detect_edid->checksum,
4313 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4315 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4316 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4319 /* Set test active flag here so userspace doesn't interrupt things */
4320 intel_dp->compliance_test_active = 1;
4325 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4327 uint8_t test_result = DP_TEST_NAK;
4331 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4333 uint8_t response = DP_TEST_NAK;
4337 intel_dp->compliance_test_active = 0;
4338 intel_dp->compliance_test_type = 0;
4339 intel_dp->compliance_test_data = 0;
4341 intel_dp->aux.i2c_nack_count = 0;
4342 intel_dp->aux.i2c_defer_count = 0;
4344 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4346 DRM_DEBUG_KMS("Could not read test request from sink\n");
4351 case DP_TEST_LINK_TRAINING:
4352 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4353 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4354 response = intel_dp_autotest_link_training(intel_dp);
4356 case DP_TEST_LINK_VIDEO_PATTERN:
4357 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4358 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4359 response = intel_dp_autotest_video_pattern(intel_dp);
4361 case DP_TEST_LINK_EDID_READ:
4362 DRM_DEBUG_KMS("EDID test requested\n");
4363 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4364 response = intel_dp_autotest_edid(intel_dp);
4366 case DP_TEST_LINK_PHY_TEST_PATTERN:
4367 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4368 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4369 response = intel_dp_autotest_phy_pattern(intel_dp);
4372 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4377 status = drm_dp_dpcd_write(&intel_dp->aux,
4381 DRM_DEBUG_KMS("Could not write test response to sink\n");
4386 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4390 if (intel_dp->is_mst) {
4395 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4399 /* check link status - esi[10] = 0x200c */
4400 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4401 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4402 intel_dp_start_link_train(intel_dp);
4403 intel_dp_complete_link_train(intel_dp);
4404 intel_dp_stop_link_train(intel_dp);
4407 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4408 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4411 for (retry = 0; retry < 3; retry++) {
4413 wret = drm_dp_dpcd_write(&intel_dp->aux,
4414 DP_SINK_COUNT_ESI+1,
4421 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4423 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4431 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4432 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4433 intel_dp->is_mst = false;
4434 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4435 /* send a hotplug event */
4436 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4444 * According to DP spec
4447 * 2. Configure link according to Receiver Capabilities
4448 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4449 * 4. Check link status on receipt of hot-plug interrupt
4452 intel_dp_check_link_status(struct intel_dp *intel_dp)
4454 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4455 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4457 u8 link_status[DP_LINK_STATUS_SIZE];
4459 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4461 if (!intel_encoder->connectors_active)
4464 if (WARN_ON(!intel_encoder->base.crtc))
4467 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4470 /* Try to read receiver status if the link appears to be up */
4471 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4475 /* Now read the DPCD to see if it's actually running */
4476 if (!intel_dp_get_dpcd(intel_dp)) {
4480 /* Try to read the source of the interrupt */
4481 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4482 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4483 /* Clear interrupt source */
4484 drm_dp_dpcd_writeb(&intel_dp->aux,
4485 DP_DEVICE_SERVICE_IRQ_VECTOR,
4488 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4489 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4490 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4491 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4494 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4495 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4496 intel_encoder->base.name);
4497 intel_dp_start_link_train(intel_dp);
4498 intel_dp_complete_link_train(intel_dp);
4499 intel_dp_stop_link_train(intel_dp);
4503 /* XXX this is probably wrong for multiple downstream ports */
4504 static enum drm_connector_status
4505 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4507 uint8_t *dpcd = intel_dp->dpcd;
4510 if (!intel_dp_get_dpcd(intel_dp))
4511 return connector_status_disconnected;
4513 /* if there's no downstream port, we're done */
4514 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4515 return connector_status_connected;
4517 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4518 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4519 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4522 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4524 return connector_status_unknown;
4526 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4527 : connector_status_disconnected;
4530 /* If no HPD, poke DDC gently */
4531 if (drm_probe_ddc(intel_dp->aux.ddc))
4532 return connector_status_connected;
4534 /* Well we tried, say unknown for unreliable port types */
4535 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4536 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4537 if (type == DP_DS_PORT_TYPE_VGA ||
4538 type == DP_DS_PORT_TYPE_NON_EDID)
4539 return connector_status_unknown;
4541 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4542 DP_DWN_STRM_PORT_TYPE_MASK;
4543 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4544 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4545 return connector_status_unknown;
4548 /* Anything else is out of spec, warn and ignore */
4549 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4550 return connector_status_disconnected;
4553 static enum drm_connector_status
4554 edp_detect(struct intel_dp *intel_dp)
4556 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4557 enum drm_connector_status status;
4559 status = intel_panel_detect(dev);
4560 if (status == connector_status_unknown)
4561 status = connector_status_connected;
4566 static enum drm_connector_status
4567 ironlake_dp_detect(struct intel_dp *intel_dp)
4569 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4570 struct drm_i915_private *dev_priv = dev->dev_private;
4571 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4573 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4574 return connector_status_disconnected;
4576 return intel_dp_detect_dpcd(intel_dp);
4579 static int g4x_digital_port_connected(struct drm_device *dev,
4580 struct intel_digital_port *intel_dig_port)
4582 struct drm_i915_private *dev_priv = dev->dev_private;
4585 if (IS_VALLEYVIEW(dev)) {
4586 switch (intel_dig_port->port) {
4588 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4591 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4594 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4600 switch (intel_dig_port->port) {
4602 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4605 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4608 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4615 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4620 static enum drm_connector_status
4621 g4x_dp_detect(struct intel_dp *intel_dp)
4623 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4624 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4627 /* Can't disconnect eDP, but you can close the lid... */
4628 if (is_edp(intel_dp)) {
4629 enum drm_connector_status status;
4631 status = intel_panel_detect(dev);
4632 if (status == connector_status_unknown)
4633 status = connector_status_connected;
4637 ret = g4x_digital_port_connected(dev, intel_dig_port);
4639 return connector_status_unknown;
4641 return connector_status_disconnected;
4643 return intel_dp_detect_dpcd(intel_dp);
4646 static struct edid *
4647 intel_dp_get_edid(struct intel_dp *intel_dp)
4649 struct intel_connector *intel_connector = intel_dp->attached_connector;
4651 /* use cached edid if we have one */
4652 if (intel_connector->edid) {
4654 if (IS_ERR(intel_connector->edid))
4657 return drm_edid_duplicate(intel_connector->edid);
4659 return drm_get_edid(&intel_connector->base,
4664 intel_dp_set_edid(struct intel_dp *intel_dp)
4666 struct intel_connector *intel_connector = intel_dp->attached_connector;
4669 edid = intel_dp_get_edid(intel_dp);
4670 intel_connector->detect_edid = edid;
4672 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4673 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4675 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4679 intel_dp_unset_edid(struct intel_dp *intel_dp)
4681 struct intel_connector *intel_connector = intel_dp->attached_connector;
4683 kfree(intel_connector->detect_edid);
4684 intel_connector->detect_edid = NULL;
4686 intel_dp->has_audio = false;
4689 static enum intel_display_power_domain
4690 intel_dp_power_get(struct intel_dp *dp)
4692 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4693 enum intel_display_power_domain power_domain;
4695 power_domain = intel_display_port_power_domain(encoder);
4696 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4698 return power_domain;
4702 intel_dp_power_put(struct intel_dp *dp,
4703 enum intel_display_power_domain power_domain)
4705 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4706 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4709 static enum drm_connector_status
4710 intel_dp_detect(struct drm_connector *connector, bool force)
4712 struct intel_dp *intel_dp = intel_attached_dp(connector);
4713 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4714 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4715 struct drm_device *dev = connector->dev;
4716 enum drm_connector_status status;
4717 enum intel_display_power_domain power_domain;
4721 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4722 connector->base.id, connector->name);
4723 intel_dp_unset_edid(intel_dp);
4725 if (intel_dp->is_mst) {
4726 /* MST devices are disconnected from a monitor POV */
4727 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4728 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4729 return connector_status_disconnected;
4732 power_domain = intel_dp_power_get(intel_dp);
4734 /* Can't disconnect eDP, but you can close the lid... */
4735 if (is_edp(intel_dp))
4736 status = edp_detect(intel_dp);
4737 else if (HAS_PCH_SPLIT(dev))
4738 status = ironlake_dp_detect(intel_dp);
4740 status = g4x_dp_detect(intel_dp);
4741 if (status != connector_status_connected)
4744 intel_dp_probe_oui(intel_dp);
4746 ret = intel_dp_probe_mst(intel_dp);
4748 /* if we are in MST mode then this connector
4749 won't appear connected or have anything with EDID on it */
4750 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4751 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4752 status = connector_status_disconnected;
4756 intel_dp_set_edid(intel_dp);
4758 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4759 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4760 status = connector_status_connected;
4762 /* Try to read the source of the interrupt */
4763 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4764 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4765 /* Clear interrupt source */
4766 drm_dp_dpcd_writeb(&intel_dp->aux,
4767 DP_DEVICE_SERVICE_IRQ_VECTOR,
4770 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4771 intel_dp_handle_test_request(intel_dp);
4772 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4773 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4777 intel_dp_power_put(intel_dp, power_domain);
4782 intel_dp_force(struct drm_connector *connector)
4784 struct intel_dp *intel_dp = intel_attached_dp(connector);
4785 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4786 enum intel_display_power_domain power_domain;
4788 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4789 connector->base.id, connector->name);
4790 intel_dp_unset_edid(intel_dp);
4792 if (connector->status != connector_status_connected)
4795 power_domain = intel_dp_power_get(intel_dp);
4797 intel_dp_set_edid(intel_dp);
4799 intel_dp_power_put(intel_dp, power_domain);
4801 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4802 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4805 static int intel_dp_get_modes(struct drm_connector *connector)
4807 struct intel_connector *intel_connector = to_intel_connector(connector);
4810 edid = intel_connector->detect_edid;
4812 int ret = intel_connector_update_modes(connector, edid);
4817 /* if eDP has no EDID, fall back to fixed mode */
4818 if (is_edp(intel_attached_dp(connector)) &&
4819 intel_connector->panel.fixed_mode) {
4820 struct drm_display_mode *mode;
4822 mode = drm_mode_duplicate(connector->dev,
4823 intel_connector->panel.fixed_mode);
4825 drm_mode_probed_add(connector, mode);
4834 intel_dp_detect_audio(struct drm_connector *connector)
4836 bool has_audio = false;
4839 edid = to_intel_connector(connector)->detect_edid;
4841 has_audio = drm_detect_monitor_audio(edid);
4847 intel_dp_set_property(struct drm_connector *connector,
4848 struct drm_property *property,
4851 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4852 struct intel_connector *intel_connector = to_intel_connector(connector);
4853 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4854 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4857 ret = drm_object_property_set_value(&connector->base, property, val);
4861 if (property == dev_priv->force_audio_property) {
4865 if (i == intel_dp->force_audio)
4868 intel_dp->force_audio = i;
4870 if (i == HDMI_AUDIO_AUTO)
4871 has_audio = intel_dp_detect_audio(connector);
4873 has_audio = (i == HDMI_AUDIO_ON);
4875 if (has_audio == intel_dp->has_audio)
4878 intel_dp->has_audio = has_audio;
4882 if (property == dev_priv->broadcast_rgb_property) {
4883 bool old_auto = intel_dp->color_range_auto;
4884 uint32_t old_range = intel_dp->color_range;
4887 case INTEL_BROADCAST_RGB_AUTO:
4888 intel_dp->color_range_auto = true;
4890 case INTEL_BROADCAST_RGB_FULL:
4891 intel_dp->color_range_auto = false;
4892 intel_dp->color_range = 0;
4894 case INTEL_BROADCAST_RGB_LIMITED:
4895 intel_dp->color_range_auto = false;
4896 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4902 if (old_auto == intel_dp->color_range_auto &&
4903 old_range == intel_dp->color_range)
4909 if (is_edp(intel_dp) &&
4910 property == connector->dev->mode_config.scaling_mode_property) {
4911 if (val == DRM_MODE_SCALE_NONE) {
4912 DRM_DEBUG_KMS("no scaling not supported\n");
4916 if (intel_connector->panel.fitting_mode == val) {
4917 /* the eDP scaling property is not changed */
4920 intel_connector->panel.fitting_mode = val;
4928 if (intel_encoder->base.crtc)
4929 intel_crtc_restore_mode(intel_encoder->base.crtc);
4935 intel_dp_connector_destroy(struct drm_connector *connector)
4937 struct intel_connector *intel_connector = to_intel_connector(connector);
4939 kfree(intel_connector->detect_edid);
4941 if (!IS_ERR_OR_NULL(intel_connector->edid))
4942 kfree(intel_connector->edid);
4944 /* Can't call is_edp() since the encoder may have been destroyed
4946 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4947 intel_panel_fini(&intel_connector->panel);
4949 drm_connector_cleanup(connector);
4953 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4955 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4956 struct intel_dp *intel_dp = &intel_dig_port->dp;
4958 drm_dp_aux_unregister(&intel_dp->aux);
4959 intel_dp_mst_encoder_cleanup(intel_dig_port);
4960 if (is_edp(intel_dp)) {
4961 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4963 * vdd might still be enabled do to the delayed vdd off.
4964 * Make sure vdd is actually turned off here.
4967 edp_panel_vdd_off_sync(intel_dp);
4968 pps_unlock(intel_dp);
4971 if (intel_dp->edp_notifier.notifier_call) {
4972 unregister_reboot_notifier(&intel_dp->edp_notifier);
4973 intel_dp->edp_notifier.notifier_call = NULL;
4977 drm_encoder_cleanup(encoder);
4978 kfree(intel_dig_port);
4981 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4983 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4985 if (!is_edp(intel_dp))
4989 * vdd might still be enabled do to the delayed vdd off.
4990 * Make sure vdd is actually turned off here.
4992 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4994 edp_panel_vdd_off_sync(intel_dp);
4995 pps_unlock(intel_dp);
4998 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5000 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5001 struct drm_device *dev = intel_dig_port->base.base.dev;
5002 struct drm_i915_private *dev_priv = dev->dev_private;
5003 enum intel_display_power_domain power_domain;
5005 lockdep_assert_held(&dev_priv->pps_mutex);
5007 if (!edp_have_panel_vdd(intel_dp))
5011 * The VDD bit needs a power domain reference, so if the bit is
5012 * already enabled when we boot or resume, grab this reference and
5013 * schedule a vdd off, so we don't hold on to the reference
5016 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5017 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5018 intel_display_power_get(dev_priv, power_domain);
5020 edp_panel_vdd_schedule_off(intel_dp);
5023 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5025 struct intel_dp *intel_dp;
5027 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5030 intel_dp = enc_to_intel_dp(encoder);
5035 * Read out the current power sequencer assignment,
5036 * in case the BIOS did something with it.
5038 if (IS_VALLEYVIEW(encoder->dev))
5039 vlv_initial_power_sequencer_setup(intel_dp);
5041 intel_edp_panel_vdd_sanitize(intel_dp);
5043 pps_unlock(intel_dp);
5046 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5047 .dpms = intel_connector_dpms,
5048 .detect = intel_dp_detect,
5049 .force = intel_dp_force,
5050 .fill_modes = drm_helper_probe_single_connector_modes,
5051 .set_property = intel_dp_set_property,
5052 .atomic_get_property = intel_connector_atomic_get_property,
5053 .destroy = intel_dp_connector_destroy,
5054 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5055 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5058 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5059 .get_modes = intel_dp_get_modes,
5060 .mode_valid = intel_dp_mode_valid,
5061 .best_encoder = intel_best_encoder,
5064 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5065 .reset = intel_dp_encoder_reset,
5066 .destroy = intel_dp_encoder_destroy,
5070 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
5076 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5078 struct intel_dp *intel_dp = &intel_dig_port->dp;
5079 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5080 struct drm_device *dev = intel_dig_port->base.base.dev;
5081 struct drm_i915_private *dev_priv = dev->dev_private;
5082 enum intel_display_power_domain power_domain;
5085 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5086 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5088 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5090 * vdd off can generate a long pulse on eDP which
5091 * would require vdd on to handle it, and thus we
5092 * would end up in an endless cycle of
5093 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5095 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5096 port_name(intel_dig_port->port));
5100 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5101 port_name(intel_dig_port->port),
5102 long_hpd ? "long" : "short");
5104 power_domain = intel_display_port_power_domain(intel_encoder);
5105 intel_display_power_get(dev_priv, power_domain);
5108 /* indicate that we need to restart link training */
5109 intel_dp->train_set_valid = false;
5111 if (HAS_PCH_SPLIT(dev)) {
5112 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
5115 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
5119 if (!intel_dp_get_dpcd(intel_dp)) {
5123 intel_dp_probe_oui(intel_dp);
5125 if (!intel_dp_probe_mst(intel_dp))
5129 if (intel_dp->is_mst) {
5131 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5136 if (!intel_dp->is_mst) {
5138 * we'll check the link status via the normal hot plug path later -
5139 * but for short hpds we should check it now
5141 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5142 intel_dp_check_link_status(intel_dp);
5143 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5151 /* if we were in MST mode, and device is not there get out of MST mode */
5152 if (intel_dp->is_mst) {
5153 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5154 intel_dp->is_mst = false;
5156 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5160 intel_display_power_put(dev_priv, power_domain);
5165 /* Return which DP Port should be selected for Transcoder DP control */
5167 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5169 struct drm_device *dev = crtc->dev;
5170 struct intel_encoder *intel_encoder;
5171 struct intel_dp *intel_dp;
5173 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5174 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5176 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5177 intel_encoder->type == INTEL_OUTPUT_EDP)
5178 return intel_dp->output_reg;
5184 /* check the VBT to see whether the eDP is on DP-D port */
5185 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5187 struct drm_i915_private *dev_priv = dev->dev_private;
5188 union child_device_config *p_child;
5190 static const short port_mapping[] = {
5191 [PORT_B] = PORT_IDPB,
5192 [PORT_C] = PORT_IDPC,
5193 [PORT_D] = PORT_IDPD,
5199 if (!dev_priv->vbt.child_dev_num)
5202 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5203 p_child = dev_priv->vbt.child_dev + i;
5205 if (p_child->common.dvo_port == port_mapping[port] &&
5206 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5207 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5214 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5216 struct intel_connector *intel_connector = to_intel_connector(connector);
5218 intel_attach_force_audio_property(connector);
5219 intel_attach_broadcast_rgb_property(connector);
5220 intel_dp->color_range_auto = true;
5222 if (is_edp(intel_dp)) {
5223 drm_mode_create_scaling_mode_property(connector->dev);
5224 drm_object_attach_property(
5226 connector->dev->mode_config.scaling_mode_property,
5227 DRM_MODE_SCALE_ASPECT);
5228 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5232 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5234 intel_dp->last_power_cycle = jiffies;
5235 intel_dp->last_power_on = jiffies;
5236 intel_dp->last_backlight_off = jiffies;
5240 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5241 struct intel_dp *intel_dp)
5243 struct drm_i915_private *dev_priv = dev->dev_private;
5244 struct edp_power_seq cur, vbt, spec,
5245 *final = &intel_dp->pps_delays;
5246 u32 pp_on, pp_off, pp_div, pp;
5247 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5249 lockdep_assert_held(&dev_priv->pps_mutex);
5251 /* already initialized? */
5252 if (final->t11_t12 != 0)
5255 if (HAS_PCH_SPLIT(dev)) {
5256 pp_ctrl_reg = PCH_PP_CONTROL;
5257 pp_on_reg = PCH_PP_ON_DELAYS;
5258 pp_off_reg = PCH_PP_OFF_DELAYS;
5259 pp_div_reg = PCH_PP_DIVISOR;
5261 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5263 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5264 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5265 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5266 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5269 /* Workaround: Need to write PP_CONTROL with the unlock key as
5270 * the very first thing. */
5271 pp = ironlake_get_pp_control(intel_dp);
5272 I915_WRITE(pp_ctrl_reg, pp);
5274 pp_on = I915_READ(pp_on_reg);
5275 pp_off = I915_READ(pp_off_reg);
5276 pp_div = I915_READ(pp_div_reg);
5278 /* Pull timing values out of registers */
5279 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5280 PANEL_POWER_UP_DELAY_SHIFT;
5282 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5283 PANEL_LIGHT_ON_DELAY_SHIFT;
5285 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5286 PANEL_LIGHT_OFF_DELAY_SHIFT;
5288 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5289 PANEL_POWER_DOWN_DELAY_SHIFT;
5291 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5292 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5294 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5295 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5297 vbt = dev_priv->vbt.edp_pps;
5299 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5300 * our hw here, which are all in 100usec. */
5301 spec.t1_t3 = 210 * 10;
5302 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5303 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5304 spec.t10 = 500 * 10;
5305 /* This one is special and actually in units of 100ms, but zero
5306 * based in the hw (so we need to add 100 ms). But the sw vbt
5307 * table multiplies it with 1000 to make it in units of 100usec,
5309 spec.t11_t12 = (510 + 100) * 10;
5311 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5312 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5314 /* Use the max of the register settings and vbt. If both are
5315 * unset, fall back to the spec limits. */
5316 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5318 max(cur.field, vbt.field))
5319 assign_final(t1_t3);
5323 assign_final(t11_t12);
5326 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5327 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5328 intel_dp->backlight_on_delay = get_delay(t8);
5329 intel_dp->backlight_off_delay = get_delay(t9);
5330 intel_dp->panel_power_down_delay = get_delay(t10);
5331 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5334 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5335 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5336 intel_dp->panel_power_cycle_delay);
5338 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5339 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5343 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5344 struct intel_dp *intel_dp)
5346 struct drm_i915_private *dev_priv = dev->dev_private;
5347 u32 pp_on, pp_off, pp_div, port_sel = 0;
5348 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5349 int pp_on_reg, pp_off_reg, pp_div_reg;
5350 enum port port = dp_to_dig_port(intel_dp)->port;
5351 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5353 lockdep_assert_held(&dev_priv->pps_mutex);
5355 if (HAS_PCH_SPLIT(dev)) {
5356 pp_on_reg = PCH_PP_ON_DELAYS;
5357 pp_off_reg = PCH_PP_OFF_DELAYS;
5358 pp_div_reg = PCH_PP_DIVISOR;
5360 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5362 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5363 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5364 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5368 * And finally store the new values in the power sequencer. The
5369 * backlight delays are set to 1 because we do manual waits on them. For
5370 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5371 * we'll end up waiting for the backlight off delay twice: once when we
5372 * do the manual sleep, and once when we disable the panel and wait for
5373 * the PP_STATUS bit to become zero.
5375 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5376 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5377 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5378 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5379 /* Compute the divisor for the pp clock, simply match the Bspec
5381 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5382 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5383 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5385 /* Haswell doesn't have any port selection bits for the panel
5386 * power sequencer any more. */
5387 if (IS_VALLEYVIEW(dev)) {
5388 port_sel = PANEL_PORT_SELECT_VLV(port);
5389 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5391 port_sel = PANEL_PORT_SELECT_DPA;
5393 port_sel = PANEL_PORT_SELECT_DPD;
5398 I915_WRITE(pp_on_reg, pp_on);
5399 I915_WRITE(pp_off_reg, pp_off);
5400 I915_WRITE(pp_div_reg, pp_div);
5402 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5403 I915_READ(pp_on_reg),
5404 I915_READ(pp_off_reg),
5405 I915_READ(pp_div_reg));
5409 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5411 * @refresh_rate: RR to be programmed
5413 * This function gets called when refresh rate (RR) has to be changed from
5414 * one frequency to another. Switches can be between high and low RR
5415 * supported by the panel or to any other RR based on media playback (in
5416 * this case, RR value needs to be passed from user space).
5418 * The caller of this function needs to take a lock on dev_priv->drrs.
5420 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5422 struct drm_i915_private *dev_priv = dev->dev_private;
5423 struct intel_encoder *encoder;
5424 struct intel_digital_port *dig_port = NULL;
5425 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5426 struct intel_crtc_state *config = NULL;
5427 struct intel_crtc *intel_crtc = NULL;
5429 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5431 if (refresh_rate <= 0) {
5432 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5436 if (intel_dp == NULL) {
5437 DRM_DEBUG_KMS("DRRS not supported.\n");
5442 * FIXME: This needs proper synchronization with psr state for some
5443 * platforms that cannot have PSR and DRRS enabled at the same time.
5446 dig_port = dp_to_dig_port(intel_dp);
5447 encoder = &dig_port->base;
5448 intel_crtc = to_intel_crtc(encoder->base.crtc);
5451 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5455 config = intel_crtc->config;
5457 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5458 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5462 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5464 index = DRRS_LOW_RR;
5466 if (index == dev_priv->drrs.refresh_rate_type) {
5468 "DRRS requested for previously set RR...ignoring\n");
5472 if (!intel_crtc->active) {
5473 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5477 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5480 intel_dp_set_m_n(intel_crtc, M1_N1);
5483 intel_dp_set_m_n(intel_crtc, M2_N2);
5487 DRM_ERROR("Unsupported refreshrate type\n");
5489 } else if (INTEL_INFO(dev)->gen > 6) {
5490 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5491 val = I915_READ(reg);
5493 if (index > DRRS_HIGH_RR) {
5494 if (IS_VALLEYVIEW(dev))
5495 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5497 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5499 if (IS_VALLEYVIEW(dev))
5500 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5502 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5504 I915_WRITE(reg, val);
5507 dev_priv->drrs.refresh_rate_type = index;
5509 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5513 * intel_edp_drrs_enable - init drrs struct if supported
5514 * @intel_dp: DP struct
5516 * Initializes frontbuffer_bits and drrs.dp
5518 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5520 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5521 struct drm_i915_private *dev_priv = dev->dev_private;
5522 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5523 struct drm_crtc *crtc = dig_port->base.base.crtc;
5524 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5526 if (!intel_crtc->config->has_drrs) {
5527 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5531 mutex_lock(&dev_priv->drrs.mutex);
5532 if (WARN_ON(dev_priv->drrs.dp)) {
5533 DRM_ERROR("DRRS already enabled\n");
5537 dev_priv->drrs.busy_frontbuffer_bits = 0;
5539 dev_priv->drrs.dp = intel_dp;
5542 mutex_unlock(&dev_priv->drrs.mutex);
5546 * intel_edp_drrs_disable - Disable DRRS
5547 * @intel_dp: DP struct
5550 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5552 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5553 struct drm_i915_private *dev_priv = dev->dev_private;
5554 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5555 struct drm_crtc *crtc = dig_port->base.base.crtc;
5556 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5558 if (!intel_crtc->config->has_drrs)
5561 mutex_lock(&dev_priv->drrs.mutex);
5562 if (!dev_priv->drrs.dp) {
5563 mutex_unlock(&dev_priv->drrs.mutex);
5567 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5568 intel_dp_set_drrs_state(dev_priv->dev,
5569 intel_dp->attached_connector->panel.
5570 fixed_mode->vrefresh);
5572 dev_priv->drrs.dp = NULL;
5573 mutex_unlock(&dev_priv->drrs.mutex);
5575 cancel_delayed_work_sync(&dev_priv->drrs.work);
5578 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5580 struct drm_i915_private *dev_priv =
5581 container_of(work, typeof(*dev_priv), drrs.work.work);
5582 struct intel_dp *intel_dp;
5584 mutex_lock(&dev_priv->drrs.mutex);
5586 intel_dp = dev_priv->drrs.dp;
5592 * The delayed work can race with an invalidate hence we need to
5596 if (dev_priv->drrs.busy_frontbuffer_bits)
5599 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5600 intel_dp_set_drrs_state(dev_priv->dev,
5601 intel_dp->attached_connector->panel.
5602 downclock_mode->vrefresh);
5605 mutex_unlock(&dev_priv->drrs.mutex);
5609 * intel_edp_drrs_invalidate - Invalidate DRRS
5611 * @frontbuffer_bits: frontbuffer plane tracking bits
5613 * When there is a disturbance on screen (due to cursor movement/time
5614 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5617 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5619 void intel_edp_drrs_invalidate(struct drm_device *dev,
5620 unsigned frontbuffer_bits)
5622 struct drm_i915_private *dev_priv = dev->dev_private;
5623 struct drm_crtc *crtc;
5624 enum i915_pipe pipe;
5626 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5629 cancel_delayed_work(&dev_priv->drrs.work);
5631 mutex_lock(&dev_priv->drrs.mutex);
5632 if (!dev_priv->drrs.dp) {
5633 mutex_unlock(&dev_priv->drrs.mutex);
5637 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5638 pipe = to_intel_crtc(crtc)->pipe;
5640 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5641 intel_dp_set_drrs_state(dev_priv->dev,
5642 dev_priv->drrs.dp->attached_connector->panel.
5643 fixed_mode->vrefresh);
5646 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5648 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5649 mutex_unlock(&dev_priv->drrs.mutex);
5653 * intel_edp_drrs_flush - Flush DRRS
5655 * @frontbuffer_bits: frontbuffer plane tracking bits
5657 * When there is no movement on screen, DRRS work can be scheduled.
5658 * This DRRS work is responsible for setting relevant registers after a
5659 * timeout of 1 second.
5661 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5663 void intel_edp_drrs_flush(struct drm_device *dev,
5664 unsigned frontbuffer_bits)
5666 struct drm_i915_private *dev_priv = dev->dev_private;
5667 struct drm_crtc *crtc;
5668 enum i915_pipe pipe;
5670 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5673 cancel_delayed_work(&dev_priv->drrs.work);
5675 mutex_lock(&dev_priv->drrs.mutex);
5676 if (!dev_priv->drrs.dp) {
5677 mutex_unlock(&dev_priv->drrs.mutex);
5681 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5682 pipe = to_intel_crtc(crtc)->pipe;
5683 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5685 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5686 !dev_priv->drrs.busy_frontbuffer_bits)
5687 schedule_delayed_work(&dev_priv->drrs.work,
5688 msecs_to_jiffies(1000));
5689 mutex_unlock(&dev_priv->drrs.mutex);
5693 * DOC: Display Refresh Rate Switching (DRRS)
5695 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5696 * which enables swtching between low and high refresh rates,
5697 * dynamically, based on the usage scenario. This feature is applicable
5698 * for internal panels.
5700 * Indication that the panel supports DRRS is given by the panel EDID, which
5701 * would list multiple refresh rates for one resolution.
5703 * DRRS is of 2 types - static and seamless.
5704 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5705 * (may appear as a blink on screen) and is used in dock-undock scenario.
5706 * Seamless DRRS involves changing RR without any visual effect to the user
5707 * and can be used during normal system usage. This is done by programming
5708 * certain registers.
5710 * Support for static/seamless DRRS may be indicated in the VBT based on
5711 * inputs from the panel spec.
5713 * DRRS saves power by switching to low RR based on usage scenarios.
5716 * The implementation is based on frontbuffer tracking implementation.
5717 * When there is a disturbance on the screen triggered by user activity or a
5718 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5719 * When there is no movement on screen, after a timeout of 1 second, a switch
5720 * to low RR is made.
5721 * For integration with frontbuffer tracking code,
5722 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5724 * DRRS can be further extended to support other internal panels and also
5725 * the scenario of video playback wherein RR is set based on the rate
5726 * requested by userspace.
5730 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5731 * @intel_connector: eDP connector
5732 * @fixed_mode: preferred mode of panel
5734 * This function is called only once at driver load to initialize basic
5738 * Downclock mode if panel supports it, else return NULL.
5739 * DRRS support is determined by the presence of downclock mode (apart
5740 * from VBT setting).
5742 static struct drm_display_mode *
5743 intel_dp_drrs_init(struct intel_connector *intel_connector,
5744 struct drm_display_mode *fixed_mode)
5746 struct drm_connector *connector = &intel_connector->base;
5747 struct drm_device *dev = connector->dev;
5748 struct drm_i915_private *dev_priv = dev->dev_private;
5749 struct drm_display_mode *downclock_mode = NULL;
5751 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5752 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5754 if (INTEL_INFO(dev)->gen <= 6) {
5755 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5759 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5760 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5764 downclock_mode = intel_find_panel_downclock
5765 (dev, fixed_mode, connector);
5767 if (!downclock_mode) {
5768 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5772 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5774 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5775 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5776 return downclock_mode;
5779 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5780 struct intel_connector *intel_connector)
5782 struct drm_connector *connector = &intel_connector->base;
5783 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5784 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5785 struct drm_device *dev = intel_encoder->base.dev;
5786 struct drm_i915_private *dev_priv = dev->dev_private;
5787 struct drm_display_mode *fixed_mode = NULL;
5788 struct drm_display_mode *downclock_mode = NULL;
5790 struct drm_display_mode *scan;
5792 enum i915_pipe pipe = INVALID_PIPE;
5794 if (!is_edp(intel_dp))
5798 intel_edp_panel_vdd_sanitize(intel_dp);
5799 pps_unlock(intel_dp);
5801 /* Cache DPCD and EDID for edp. */
5802 has_dpcd = intel_dp_get_dpcd(intel_dp);
5805 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5806 dev_priv->no_aux_handshake =
5807 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5808 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5810 /* if this fails, presume the device is a ghost */
5811 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5815 /* We now know it's not a ghost, init power sequence regs. */
5817 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5818 pps_unlock(intel_dp);
5820 mutex_lock(&dev->mode_config.mutex);
5821 edid = drm_get_edid(connector, intel_dp->aux.ddc);
5823 if (drm_add_edid_modes(connector, edid)) {
5824 drm_mode_connector_update_edid_property(connector,
5826 drm_edid_to_eld(connector, edid);
5829 edid = ERR_PTR(-EINVAL);
5832 edid = ERR_PTR(-ENOENT);
5834 intel_connector->edid = edid;
5836 /* prefer fixed mode from EDID if available */
5837 list_for_each_entry(scan, &connector->probed_modes, head) {
5838 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5839 fixed_mode = drm_mode_duplicate(dev, scan);
5840 downclock_mode = intel_dp_drrs_init(
5841 intel_connector, fixed_mode);
5846 /* fallback to VBT if available for eDP */
5847 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5848 fixed_mode = drm_mode_duplicate(dev,
5849 dev_priv->vbt.lfp_lvds_vbt_mode);
5851 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5853 mutex_unlock(&dev->mode_config.mutex);
5855 if (IS_VALLEYVIEW(dev)) {
5857 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5858 register_reboot_notifier(&intel_dp->edp_notifier);
5862 * Figure out the current pipe for the initial backlight setup.
5863 * If the current pipe isn't valid, try the PPS pipe, and if that
5864 * fails just assume pipe A.
5866 if (IS_CHERRYVIEW(dev))
5867 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5869 pipe = PORT_TO_PIPE(intel_dp->DP);
5871 if (pipe != PIPE_A && pipe != PIPE_B)
5872 pipe = intel_dp->pps_pipe;
5874 if (pipe != PIPE_A && pipe != PIPE_B)
5877 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5881 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5882 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5883 intel_panel_setup_backlight(connector, pipe);
5889 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5890 struct intel_connector *intel_connector)
5892 struct drm_connector *connector = &intel_connector->base;
5893 struct intel_dp *intel_dp = &intel_dig_port->dp;
5894 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5895 struct drm_device *dev = intel_encoder->base.dev;
5896 struct drm_i915_private *dev_priv = dev->dev_private;
5897 enum port port = intel_dig_port->port;
5900 intel_dp->pps_pipe = INVALID_PIPE;
5902 /* intel_dp vfuncs */
5903 if (INTEL_INFO(dev)->gen >= 9)
5904 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5905 else if (IS_VALLEYVIEW(dev))
5906 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5907 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5908 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5909 else if (HAS_PCH_SPLIT(dev))
5910 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5912 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5914 if (INTEL_INFO(dev)->gen >= 9)
5915 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5917 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5919 /* Preserve the current hw state. */
5920 intel_dp->DP = I915_READ(intel_dp->output_reg);
5921 intel_dp->attached_connector = intel_connector;
5923 if (intel_dp_is_edp(dev, port))
5924 type = DRM_MODE_CONNECTOR_eDP;
5926 type = DRM_MODE_CONNECTOR_DisplayPort;
5929 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5930 * for DP the encoder type can be set by the caller to
5931 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5933 if (type == DRM_MODE_CONNECTOR_eDP)
5934 intel_encoder->type = INTEL_OUTPUT_EDP;
5936 /* eDP only on port B and/or C on vlv/chv */
5937 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5938 port != PORT_B && port != PORT_C))
5941 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5942 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5945 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5946 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5948 connector->interlace_allowed = true;
5949 connector->doublescan_allowed = 0;
5951 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5952 edp_panel_vdd_work);
5954 intel_connector_attach_encoder(intel_connector, intel_encoder);
5955 drm_connector_register(connector);
5958 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5960 intel_connector->get_hw_state = intel_connector_get_hw_state;
5961 intel_connector->unregister = intel_dp_connector_unregister;
5963 /* Set up the hotplug pin. */
5966 intel_encoder->hpd_pin = HPD_PORT_A;
5969 intel_encoder->hpd_pin = HPD_PORT_B;
5972 intel_encoder->hpd_pin = HPD_PORT_C;
5975 intel_encoder->hpd_pin = HPD_PORT_D;
5981 if (is_edp(intel_dp)) {
5983 intel_dp_init_panel_power_timestamps(intel_dp);
5984 if (IS_VALLEYVIEW(dev))
5985 vlv_initial_power_sequencer_setup(intel_dp);
5987 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5988 pps_unlock(intel_dp);
5991 intel_dp_aux_init(intel_dp, intel_connector);
5993 /* init MST on ports that can support it */
5994 if (HAS_DP_MST(dev) &&
5995 (port == PORT_B || port == PORT_C || port == PORT_D))
5996 intel_dp_mst_encoder_init(intel_dig_port,
5997 intel_connector->base.base.id);
5999 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6000 drm_dp_aux_unregister(&intel_dp->aux);
6001 if (is_edp(intel_dp)) {
6002 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6004 * vdd might still be enabled do to the delayed vdd off.
6005 * Make sure vdd is actually turned off here.
6008 edp_panel_vdd_off_sync(intel_dp);
6009 pps_unlock(intel_dp);
6011 drm_connector_unregister(connector);
6012 drm_connector_cleanup(connector);
6016 intel_dp_add_properties(intel_dp, connector);
6018 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6019 * 0xd. Failure to do so will result in spurious interrupts being
6020 * generated on the port when a cable is not attached.
6022 if (IS_G4X(dev) && !IS_GM45(dev)) {
6023 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6024 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6028 i915_debugfs_connector_add(connector);
6035 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6037 struct drm_i915_private *dev_priv = dev->dev_private;
6038 struct intel_digital_port *intel_dig_port;
6039 struct intel_encoder *intel_encoder;
6040 struct drm_encoder *encoder;
6041 struct intel_connector *intel_connector;
6043 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6044 if (!intel_dig_port)
6047 intel_connector = intel_connector_alloc();
6048 if (!intel_connector) {
6049 kfree(intel_dig_port);
6053 intel_encoder = &intel_dig_port->base;
6054 encoder = &intel_encoder->base;
6056 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6057 DRM_MODE_ENCODER_TMDS);
6059 intel_encoder->compute_config = intel_dp_compute_config;
6060 intel_encoder->disable = intel_disable_dp;
6061 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6062 intel_encoder->get_config = intel_dp_get_config;
6063 intel_encoder->suspend = intel_dp_encoder_suspend;
6064 if (IS_CHERRYVIEW(dev)) {
6065 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6066 intel_encoder->pre_enable = chv_pre_enable_dp;
6067 intel_encoder->enable = vlv_enable_dp;
6068 intel_encoder->post_disable = chv_post_disable_dp;
6069 } else if (IS_VALLEYVIEW(dev)) {
6070 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6071 intel_encoder->pre_enable = vlv_pre_enable_dp;
6072 intel_encoder->enable = vlv_enable_dp;
6073 intel_encoder->post_disable = vlv_post_disable_dp;
6075 intel_encoder->pre_enable = g4x_pre_enable_dp;
6076 intel_encoder->enable = g4x_enable_dp;
6077 if (INTEL_INFO(dev)->gen >= 5)
6078 intel_encoder->post_disable = ilk_post_disable_dp;
6081 intel_dig_port->port = port;
6082 intel_dig_port->dp.output_reg = output_reg;
6084 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6085 if (IS_CHERRYVIEW(dev)) {
6087 intel_encoder->crtc_mask = 1 << 2;
6089 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6091 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6093 intel_encoder->cloneable = 0;
6094 intel_encoder->hot_plug = intel_dp_hot_plug;
6096 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6097 dev_priv->hpd_irq_port[port] = intel_dig_port;
6099 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6100 drm_encoder_cleanup(encoder);
6101 kfree(intel_dig_port);
6102 kfree(intel_connector);
6107 void intel_dp_mst_suspend(struct drm_device *dev)
6109 struct drm_i915_private *dev_priv = dev->dev_private;
6113 for (i = 0; i < I915_MAX_PORTS; i++) {
6114 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
6115 if (!intel_dig_port)
6118 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6119 if (!intel_dig_port->dp.can_mst)
6121 if (intel_dig_port->dp.is_mst)
6122 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6128 void intel_dp_mst_resume(struct drm_device *dev)
6130 struct drm_i915_private *dev_priv = dev->dev_private;
6133 for (i = 0; i < I915_MAX_PORTS; i++) {
6134 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
6135 if (!intel_dig_port)
6137 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6141 if (!intel_dig_port->dp.can_mst)
6144 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6146 intel_dp_check_mst_status(&intel_dig_port->dp);