2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/export.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
/* Link status check timeout: 10 * 1000 — presumably 10 ms expressed in us; TODO confirm units. */
40 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/*
 * Tunable escape hatch: when non-zero, AUX transfers poll for completion
 * instead of waiting on the AUX-done interrupt (see the has_aux_irq
 * computation in intel_dp_aux_ch()).
 */
42 static int disable_aux_irq = 0;
43 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
/* Per-platform DPLL divider presets for the fixed DP link rates. */
50 static const struct dp_link_dpll gen4_dpll[] = {
52 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
54 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
/* Ironlake / PCH-split divider values. */
57 static const struct dp_link_dpll pch_dpll[] = {
59 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
61 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
/* Valleyview divider values. */
64 static const struct dp_link_dpll vlv_dpll[] = {
66 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
68 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
72 * CHV supports eDP 1.4 that have more link rates.
73 * Below only provides the fixed rates and excludes the variable rates.
75 static const struct dp_link_dpll chv_dpll[] = {
77 * CHV requires to program fractional division for m2.
78 * m2 is stored in fixed point format using formula below
79 * (m2_int << 22) | m2_fraction
81 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
82 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
83 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
84 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
85 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
86 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
88 /* Skylake supports following rates */
89 static const int gen9_rates[] = { 162000, 216000, 270000,
90 324000, 432000, 540000 };
/* Cherryview link rates (kHz). */
91 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
92 243000, 270000, 324000, 405000,
93 420000, 432000, 540000 };
/* RBR / HBR / HBR2 — the three standard DP link rates (kHz). */
94 static const int default_rates[] = { 162000, 270000, 540000 };
97 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
98 * @intel_dp: DP struct
100 * If a CPU or PCH DP output is attached to an eDP panel, this function
101 * will return true, and false otherwise.
103 static bool is_edp(struct intel_dp *intel_dp)
105 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
107 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
/* Return the drm_device that owns this DP port (via its encoder). */
110 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
114 return intel_dig_port->base.base.dev;
/* Map a connector to the intel_dp hanging off its attached encoder. */
117 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
119 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
122 static void intel_dp_link_down(struct intel_dp *intel_dp);
123 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
124 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
125 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
126 static void vlv_steal_power_sequencer(struct drm_device *dev,
127 enum i915_pipe pipe);
/*
 * Highest DP_LINK_BW_* value advertised by the sink's DPCD.
 * Falls back to 1.62 Gbps (with a WARN) on an unrecognized value.
 */
130 intel_dp_max_link_bw(struct intel_dp *intel_dp)
132 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
134 switch (max_link_bw) {
135 case DP_LINK_BW_1_62:
140 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
142 max_link_bw = DP_LINK_BW_1_62;
/*
 * Usable lane count = min(source limit, sink limit). DDI port A without
 * the DDI_A_4_LANES strap is limited on the source side.
 */
148 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
150 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
151 struct drm_device *dev = intel_dig_port->base.base.dev;
152 u8 source_max, sink_max;
155 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
156 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
159 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
161 return min(source_max, sink_max);
165 * The units on the numbers in the next two are... bizarre. Examples will
166 * make it clearer; this one parallels an example in the eDP spec.
168 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
170 * 270000 * 1 * 8 / 10 == 216000
172 * The actual data capacity of that configuration is 2.16Gbit/s, so the
173 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
174 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
175 * 119000. At 18bpp that's 2142000 kilobits per second.
177 * Thus the strange-looking division by 10 in intel_dp_link_required, to
178 * get the result in decakilobits instead of kilobits.
/*
 * intel_dp_link_required - bandwidth a mode needs, in decakilobits/s.
 * @pixel_clock: dotclock in kHz
 * @bpp: bits per pixel
 *
 * See the units discussion in the comment block above; the +9 makes the
 * division by 10 round up so we never under-report the requirement.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * intel_dp_max_data_rate - link capacity in decakilobits/s.
 * @max_link_clock: link symbol clock in kHz (e.g. 270000 for HBR)
 * @max_lanes: number of lanes
 *
 * 8/10 accounts for the 8b/10b channel coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
/*
 * Validate a user mode against eDP fixed-mode limits and the maximum
 * link bandwidth (bandwidth computed at 18bpp here).
 */
193 static enum drm_mode_status
194 intel_dp_mode_valid(struct drm_connector *connector,
195 struct drm_display_mode *mode)
197 struct intel_dp *intel_dp = intel_attached_dp(connector);
198 struct intel_connector *intel_connector = to_intel_connector(connector);
199 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
200 int target_clock = mode->clock;
201 int max_rate, mode_rate, max_lanes, max_link_clock;
/* eDP panels can't exceed their fixed mode; the clock follows it too. */
203 if (is_edp(intel_dp) && fixed_mode) {
204 if (mode->hdisplay > fixed_mode->hdisplay)
207 if (mode->vdisplay > fixed_mode->vdisplay)
210 target_clock = fixed_mode->clock;
213 max_link_clock = intel_dp_max_link_rate(intel_dp);
214 max_lanes = intel_dp_max_lane_count(intel_dp);
216 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
217 mode_rate = intel_dp_link_required(target_clock, 18);
219 if (mode_rate > max_rate)
220 return MODE_CLOCK_HIGH;
/* NOTE(review): 10 MHz lower dotclock bound — presumably a hardware limit; confirm. */
222 if (mode->clock < 10000)
223 return MODE_CLOCK_LOW;
225 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
226 return MODE_H_ILLEGAL;
/*
 * intel_dp_pack_aux - pack up to 4 bytes into one big-endian AUX data word.
 * @src: source bytes
 * @src_bytes: number of bytes to pack (clamped to 4, the register width)
 *
 * Byte 0 lands in the most significant byte of the returned word,
 * matching the AUX channel data register layout.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	/* Each AUX data register holds at most 4 bytes. */
	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * intel_dp_unpack_aux - unpack one AUX data word into up to 4 bytes.
 * @src: big-endian AUX data word read from the hardware
 * @dst: destination buffer
 * @dst_bytes: number of bytes wanted (clamped to 4, the register width)
 *
 * Inverse of intel_dp_pack_aux(): the most significant byte of @src
 * becomes dst[0].
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	/* Each AUX data register holds at most 4 bytes. */
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
252 /* hrawclock is 1/4 the FSB frequency */
/* Return the hrawclk frequency, derived from the CLKCFG FSB field (fixed on VLV). */
254 intel_hrawclk(struct drm_device *dev)
256 struct drm_i915_private *dev_priv = dev->dev_private;
259 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
260 if (IS_VALLEYVIEW(dev))
263 clkcfg = I915_READ(CLKCFG);
264 switch (clkcfg & CLKCFG_FSB_MASK) {
273 case CLKCFG_FSB_1067:
275 case CLKCFG_FSB_1333:
277 /* these two are just a guess; one of them might be right */
278 case CLKCFG_FSB_1600:
279 case CLKCFG_FSB_1600_ALT:
287 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
288 struct intel_dp *intel_dp);
290 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
291 struct intel_dp *intel_dp);
/*
 * Acquire the panel power sequencer lock: take a display power domain
 * reference first, then pps_mutex. pps_unlock() releases in the opposite
 * order; always use this pair rather than touching pps_mutex directly.
 */
293 static void pps_lock(struct intel_dp *intel_dp)
295 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296 struct intel_encoder *encoder = &intel_dig_port->base;
297 struct drm_device *dev = encoder->base.dev;
298 struct drm_i915_private *dev_priv = dev->dev_private;
299 enum intel_display_power_domain power_domain;
302 * See vlv_power_sequencer_reset() why we need
303 * a power domain reference here.
305 power_domain = intel_display_port_power_domain(encoder);
306 intel_display_power_get(dev_priv, power_domain);
308 mutex_lock(&dev_priv->pps_mutex);
/* Release pps_mutex, then drop the power domain reference taken by pps_lock(). */
311 static void pps_unlock(struct intel_dp *intel_dp)
313 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
314 struct intel_encoder *encoder = &intel_dig_port->base;
315 struct drm_device *dev = encoder->base.dev;
316 struct drm_i915_private *dev_priv = dev->dev_private;
317 enum intel_display_power_domain power_domain;
319 mutex_unlock(&dev_priv->pps_mutex);
321 power_domain = intel_display_port_power_domain(encoder);
322 intel_display_power_put(dev_priv, power_domain);
/*
 * Make the power sequencer of intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the port, forcing the pipe's DPLL on
 * temporarily if needed. Bails (with a WARN) if the port is already enabled.
 */
326 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
329 struct drm_device *dev = intel_dig_port->base.base.dev;
330 struct drm_i915_private *dev_priv = dev->dev_private;
331 enum i915_pipe pipe = intel_dp->pps_pipe;
335 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
336 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
337 pipe_name(pipe), port_name(intel_dig_port->port)))
340 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
341 pipe_name(pipe), port_name(intel_dig_port->port));
343 /* Preserve the BIOS-computed detected bit. This is
344 * supposed to be read-only.
346 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
347 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
348 DP |= DP_PORT_WIDTH(1);
349 DP |= DP_LINK_TRAIN_PAT_1;
351 if (IS_CHERRYVIEW(dev))
352 DP |= DP_PIPE_SELECT_CHV(pipe);
353 else if (pipe == PIPE_B)
354 DP |= DP_PIPEB_SELECT;
356 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
359 * The DPLL for the pipe must be enabled for this to work.
360 * So temporarily enable it if it's not already enabled.
363 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
364 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
367 * Similar magic as in intel_dp_enable_port().
368 * We _must_ do this port enable + disable trick
369 * to make this power sequencer lock onto the port.
370 * Otherwise even VDD force bit won't work.
372 I915_WRITE(intel_dp->output_reg, DP);
373 POSTING_READ(intel_dp->output_reg);
375 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
376 POSTING_READ(intel_dp->output_reg);
378 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
379 POSTING_READ(intel_dp->output_reg);
382 vlv_force_pll_off(dev, pipe);
/*
 * Return the pipe whose power sequencer this eDP port should use, picking
 * and initializing a free one (stealing + kicking it) on first use.
 * Caller must hold pps_mutex.
 */
385 static enum i915_pipe
386 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
388 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
389 struct drm_device *dev = intel_dig_port->base.base.dev;
390 struct drm_i915_private *dev_priv = dev->dev_private;
391 struct intel_encoder *encoder;
392 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
395 lockdep_assert_held(&dev_priv->pps_mutex);
397 /* We should never land here with regular DP ports */
398 WARN_ON(!is_edp(intel_dp));
400 if (intel_dp->pps_pipe != INVALID_PIPE)
401 return intel_dp->pps_pipe;
404 * We don't have power sequencer currently.
405 * Pick one that's not used by other ports.
407 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
409 struct intel_dp *tmp;
411 if (encoder->type != INTEL_OUTPUT_EDP)
414 tmp = enc_to_intel_dp(&encoder->base);
416 if (tmp->pps_pipe != INVALID_PIPE)
417 pipes &= ~(1 << tmp->pps_pipe);
421 * Didn't find one. This should not happen since there
422 * are two power sequencers and up to two eDP ports.
424 if (WARN_ON(pipes == 0))
427 pipe = ffs(pipes) - 1;
429 vlv_steal_power_sequencer(dev, pipe);
430 intel_dp->pps_pipe = pipe;
432 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
433 pipe_name(intel_dp->pps_pipe),
434 port_name(intel_dig_port->port));
436 /* init power sequencer on this pipe and port */
437 intel_dp_init_panel_power_sequencer(dev, intel_dp);
438 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
441 * Even vdd force doesn't work until we've made
442 * the power sequencer lock in on the port.
444 vlv_power_sequencer_kick(intel_dp);
446 return intel_dp->pps_pipe;
/* Predicate type used by vlv_initial_pps_pipe() to qualify a candidate pipe. */
449 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
450 enum i915_pipe pipe);
/* Pipe's panel power is currently on. */
452 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
455 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
/* Pipe's VDD force bit is currently set. */
458 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
461 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
/* Accepts any pipe. */
464 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/*
 * Scan pipes A/B for one whose PP_ON_DELAYS port-select field matches
 * @port and which satisfies @pipe_check.
 */
470 static enum i915_pipe
471 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
473 vlv_pipe_check pipe_check)
477 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
478 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
479 PANEL_PORT_SELECT_MASK;
481 if (port_sel != PANEL_PORT_SELECT_VLV(port))
484 if (!pipe_check(dev_priv, pipe))
/*
 * At init time, adopt whichever pipe's power sequencer the BIOS left
 * attached to this port, preferring a powered-on one, then one with VDD
 * forced on, then any pipe with the matching port select.
 */
494 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
496 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
497 struct drm_device *dev = intel_dig_port->base.base.dev;
498 struct drm_i915_private *dev_priv = dev->dev_private;
499 enum port port = intel_dig_port->port;
501 lockdep_assert_held(&dev_priv->pps_mutex);
503 /* try to find a pipe with this port selected */
504 /* first pick one where the panel is on */
505 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
507 /* didn't find one? pick one where vdd is on */
508 if (intel_dp->pps_pipe == INVALID_PIPE)
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_has_vdd_on);
511 /* didn't find one? pick one with just the correct port */
512 if (intel_dp->pps_pipe == INVALID_PIPE)
513 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
516 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
517 if (intel_dp->pps_pipe == INVALID_PIPE) {
518 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
523 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
524 port_name(port), pipe_name(intel_dp->pps_pipe));
526 intel_dp_init_panel_power_sequencer(dev, intel_dp);
527 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/* Invalidate every eDP encoder's cached pps_pipe (VLV only); see locking note below. */
530 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
532 struct drm_device *dev = dev_priv->dev;
533 struct intel_encoder *encoder;
535 if (WARN_ON(!IS_VALLEYVIEW(dev)))
539 * We can't grab pps_mutex here due to deadlock with power_domain
540 * mutex when power_domain functions are called while holding pps_mutex.
541 * That also means that in order to use pps_pipe the code needs to
542 * hold both a power domain reference and pps_mutex, and the power domain
543 * reference get/put must be done while _not_ holding pps_mutex.
544 * pps_{lock,unlock}() do these steps in the correct order, so one
545 * should use them always.
548 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
549 struct intel_dp *intel_dp;
551 if (encoder->type != INTEL_OUTPUT_EDP)
554 intel_dp = enc_to_intel_dp(&encoder->base);
555 intel_dp->pps_pipe = INVALID_PIPE;
/* Panel power control register: fixed on PCH-split, per-pps-pipe on VLV/CHV. */
559 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
561 struct drm_device *dev = intel_dp_to_dev(intel_dp);
563 if (HAS_PCH_SPLIT(dev))
564 return PCH_PP_CONTROL;
566 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/* Panel power status register: fixed on PCH-split, per-pps-pipe on VLV/CHV. */
569 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
571 struct drm_device *dev = intel_dp_to_dev(intel_dp);
573 if (HAS_PCH_SPLIT(dev))
574 return PCH_PP_STATUS;
576 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
579 /* Reboot notifier handler to shut down panel power, to guarantee the T12 timing.
580 This function is only applicable when the panel PM state is not to be tracked. */
582 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
585 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
587 struct drm_device *dev = intel_dp_to_dev(intel_dp);
588 struct drm_i915_private *dev_priv = dev->dev_private;
590 u32 pp_ctrl_reg, pp_div_reg;
/* Only act for eDP panels and only on a restart. */
592 if (!is_edp(intel_dp) || code != SYS_RESTART)
597 if (IS_VALLEYVIEW(dev)) {
598 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
600 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
601 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
602 pp_div = I915_READ(pp_div_reg);
603 pp_div &= PP_REFERENCE_DIVIDER_MASK;
605 /* 0x1F write to PP_DIV_REG sets max cycle delay */
606 I915_WRITE(pp_div_reg, pp_div | 0x1F);
607 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
608 msleep(intel_dp->panel_power_cycle_delay);
611 pps_unlock(intel_dp);
/* Is panel power (PP_ON) currently asserted? Caller must hold pps_mutex. */
617 static bool edp_have_panel_power(struct intel_dp *intel_dp)
619 struct drm_device *dev = intel_dp_to_dev(intel_dp);
620 struct drm_i915_private *dev_priv = dev->dev_private;
622 lockdep_assert_held(&dev_priv->pps_mutex);
/* On VLV, bail early if no power sequencer has been assigned yet. */
624 if (IS_VALLEYVIEW(dev) &&
625 intel_dp->pps_pipe == INVALID_PIPE)
628 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/* Is the VDD force bit currently set? Caller must hold pps_mutex. */
631 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
634 struct drm_i915_private *dev_priv = dev->dev_private;
636 lockdep_assert_held(&dev_priv->pps_mutex);
638 if (IS_VALLEYVIEW(dev) &&
639 intel_dp->pps_pipe == INVALID_PIPE)
642 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/* Sanity check before AUX on eDP: WARN if neither panel power nor VDD is on. */
646 intel_dp_check_edp(struct intel_dp *intel_dp)
648 struct drm_device *dev = intel_dp_to_dev(intel_dp);
649 struct drm_i915_private *dev_priv = dev->dev_private;
651 if (!is_edp(intel_dp))
654 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
655 WARN(1, "eDP powered off while attempting aux channel communication.\n");
656 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
657 I915_READ(_pp_stat_reg(intel_dp)),
658 I915_READ(_pp_ctrl_reg(intel_dp)));
/*
 * Wait for the AUX channel to go idle (SEND_BUSY clear), sleeping on the
 * gmbus wait queue in irq mode or spinning in polling mode; logs an error
 * if the hardware never signals. Returns the last-read channel status.
 */
663 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
665 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
666 struct drm_device *dev = intel_dig_port->base.base.dev;
667 struct drm_i915_private *dev_priv = dev->dev_private;
668 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
672 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
674 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
675 msecs_to_jiffies_timeout(10));
677 done = wait_for_atomic(C, 10) == 0;
679 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/* gen4-class AUX clock divider: hrawclk / 2 (aiming for ~2 MHz, per comment). */
686 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
692 * The clock divider is based off the hrawclk, and would like to run at
693 * 2MHz. So, take the hrawclk value and divide by 2 and use that
695 return index ? 0 : intel_hrawclk(dev) / 2;
/* ILK+: port A AUX runs off the eDP input clock; other ports off the PCH rawclk. */
698 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
700 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701 struct drm_device *dev = intel_dig_port->base.base.dev;
706 if (intel_dig_port->port == PORT_A) {
707 if (IS_GEN6(dev) || IS_GEN7(dev))
708 return 200; /* SNB & IVB eDP input clock at 400Mhz */
710 return 225; /* eDP input clock at 450Mhz */
712 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/* HSW/BDW: port A derives from cdclk; LPT has a non-ULT rawclk workaround. */
716 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
719 struct drm_device *dev = intel_dig_port->base.base.dev;
720 struct drm_i915_private *dev_priv = dev->dev_private;
722 if (intel_dig_port->port == PORT_A) {
725 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
726 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
727 /* Workaround for non-ULT HSW */
734 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * VLV AUX clock divider: a single fixed divider of 100 is exposed at
 * index 0; subsequent indices return 0, which terminates the divider
 * iteration loop in intel_dp_aux_ch().
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code:
	 * a dummy divider of 1 at index 0, then 0 to end the iteration.
	 */
	return index ? 0 : 1;
}
/*
 * Build the AUX_CTL value to start a send on pre-SKL hardware: busy bit,
 * optional done-interrupt, timeout, message size, precharge, and the
 * 2x bit-clock divider.
 */
753 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
756 uint32_t aux_clock_divider)
758 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
759 struct drm_device *dev = intel_dig_port->base.base.dev;
760 uint32_t precharge, timeout;
/* BDW port A uses the longer 600us timeout. */
767 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
768 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772 return DP_AUX_CH_CTL_SEND_BUSY |
774 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
775 DP_AUX_CH_CTL_TIME_OUT_ERROR |
777 DP_AUX_CH_CTL_RECEIVE_ERROR |
778 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
779 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
780 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/* SKL variant: fixed 1600us timeout and 32 sync pulses, no clock divider field. */
783 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
788 return DP_AUX_CH_CTL_SEND_BUSY |
790 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
791 DP_AUX_CH_CTL_TIME_OUT_ERROR |
792 DP_AUX_CH_CTL_TIME_OUT_1600us |
793 DP_AUX_CH_CTL_RECEIVE_ERROR |
794 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
795 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/*
 * Perform one raw AUX transaction: transmit @send_bytes from @send and
 * receive up to @recv_size bytes into @recv. Handles eDP VDD, PM QoS,
 * waiting out previous channel activity, a retry loop over clock dividers
 * (5 tries each, DP spec requires at least 3), and timeout/receive-error
 * classification. Return-path lines are truncated in this listing —
 * presumably the received byte count or a negative errno; confirm.
 */
799 intel_dp_aux_ch(struct intel_dp *intel_dp,
800 const uint8_t *send, int send_bytes,
801 uint8_t *recv, int recv_size)
803 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
804 struct drm_device *dev = intel_dig_port->base.base.dev;
805 struct drm_i915_private *dev_priv = dev->dev_private;
806 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
807 uint32_t ch_data = ch_ctl + 4;
808 uint32_t aux_clock_divider;
809 int i, ret, recv_bytes;
812 bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
818 * We will be called with VDD already enabled for dpcd/edid/oui reads.
819 * In such cases we want to leave VDD enabled and it's up to upper layers
820 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
823 vdd = edp_panel_vdd_on(intel_dp);
825 /* dp aux is extremely sensitive to irq latency, hence request the
826 * lowest possible wakeup latency and so prevent the cpu from going into
829 pm_qos_update_request(&dev_priv->pm_qos, 0);
831 intel_dp_check_edp(intel_dp);
833 intel_aux_display_runtime_get(dev_priv);
835 /* Try to wait for any previous AUX channel activity */
836 for (try = 0; try < 3; try++) {
837 status = I915_READ_NOTRACE(ch_ctl);
838 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
844 WARN(1, "dp_aux_ch not started status 0x%08x\n",
850 /* Only 5 data registers! */
851 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
856 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
857 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
862 /* Must try at least 3 times according to DP spec */
863 for (try = 0; try < 5; try++) {
864 /* Load the send data into the aux channel data registers */
865 for (i = 0; i < send_bytes; i += 4)
866 I915_WRITE(ch_data + i,
867 intel_dp_pack_aux(send + i,
870 /* Send the command and wait for it to complete */
871 I915_WRITE(ch_ctl, send_ctl);
873 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
875 /* Clear done status and any errors */
879 DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR);
882 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
883 DP_AUX_CH_CTL_RECEIVE_ERROR))
885 if (status & DP_AUX_CH_CTL_DONE)
890 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected
900 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
908 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
914 /* Unload any bytes sent back from the other side */
915 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
/* Hardware may report more than the caller asked for; clamp to the buffer. */
917 if (recv_bytes > recv_size)
918 recv_bytes = recv_size;
920 for (i = 0; i < recv_bytes; i += 4)
921 intel_dp_unpack_aux(I915_READ(ch_data + i),
922 recv + i, recv_bytes - i);
926 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
927 intel_aux_display_runtime_put(dev_priv);
930 edp_panel_vdd_off(intel_dp, false);
932 pps_unlock(intel_dp);
/* AUX header: 3 address/command bytes, +1 length byte for non-bare requests. */
937 #define BARE_ADDRESS_SIZE 3
938 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: marshal a drm_dp_aux_msg into the 4-byte AUX
 * header plus payload, run it through intel_dp_aux_ch(), and unpack the
 * reply code and data.
 */
940 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
942 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
943 uint8_t txbuf[20], rxbuf[20];
944 size_t txsize, rxsize;
/* Header: request in the top nibble, 20-bit address, size-1 length field. */
947 txbuf[0] = (msg->request << 4) |
948 ((msg->address >> 16) & 0xf);
949 txbuf[1] = (msg->address >> 8) & 0xff;
950 txbuf[2] = msg->address & 0xff;
951 txbuf[3] = msg->size - 1;
953 switch (msg->request & ~DP_AUX_I2C_MOT) {
954 case DP_AUX_NATIVE_WRITE:
955 case DP_AUX_I2C_WRITE:
956 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
957 rxsize = 2; /* 0 or 1 data bytes */
959 if (WARN_ON(txsize > 20))
962 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
964 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
966 msg->reply = rxbuf[0] >> 4;
969 /* Number of bytes written in a short write. */
970 ret = clamp_t(int, rxbuf[1], 0, msg->size);
972 /* Return payload size. */
978 case DP_AUX_NATIVE_READ:
979 case DP_AUX_I2C_READ:
980 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
981 rxsize = msg->size + 1;
983 if (WARN_ON(rxsize > 20))
986 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
988 msg->reply = rxbuf[0] >> 4;
990 * Assume happy day, and copy the data. The caller is
991 * expected to check msg->reply before touching it.
993 * Return payload size.
996 memcpy(msg->buffer, rxbuf + 1, ret);
/*
 * Byte-at-a-time I2C-over-AUX handler (legacy iic glue — presumably the
 * DragonFly/BSD path, given device_get_softc; confirm). Builds an I2C
 * read/write AUX message and retries per the DP 1.2 AUX_DEFER rules.
 */
1009 intel_dp_i2c_aux_ch(struct device *adapter, int mode,
1010 uint8_t write_byte, uint8_t *read_byte)
1012 struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1013 struct intel_dp *intel_dp = data->priv;
1014 uint16_t address = data->address;
1022 intel_edp_panel_vdd_on(intel_dp);
1023 intel_dp_check_edp(intel_dp);
1024 /* Set up the command byte */
1025 if (mode & MODE_I2C_READ)
1026 msg[0] = DP_AUX_I2C_READ << 4;
1028 msg[0] = DP_AUX_I2C_WRITE << 4;
/* MOT (middle-of-transaction) stays set until the i2c STOP. */
1030 if (!(mode & MODE_I2C_STOP))
1031 msg[0] |= DP_AUX_I2C_MOT << 4;
1033 msg[1] = address >> 8;
1037 case MODE_I2C_WRITE:
1039 msg[4] = write_byte;
1055 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1056 * required to retry at least seven times upon receiving AUX_DEFER
1057 * before giving up the AUX transaction.
1059 for (retry = 0; retry < 7; retry++) {
1060 ret = intel_dp_aux_ch(intel_dp,
1062 reply, reply_bytes);
1064 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
/* First check the native reply field... */
1068 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1069 case DP_AUX_NATIVE_REPLY_ACK:
1070 /* I2C-over-AUX Reply field is only valid
1071 * when paired with AUX ACK.
1074 case DP_AUX_NATIVE_REPLY_NACK:
1075 DRM_DEBUG_KMS("aux_ch native nack\n");
1078 case DP_AUX_NATIVE_REPLY_DEFER:
1080 * For now, just give more slack to branch devices. We
1081 * could check the DPCD for I2C bit rate capabilities,
1082 * and if available, adjust the interval. We could also
1083 * be more careful with DP-to-Legacy adapters where a
1084 * long legacy cable may force very low I2C bit rates.
1086 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1087 DP_DWN_STRM_PORT_PRESENT)
1088 usleep_range(500, 600);
1090 usleep_range(300, 400);
1093 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
/* ...then the I2C-over-AUX reply field (valid only after native ACK). */
1099 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1100 case DP_AUX_I2C_REPLY_ACK:
1101 if (mode == MODE_I2C_READ) {
1102 *read_byte = reply[1];
1104 ret = 0; /* reply_bytes - 1 */
1106 case DP_AUX_I2C_REPLY_NACK:
1107 DRM_DEBUG_KMS("aux_i2c nack\n");
1110 case DP_AUX_I2C_REPLY_DEFER:
1111 DRM_DEBUG_KMS("aux_i2c defer\n");
1115 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1121 DRM_ERROR("too many retries, giving up\n");
/*
 * Pick the AUX_CTL register for this port, fill in the drm_dp_aux struct,
 * and register the I2C-over-AUX bus for the connector.
 */
1129 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1131 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1132 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1133 enum port port = intel_dig_port->port;
1134 const char *name = NULL;
1139 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1143 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1147 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1151 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1159 * The AUX_CTL register is usually DP_CTL + 0x10.
1161 * On Haswell and Broadwell though:
1162 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1163 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1165 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1167 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1168 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1170 intel_dp->aux.name = name;
1171 intel_dp->aux.dev = dev->dev;
1172 intel_dp->aux.transfer = intel_dp_aux_transfer;
1174 DRM_DEBUG_KMS("i2c_init %s\n", name);
1175 ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
1176 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1177 &intel_dp->aux.ddc);
1178 WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1179 ret, port_name(port));
/* Connector unregister hook; delegates to the generic helper. */
1184 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1186 intel_connector_unregister(intel_connector);
/*
 * Legacy i2c adapter registration for DP AUX (pre-drm_dp_aux path):
 * set up intel_dp->adapter and register the bus. The #ifdef'd branches
 * (Linux i2c vs. BSD iic) are truncated in this listing.
 */
1191 intel_dp_i2c_init(struct intel_dp *intel_dp,
1192 struct intel_connector *intel_connector, const char *name)
1196 DRM_DEBUG_KMS("i2c_init %s\n", name);
1198 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
1199 intel_dp->adapter.owner = THIS_MODULE;
1200 intel_dp->adapter.class = I2C_CLASS_DDC;
1201 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
1202 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
1203 intel_dp->adapter.algo_data = &intel_dp->algo;
1204 intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
1206 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
1210 ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
1211 &intel_dp->adapter.dev.kobj,
1212 intel_dp->adapter.dev.kobj.name);
1214 ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
1215 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1216 &intel_dp->adapter);
/* Program pipe_config for SKL eDP: always DPLL0, link rate via the CTRL1 field. */
1223 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1227 pipe_config->ddi_pll_sel = SKL_DPLL0;
1228 pipe_config->dpll_hw_state.cfgcr1 = 0;
1229 pipe_config->dpll_hw_state.cfgcr2 = 0;
1231 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
/* The switch cases (truncated here) key on half the link clock in kHz. */
1232 switch (link_clock / 2) {
1234 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1238 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1242 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1246 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1249 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1250 results in CDCLK change. Need to handle the change of CDCLK by
1251 disabling pipes and re-enabling them */
1253 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1257 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1262 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
/* Map a DP_LINK_BW_* value to the matching HSW LCPLL port clock select. */
1266 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1269 case DP_LINK_BW_1_62:
1270 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1272 case DP_LINK_BW_2_7:
1273 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1275 case DP_LINK_BW_5_4:
1276 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
/*
 * Point *sink_rates at the sink's supported link rates and return the
 * count: the DPCD-provided table when present, else default_rates capped
 * by the sink's max link bw.
 */
1282 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1284 if (intel_dp->num_sink_rates) {
1285 *sink_rates = intel_dp->sink_rates;
1286 return intel_dp->num_sink_rates;
1289 *sink_rates = default_rates;
/* >>3 maps DP_LINK_BW_1_62/2_7/5_4 (0x06/0x0a/0x14) to counts 1/2/3. */
1291 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1295 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1297 if (INTEL_INFO(dev)->gen >= 9) {
1298 *source_rates = gen9_rates;
1299 return ARRAY_SIZE(gen9_rates);
1300 } else if (IS_CHERRYVIEW(dev)) {
1301 *source_rates = chv_rates;
1302 return ARRAY_SIZE(chv_rates);
1305 *source_rates = default_rates;
1307 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1308 /* WaDisableHBR2:skl */
1309 return (DP_LINK_BW_2_7 >> 3) + 1;
1310 else if (INTEL_INFO(dev)->gen >= 8 ||
1311 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1312 return (DP_LINK_BW_5_4 >> 3) + 1;
1314 return (DP_LINK_BW_2_7 >> 3) + 1;
1318 intel_dp_set_clock(struct intel_encoder *encoder,
1319 struct intel_crtc_state *pipe_config, int link_bw)
1321 struct drm_device *dev = encoder->base.dev;
1322 const struct dp_link_dpll *divisor = NULL;
1326 divisor = gen4_dpll;
1327 count = ARRAY_SIZE(gen4_dpll);
1328 } else if (HAS_PCH_SPLIT(dev)) {
1330 count = ARRAY_SIZE(pch_dpll);
1331 } else if (IS_CHERRYVIEW(dev)) {
1333 count = ARRAY_SIZE(chv_dpll);
1334 } else if (IS_VALLEYVIEW(dev)) {
1336 count = ARRAY_SIZE(vlv_dpll);
1339 if (divisor && count) {
1340 for (i = 0; i < count; i++) {
1341 if (link_bw == divisor[i].link_bw) {
1342 pipe_config->dpll = divisor[i].dpll;
1343 pipe_config->clock_set = true;
1350 static int intersect_rates(const int *source_rates, int source_len,
1351 const int *sink_rates, int sink_len,
1354 int i = 0, j = 0, k = 0;
1356 while (i < source_len && j < sink_len) {
1357 if (source_rates[i] == sink_rates[j]) {
1358 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1360 common_rates[k] = source_rates[i];
1364 } else if (source_rates[i] < sink_rates[j]) {
1373 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1376 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1377 const int *source_rates, *sink_rates;
1378 int source_len, sink_len;
1380 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1381 source_len = intel_dp_source_rates(dev, &source_rates);
1383 return intersect_rates(source_rates, source_len,
1384 sink_rates, sink_len,
1388 static void snprintf_int_array(char *str, size_t len,
1389 const int *array, int nelem)
1395 for (i = 0; i < nelem; i++) {
1396 int r = ksnprintf(str, len, "%d,", array[i]);
1404 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1406 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1407 const int *source_rates, *sink_rates;
1408 int source_len, sink_len, common_len;
1409 int common_rates[DP_MAX_SUPPORTED_RATES];
1410 char str[128]; /* FIXME: too big for stack? */
1412 if ((drm_debug & DRM_UT_KMS) == 0)
1415 source_len = intel_dp_source_rates(dev, &source_rates);
1416 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1417 DRM_DEBUG_KMS("source rates: %s\n", str);
1419 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1420 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1421 DRM_DEBUG_KMS("sink rates: %s\n", str);
1423 common_len = intel_dp_common_rates(intel_dp, common_rates);
1424 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1425 DRM_DEBUG_KMS("common rates: %s\n", str);
1428 static int rate_to_index(int find, const int *rates)
1432 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1433 if (find == rates[i])
1440 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1442 int rates[DP_MAX_SUPPORTED_RATES] = {};
1445 len = intel_dp_common_rates(intel_dp, rates);
1446 if (WARN_ON(len <= 0))
1449 return rates[rate_to_index(0, rates) - 1];
1452 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1454 return rate_to_index(rate, intel_dp->sink_rates);
1458 intel_dp_compute_config(struct intel_encoder *encoder,
1459 struct intel_crtc_state *pipe_config)
1461 struct drm_device *dev = encoder->base.dev;
1462 struct drm_i915_private *dev_priv = dev->dev_private;
1463 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1464 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1465 enum port port = dp_to_dig_port(intel_dp)->port;
1466 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1467 struct intel_connector *intel_connector = intel_dp->attached_connector;
1468 int lane_count, clock;
1469 int min_lane_count = 1;
1470 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1471 /* Conveniently, the link BW constants become indices with a shift...*/
1475 int link_avail, link_clock;
1476 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1479 common_len = intel_dp_common_rates(intel_dp, common_rates);
1481 /* No common link rates between source and sink */
1482 WARN_ON(common_len <= 0);
1484 max_clock = common_len - 1;
1486 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1487 pipe_config->has_pch_encoder = true;
1489 pipe_config->has_dp_encoder = true;
1490 pipe_config->has_drrs = false;
1491 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1493 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1494 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1496 if (!HAS_PCH_SPLIT(dev))
1497 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1498 intel_connector->panel.fitting_mode);
1500 intel_pch_panel_fitting(intel_crtc, pipe_config,
1501 intel_connector->panel.fitting_mode);
1504 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1507 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1508 "max bw %d pixel clock %iKHz\n",
1509 max_lane_count, common_rates[max_clock],
1510 adjusted_mode->crtc_clock);
1512 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1513 * bpc in between. */
1514 bpp = pipe_config->pipe_bpp;
1515 if (is_edp(intel_dp)) {
1516 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1517 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1518 dev_priv->vbt.edp_bpp);
1519 bpp = dev_priv->vbt.edp_bpp;
1523 * Use the maximum clock and number of lanes the eDP panel
1524 * advertizes being capable of. The panels are generally
1525 * designed to support only a single clock and lane
1526 * configuration, and typically these values correspond to the
1527 * native resolution of the panel.
1529 min_lane_count = max_lane_count;
1530 min_clock = max_clock;
1533 for (; bpp >= 6*3; bpp -= 2*3) {
1534 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1537 for (clock = min_clock; clock <= max_clock; clock++) {
1538 for (lane_count = min_lane_count;
1539 lane_count <= max_lane_count;
1542 link_clock = common_rates[clock];
1543 link_avail = intel_dp_max_data_rate(link_clock,
1546 if (mode_rate <= link_avail) {
1556 if (intel_dp->color_range_auto) {
1559 * CEA-861-E - 5.1 Default Encoding Parameters
1560 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1562 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1563 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1565 intel_dp->color_range = 0;
1568 if (intel_dp->color_range)
1569 pipe_config->limited_color_range = true;
1571 intel_dp->lane_count = lane_count;
1573 if (intel_dp->num_sink_rates) {
1574 intel_dp->link_bw = 0;
1575 intel_dp->rate_select =
1576 intel_dp_rate_select(intel_dp, common_rates[clock]);
1579 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1580 intel_dp->rate_select = 0;
1583 pipe_config->pipe_bpp = bpp;
1584 pipe_config->port_clock = common_rates[clock];
1586 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1587 intel_dp->link_bw, intel_dp->lane_count,
1588 pipe_config->port_clock, bpp);
1589 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1590 mode_rate, link_avail);
1592 intel_link_compute_m_n(bpp, lane_count,
1593 adjusted_mode->crtc_clock,
1594 pipe_config->port_clock,
1595 &pipe_config->dp_m_n);
1597 if (intel_connector->panel.downclock_mode != NULL &&
1598 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1599 pipe_config->has_drrs = true;
1600 intel_link_compute_m_n(bpp, lane_count,
1601 intel_connector->panel.downclock_mode->clock,
1602 pipe_config->port_clock,
1603 &pipe_config->dp_m2_n2);
1606 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1607 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1608 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1609 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1611 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1616 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1618 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1619 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1620 struct drm_device *dev = crtc->base.dev;
1621 struct drm_i915_private *dev_priv = dev->dev_private;
1624 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1625 crtc->config->port_clock);
1626 dpa_ctl = I915_READ(DP_A);
1627 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1629 if (crtc->config->port_clock == 162000) {
1630 /* For a long time we've carried around a ILK-DevA w/a for the
1631 * 160MHz clock. If we're really unlucky, it's still required.
1633 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1634 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1635 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1637 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1638 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1641 I915_WRITE(DP_A, dpa_ctl);
1647 static void intel_dp_prepare(struct intel_encoder *encoder)
1649 struct drm_device *dev = encoder->base.dev;
1650 struct drm_i915_private *dev_priv = dev->dev_private;
1651 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1652 enum port port = dp_to_dig_port(intel_dp)->port;
1653 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1654 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1657 * There are four kinds of DP registers:
1664 * IBX PCH and CPU are the same for almost everything,
1665 * except that the CPU DP PLL is configured in this
1668 * CPT PCH is quite different, having many bits moved
1669 * to the TRANS_DP_CTL register instead. That
1670 * configuration happens (oddly) in ironlake_pch_enable
1673 /* Preserve the BIOS-computed detected bit. This is
1674 * supposed to be read-only.
1676 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1678 /* Handle DP bits in common between all three register formats */
1679 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1680 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1682 if (crtc->config->has_audio)
1683 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1685 /* Split out the IBX/CPU vs CPT settings */
1687 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1688 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1689 intel_dp->DP |= DP_SYNC_HS_HIGH;
1690 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1691 intel_dp->DP |= DP_SYNC_VS_HIGH;
1692 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1694 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1695 intel_dp->DP |= DP_ENHANCED_FRAMING;
1697 intel_dp->DP |= crtc->pipe << 29;
1698 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1699 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1700 intel_dp->DP |= intel_dp->color_range;
1702 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1703 intel_dp->DP |= DP_SYNC_HS_HIGH;
1704 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1705 intel_dp->DP |= DP_SYNC_VS_HIGH;
1706 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1708 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1709 intel_dp->DP |= DP_ENHANCED_FRAMING;
1711 if (!IS_CHERRYVIEW(dev)) {
1712 if (crtc->pipe == 1)
1713 intel_dp->DP |= DP_PIPEB_SELECT;
1715 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1718 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1722 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1723 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1725 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1726 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1728 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1729 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1731 static void wait_panel_status(struct intel_dp *intel_dp,
1735 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1736 struct drm_i915_private *dev_priv = dev->dev_private;
1737 u32 pp_stat_reg, pp_ctrl_reg;
1739 lockdep_assert_held(&dev_priv->pps_mutex);
1741 pp_stat_reg = _pp_stat_reg(intel_dp);
1742 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1744 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1746 I915_READ(pp_stat_reg),
1747 I915_READ(pp_ctrl_reg));
1749 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1750 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1751 I915_READ(pp_stat_reg),
1752 I915_READ(pp_ctrl_reg));
1755 DRM_DEBUG_KMS("Wait complete\n");
1758 static void wait_panel_on(struct intel_dp *intel_dp)
1760 DRM_DEBUG_KMS("Wait for panel power on\n");
1761 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1764 static void wait_panel_off(struct intel_dp *intel_dp)
1766 DRM_DEBUG_KMS("Wait for panel power off time\n");
1767 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1770 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1772 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1774 /* When we disable the VDD override bit last we have to do the manual
1776 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1777 intel_dp->panel_power_cycle_delay);
1779 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1782 static void wait_backlight_on(struct intel_dp *intel_dp)
1784 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1785 intel_dp->backlight_on_delay);
1788 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1790 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1791 intel_dp->backlight_off_delay);
1794 /* Read the current pp_control value, unlocking the register if it
1798 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1800 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1801 struct drm_i915_private *dev_priv = dev->dev_private;
1804 lockdep_assert_held(&dev_priv->pps_mutex);
1806 control = I915_READ(_pp_ctrl_reg(intel_dp));
1807 control &= ~PANEL_UNLOCK_MASK;
1808 control |= PANEL_UNLOCK_REGS;
1813 * Must be paired with edp_panel_vdd_off().
1814 * Must hold pps_mutex around the whole on/off sequence.
1815 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1817 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1819 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1820 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1821 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1822 struct drm_i915_private *dev_priv = dev->dev_private;
1823 enum intel_display_power_domain power_domain;
1825 u32 pp_stat_reg, pp_ctrl_reg;
1826 bool need_to_disable = !intel_dp->want_panel_vdd;
1828 lockdep_assert_held(&dev_priv->pps_mutex);
1830 if (!is_edp(intel_dp))
1833 cancel_delayed_work(&intel_dp->panel_vdd_work);
1834 intel_dp->want_panel_vdd = true;
1836 if (edp_have_panel_vdd(intel_dp))
1837 return need_to_disable;
1839 power_domain = intel_display_port_power_domain(intel_encoder);
1840 intel_display_power_get(dev_priv, power_domain);
1842 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1843 port_name(intel_dig_port->port));
1845 if (!edp_have_panel_power(intel_dp))
1846 wait_panel_power_cycle(intel_dp);
1848 pp = ironlake_get_pp_control(intel_dp);
1849 pp |= EDP_FORCE_VDD;
1851 pp_stat_reg = _pp_stat_reg(intel_dp);
1852 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1854 I915_WRITE(pp_ctrl_reg, pp);
1855 POSTING_READ(pp_ctrl_reg);
1856 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1857 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1859 * If the panel wasn't on, delay before accessing aux channel
1861 if (!edp_have_panel_power(intel_dp)) {
1862 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1863 port_name(intel_dig_port->port));
1864 msleep(intel_dp->panel_power_up_delay);
1867 return need_to_disable;
1871 * Must be paired with intel_edp_panel_vdd_off() or
1872 * intel_edp_panel_off().
1873 * Nested calls to these functions are not allowed since
1874 * we drop the lock. Caller must use some higher level
1875 * locking to prevent nested calls from other threads.
1877 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1881 if (!is_edp(intel_dp))
1885 vdd = edp_panel_vdd_on(intel_dp);
1886 pps_unlock(intel_dp);
1888 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1889 port_name(dp_to_dig_port(intel_dp)->port));
1892 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1895 struct drm_i915_private *dev_priv = dev->dev_private;
1896 struct intel_digital_port *intel_dig_port =
1897 dp_to_dig_port(intel_dp);
1898 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1899 enum intel_display_power_domain power_domain;
1901 u32 pp_stat_reg, pp_ctrl_reg;
1903 lockdep_assert_held(&dev_priv->pps_mutex);
1905 WARN_ON(intel_dp->want_panel_vdd);
1907 if (!edp_have_panel_vdd(intel_dp))
1910 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1911 port_name(intel_dig_port->port));
1913 pp = ironlake_get_pp_control(intel_dp);
1914 pp &= ~EDP_FORCE_VDD;
1916 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1917 pp_stat_reg = _pp_stat_reg(intel_dp);
1919 I915_WRITE(pp_ctrl_reg, pp);
1920 POSTING_READ(pp_ctrl_reg);
1922 /* Make sure sequencer is idle before allowing subsequent activity */
1923 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1924 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1926 if ((pp & POWER_TARGET_ON) == 0)
1927 intel_dp->last_power_cycle = jiffies;
1929 power_domain = intel_display_port_power_domain(intel_encoder);
1930 intel_display_power_put(dev_priv, power_domain);
1933 static void edp_panel_vdd_work(struct work_struct *__work)
1935 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1936 struct intel_dp, panel_vdd_work);
1939 if (!intel_dp->want_panel_vdd)
1940 edp_panel_vdd_off_sync(intel_dp);
1941 pps_unlock(intel_dp);
1944 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1946 unsigned long delay;
1949 * Queue the timer to fire a long time from now (relative to the power
1950 * down delay) to keep the panel power up across a sequence of
1953 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1954 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1958 * Must be paired with edp_panel_vdd_on().
1959 * Must hold pps_mutex around the whole on/off sequence.
1960 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1962 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1964 struct drm_i915_private *dev_priv =
1965 intel_dp_to_dev(intel_dp)->dev_private;
1967 lockdep_assert_held(&dev_priv->pps_mutex);
1969 if (!is_edp(intel_dp))
1972 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1973 port_name(dp_to_dig_port(intel_dp)->port));
1975 intel_dp->want_panel_vdd = false;
1978 edp_panel_vdd_off_sync(intel_dp);
1980 edp_panel_vdd_schedule_off(intel_dp);
1983 static void edp_panel_on(struct intel_dp *intel_dp)
1985 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1986 struct drm_i915_private *dev_priv = dev->dev_private;
1990 lockdep_assert_held(&dev_priv->pps_mutex);
1992 if (!is_edp(intel_dp))
1995 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1996 port_name(dp_to_dig_port(intel_dp)->port));
1998 if (WARN(edp_have_panel_power(intel_dp),
1999 "eDP port %c panel power already on\n",
2000 port_name(dp_to_dig_port(intel_dp)->port)))
2003 wait_panel_power_cycle(intel_dp);
2005 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2006 pp = ironlake_get_pp_control(intel_dp);
2008 /* ILK workaround: disable reset around power sequence */
2009 pp &= ~PANEL_POWER_RESET;
2010 I915_WRITE(pp_ctrl_reg, pp);
2011 POSTING_READ(pp_ctrl_reg);
2014 pp |= POWER_TARGET_ON;
2016 pp |= PANEL_POWER_RESET;
2018 I915_WRITE(pp_ctrl_reg, pp);
2019 POSTING_READ(pp_ctrl_reg);
2021 wait_panel_on(intel_dp);
2022 intel_dp->last_power_on = jiffies;
2025 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2026 I915_WRITE(pp_ctrl_reg, pp);
2027 POSTING_READ(pp_ctrl_reg);
2031 void intel_edp_panel_on(struct intel_dp *intel_dp)
2033 if (!is_edp(intel_dp))
2037 edp_panel_on(intel_dp);
2038 pps_unlock(intel_dp);
2042 static void edp_panel_off(struct intel_dp *intel_dp)
2044 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2046 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 enum intel_display_power_domain power_domain;
2052 lockdep_assert_held(&dev_priv->pps_mutex);
2054 if (!is_edp(intel_dp))
2057 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2058 port_name(dp_to_dig_port(intel_dp)->port));
2060 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2061 port_name(dp_to_dig_port(intel_dp)->port));
2063 pp = ironlake_get_pp_control(intel_dp);
2064 /* We need to switch off panel power _and_ force vdd, for otherwise some
2065 * panels get very unhappy and cease to work. */
2066 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2069 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2071 intel_dp->want_panel_vdd = false;
2073 I915_WRITE(pp_ctrl_reg, pp);
2074 POSTING_READ(pp_ctrl_reg);
2076 intel_dp->last_power_cycle = jiffies;
2077 wait_panel_off(intel_dp);
2079 /* We got a reference when we enabled the VDD. */
2080 power_domain = intel_display_port_power_domain(intel_encoder);
2081 intel_display_power_put(dev_priv, power_domain);
2084 void intel_edp_panel_off(struct intel_dp *intel_dp)
2086 if (!is_edp(intel_dp))
2090 edp_panel_off(intel_dp);
2091 pps_unlock(intel_dp);
2094 /* Enable backlight in the panel power control. */
2095 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2097 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2098 struct drm_device *dev = intel_dig_port->base.base.dev;
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2104 * If we enable the backlight right away following a panel power
2105 * on, we may see slight flicker as the panel syncs with the eDP
2106 * link. So delay a bit to make sure the image is solid before
2107 * allowing it to appear.
2109 wait_backlight_on(intel_dp);
2113 pp = ironlake_get_pp_control(intel_dp);
2114 pp |= EDP_BLC_ENABLE;
2116 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2118 I915_WRITE(pp_ctrl_reg, pp);
2119 POSTING_READ(pp_ctrl_reg);
2121 pps_unlock(intel_dp);
2124 /* Enable backlight PWM and backlight PP control. */
2125 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2127 if (!is_edp(intel_dp))
2130 DRM_DEBUG_KMS("\n");
2132 intel_panel_enable_backlight(intel_dp->attached_connector);
2133 _intel_edp_backlight_on(intel_dp);
2136 /* Disable backlight in the panel power control. */
2137 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2139 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2140 struct drm_i915_private *dev_priv = dev->dev_private;
2144 if (!is_edp(intel_dp))
2149 pp = ironlake_get_pp_control(intel_dp);
2150 pp &= ~EDP_BLC_ENABLE;
2152 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2154 I915_WRITE(pp_ctrl_reg, pp);
2155 POSTING_READ(pp_ctrl_reg);
2157 pps_unlock(intel_dp);
2159 intel_dp->last_backlight_off = jiffies;
2160 edp_wait_backlight_off(intel_dp);
2163 /* Disable backlight PP control and backlight PWM. */
2164 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2166 if (!is_edp(intel_dp))
2169 DRM_DEBUG_KMS("\n");
2171 _intel_edp_backlight_off(intel_dp);
2172 intel_panel_disable_backlight(intel_dp->attached_connector);
2176 * Hook for controlling the panel power control backlight through the bl_power
2177 * sysfs attribute. Take care to handle multiple calls.
2179 static void intel_edp_backlight_power(struct intel_connector *connector,
2182 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2186 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2187 pps_unlock(intel_dp);
2189 if (is_enabled == enable)
2192 DRM_DEBUG_KMS("panel power control backlight %s\n",
2193 enable ? "enable" : "disable");
2196 _intel_edp_backlight_on(intel_dp);
2198 _intel_edp_backlight_off(intel_dp);
2201 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2204 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2205 struct drm_device *dev = crtc->dev;
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2209 assert_pipe_disabled(dev_priv,
2210 to_intel_crtc(crtc)->pipe);
2212 DRM_DEBUG_KMS("\n");
2213 dpa_ctl = I915_READ(DP_A);
2214 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2215 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2217 /* We don't adjust intel_dp->DP while tearing down the link, to
2218 * facilitate link retraining (e.g. after hotplug). Hence clear all
2219 * enable bits here to ensure that we don't enable too much. */
2220 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2221 intel_dp->DP |= DP_PLL_ENABLE;
2222 I915_WRITE(DP_A, intel_dp->DP);
2227 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2229 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2230 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2231 struct drm_device *dev = crtc->dev;
2232 struct drm_i915_private *dev_priv = dev->dev_private;
2235 assert_pipe_disabled(dev_priv,
2236 to_intel_crtc(crtc)->pipe);
2238 dpa_ctl = I915_READ(DP_A);
2239 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2240 "dp pll off, should be on\n");
2241 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2243 /* We can't rely on the value tracked for the DP register in
2244 * intel_dp->DP because link_down must not change that (otherwise link
2245 * re-training will fail. */
2246 dpa_ctl &= ~DP_PLL_ENABLE;
2247 I915_WRITE(DP_A, dpa_ctl);
2252 /* If the sink supports it, try to set the power state appropriately */
2253 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2257 /* Should have a valid DPCD by this point */
2258 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2261 if (mode != DRM_MODE_DPMS_ON) {
2262 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2266 * When turning on, we need to retry for 1ms to give the sink
2269 for (i = 0; i < 3; i++) {
2270 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2279 DRM_DEBUG_KMS("failed to %s sink power state\n",
2280 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2283 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2284 enum i915_pipe *pipe)
2286 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2287 enum port port = dp_to_dig_port(intel_dp)->port;
2288 struct drm_device *dev = encoder->base.dev;
2289 struct drm_i915_private *dev_priv = dev->dev_private;
2290 enum intel_display_power_domain power_domain;
2293 power_domain = intel_display_port_power_domain(encoder);
2294 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2297 tmp = I915_READ(intel_dp->output_reg);
2299 if (!(tmp & DP_PORT_EN))
2302 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2303 *pipe = PORT_TO_PIPE_CPT(tmp);
2304 } else if (IS_CHERRYVIEW(dev)) {
2305 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2306 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2307 *pipe = PORT_TO_PIPE(tmp);
2313 switch (intel_dp->output_reg) {
2315 trans_sel = TRANS_DP_PORT_SEL_B;
2318 trans_sel = TRANS_DP_PORT_SEL_C;
2321 trans_sel = TRANS_DP_PORT_SEL_D;
2327 for_each_pipe(dev_priv, i) {
2328 trans_dp = I915_READ(TRANS_DP_CTL(i));
2329 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2335 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2336 intel_dp->output_reg);
2342 static void intel_dp_get_config(struct intel_encoder *encoder,
2343 struct intel_crtc_state *pipe_config)
2345 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2347 struct drm_device *dev = encoder->base.dev;
2348 struct drm_i915_private *dev_priv = dev->dev_private;
2349 enum port port = dp_to_dig_port(intel_dp)->port;
2350 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2353 tmp = I915_READ(intel_dp->output_reg);
2355 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2357 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2358 if (tmp & DP_SYNC_HS_HIGH)
2359 flags |= DRM_MODE_FLAG_PHSYNC;
2361 flags |= DRM_MODE_FLAG_NHSYNC;
2363 if (tmp & DP_SYNC_VS_HIGH)
2364 flags |= DRM_MODE_FLAG_PVSYNC;
2366 flags |= DRM_MODE_FLAG_NVSYNC;
2368 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2369 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2370 flags |= DRM_MODE_FLAG_PHSYNC;
2372 flags |= DRM_MODE_FLAG_NHSYNC;
2374 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2375 flags |= DRM_MODE_FLAG_PVSYNC;
2377 flags |= DRM_MODE_FLAG_NVSYNC;
2380 pipe_config->base.adjusted_mode.flags |= flags;
2382 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2383 tmp & DP_COLOR_RANGE_16_235)
2384 pipe_config->limited_color_range = true;
2386 pipe_config->has_dp_encoder = true;
2388 intel_dp_get_m_n(crtc, pipe_config);
2390 if (port == PORT_A) {
2391 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2392 pipe_config->port_clock = 162000;
2394 pipe_config->port_clock = 270000;
2397 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2398 &pipe_config->dp_m_n);
2400 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2401 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2403 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2405 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2406 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2408 * This is a big fat ugly hack.
2410 * Some machines in UEFI boot mode provide us a VBT that has 18
2411 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2412 * unknown we fail to light up. Yet the same BIOS boots up with
2413 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2414 * max, not what it tells us to use.
2416 * Note: This will still be broken if the eDP panel is not lit
2417 * up by the BIOS, and thus we can't get the mode at module
2420 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2421 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2422 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2426 static void intel_disable_dp(struct intel_encoder *encoder)
2428 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2429 struct drm_device *dev = encoder->base.dev;
2430 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2432 if (crtc->config->has_audio)
2433 intel_audio_codec_disable(encoder);
2435 if (HAS_PSR(dev) && !HAS_DDI(dev))
2436 intel_psr_disable(intel_dp);
2438 /* Make sure the panel is off before trying to change the mode. But also
2439 * ensure that we have vdd while we switch off the panel. */
2440 intel_edp_panel_vdd_on(intel_dp);
2441 intel_edp_backlight_off(intel_dp);
2442 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2443 intel_edp_panel_off(intel_dp);
2445 /* disable the port before the pipe on g4x */
2446 if (INTEL_INFO(dev)->gen < 5)
2447 intel_dp_link_down(intel_dp);
2450 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2453 enum port port = dp_to_dig_port(intel_dp)->port;
2455 intel_dp_link_down(intel_dp);
2457 ironlake_edp_pll_off(intel_dp);
2460 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2462 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2464 intel_dp_link_down(intel_dp);
/*
 * chv_post_disable_dp() - post-disable for CHV: take the link down,
 * then use sideband (DPIO) writes to request a soft reset on both PCS
 * lane pairs and assert the TX data-lane resets, leaving the PHY lanes
 * parked in reset.
 * NOTE(review): the local `val` declaration line appears to have been
 * dropped by extraction; code lines below are kept byte-identical.
 */
2467 static void chv_post_disable_dp(struct intel_encoder *encoder)
2469 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2470 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2471 struct drm_device *dev = encoder->base.dev;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473 struct intel_crtc *intel_crtc =
2474 to_intel_crtc(encoder->base.crtc);
2475 enum dpio_channel ch = vlv_dport_to_channel(dport);
2476 enum i915_pipe pipe = intel_crtc->pipe;
2479 intel_dp_link_down(intel_dp);
/* All DPIO sideband traffic is serialized by dpio_lock. */
2481 mutex_lock(&dev_priv->dpio_lock);
2483 /* Propagate soft reset to data lane reset */
2484 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2485 val |= CHV_PCS_REQ_SOFTRESET_EN;
2486 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2489 val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/* Clearing the LANE*_RESET bits asserts reset on the TX lanes. */
2492 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2493 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2496 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2497 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2498 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2500 mutex_unlock(&dev_priv->dpio_lock);
/*
 * _intel_dp_set_link_train() - encode the requested DP training pattern
 * into hardware register form. Three layouts are handled: DDI platforms
 * program DP_TP_CTL directly; CPT PCH ports use the *_CPT bits in the
 * port register value *DP; everything else uses the original g4x/vlv/chv
 * bits (with a CHV-specific mask/pattern-3 encoding). For the non-DDI
 * paths only *DP is updated -- the caller writes it to output_reg.
 * NOTE(review): several structural lines (the leading `if (HAS_DDI(dev))`,
 * break statements, else branches, closing braces) appear to have been
 * dropped by extraction; code lines below are kept byte-identical.
 */
2504 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2506 uint8_t dp_train_pat)
2508 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2509 struct drm_device *dev = intel_dig_port->base.base.dev;
2510 struct drm_i915_private *dev_priv = dev->dev_private;
2511 enum port port = intel_dig_port->port;
/* DDI path: training pattern lives in DP_TP_CTL, not the port reg. */
2514 uint32_t temp = I915_READ(DP_TP_CTL(port));
2516 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2517 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2519 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2521 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2522 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2523 case DP_TRAINING_PATTERN_DISABLE:
2524 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2527 case DP_TRAINING_PATTERN_1:
2528 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2530 case DP_TRAINING_PATTERN_2:
2531 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2533 case DP_TRAINING_PATTERN_3:
2534 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2537 I915_WRITE(DP_TP_CTL(port), temp);
/* CPT PCH ports (and all gen7) use the *_CPT pattern encoding. */
2539 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2540 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2542 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2543 case DP_TRAINING_PATTERN_DISABLE:
2544 *DP |= DP_LINK_TRAIN_OFF_CPT;
2546 case DP_TRAINING_PATTERN_1:
2547 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2549 case DP_TRAINING_PATTERN_2:
2550 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2552 case DP_TRAINING_PATTERN_3:
/* CPT has no TPS3; fall back to pattern 2 and complain. */
2553 DRM_ERROR("DP training pattern 3 not supported\n");
2554 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
/* Legacy path: CHV uses a wider mask to cover its TPS3 encoding. */
2559 if (IS_CHERRYVIEW(dev))
2560 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2562 *DP &= ~DP_LINK_TRAIN_MASK;
2564 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2565 case DP_TRAINING_PATTERN_DISABLE:
2566 *DP |= DP_LINK_TRAIN_OFF;
2568 case DP_TRAINING_PATTERN_1:
2569 *DP |= DP_LINK_TRAIN_PAT_1;
2571 case DP_TRAINING_PATTERN_2:
2572 *DP |= DP_LINK_TRAIN_PAT_2;
2574 case DP_TRAINING_PATTERN_3:
/* Only CHV supports TPS3 in this register layout. */
2575 if (IS_CHERRYVIEW(dev)) {
2576 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2578 DRM_ERROR("DP training pattern 3 not supported\n");
2579 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * intel_dp_enable_port() - write the port register twice: first with the
 * full configuration (training pattern 1) but DP_PORT_EN clear, then
 * again with DP_PORT_EN set. The two-step write is required on VLV/CHV
 * (see comment below); POSTING_READ flushes each write.
 */
2586 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2588 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2589 struct drm_i915_private *dev_priv = dev->dev_private;
2591 /* enable with pattern 1 (as per spec) */
2592 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2593 DP_TRAINING_PATTERN_1);
2595 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2596 POSTING_READ(intel_dp->output_reg);
2599 * Magic for VLV/CHV. We _must_ first set up the register
2600 * without actually enabling the port, and then do another
2601 * write to enable the port. Otherwise link training will
2602 * fail when the power sequencer is freshly used for this port.
2604 intel_dp->DP |= DP_PORT_EN;
2606 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2607 POSTING_READ(intel_dp->output_reg);
/*
 * intel_enable_dp() - common enable path: set up the VLV power
 * sequencer, enable the port, power the eDP panel on (with vdd held),
 * run full link training, wake the sink, and finally enable audio.
 * Warns and bails if the port is already enabled.
 * NOTE(review): extraction appears to have dropped the early `return;`
 * after WARN_ON and the pps_lock()/brace lines; code lines below are
 * kept byte-identical.
 */
2610 static void intel_enable_dp(struct intel_encoder *encoder)
2612 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2613 struct drm_device *dev = encoder->base.dev;
2614 struct drm_i915_private *dev_priv = dev->dev_private;
2615 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2616 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
/* Enabling an already-enabled port indicates a state-tracking bug. */
2618 if (WARN_ON(dp_reg & DP_PORT_EN))
/* VLV must bind a power sequencer to this port before enabling. */
2623 if (IS_VALLEYVIEW(dev))
2624 vlv_init_panel_power_sequencer(intel_dp);
2626 intel_dp_enable_port(intel_dp);
/* Panel-on sequence: vdd must be held while the panel powers up. */
2628 edp_panel_vdd_on(intel_dp);
2629 edp_panel_on(intel_dp);
2630 edp_panel_vdd_off(intel_dp, true);
2632 pps_unlock(intel_dp);
2634 if (IS_VALLEYVIEW(dev))
2635 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
/* Wake the sink, then run the full clock-recovery + EQ sequence. */
2637 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2638 intel_dp_start_link_train(intel_dp);
2639 intel_dp_complete_link_train(intel_dp);
2640 intel_dp_stop_link_train(intel_dp);
2642 if (crtc->config->has_audio) {
2643 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2644 pipe_name(crtc->pipe));
2645 intel_audio_codec_enable(encoder);
2649 static void g4x_enable_dp(struct intel_encoder *encoder)
2651 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2653 intel_enable_dp(encoder);
2654 intel_edp_backlight_on(intel_dp);
2657 static void vlv_enable_dp(struct intel_encoder *encoder)
2659 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2661 intel_edp_backlight_on(intel_dp);
2662 intel_psr_enable(intel_dp);
2665 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2667 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2668 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2670 intel_dp_prepare(encoder);
2672 /* Only ilk+ has port A */
2673 if (dport->port == PORT_A) {
2674 ironlake_set_pll_cpu_edp(intel_dp);
2675 ironlake_edp_pll_on(intel_dp);
/*
 * vlv_detach_power_sequencer() - logically disconnect this port's
 * current power sequencer: sync vdd off, clear the sequencer's port
 * select (PP_ON register), and mark pps_pipe invalid.
 */
2679 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2681 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2682 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2683 enum i915_pipe pipe = intel_dp->pps_pipe;
2684 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
/* Make sure vdd is fully released before touching the sequencer. */
2686 edp_panel_vdd_off_sync(intel_dp);
2689 * VLV seems to get confused when multiple power seqeuencers
2690 * have the same port selected (even if only one has power/vdd
2691 * enabled). The failure manifests as vlv_wait_port_ready() failing
2692 * CHV on the other hand doesn't seem to mind having the same port
2693 * selected in multiple power seqeuencers, but let's clear the
2694 * port select always when logically disconnecting a power sequencer
2697 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2698 pipe_name(pipe), port_name(intel_dig_port->port));
/* Zeroing PP_ON_DELAYS clears the port-select field for this PPS. */
2699 I915_WRITE(pp_on_reg, 0);
2700 POSTING_READ(pp_on_reg);
2702 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * vlv_steal_power_sequencer() - walk all eDP encoders and detach any
 * that currently own the given pipe's power sequencer, so the caller
 * can claim it. Must hold pps_mutex. Warns if the sequencer is stolen
 * from an active port.
 * NOTE(review): loop-body brace/continue lines and the `enum port port;`
 * declaration appear to have been dropped by extraction; code lines
 * below are kept byte-identical.
 */
2705 static void vlv_steal_power_sequencer(struct drm_device *dev,
2706 enum i915_pipe pipe)
2708 struct drm_i915_private *dev_priv = dev->dev_private;
2709 struct intel_encoder *encoder;
2711 lockdep_assert_held(&dev_priv->pps_mutex);
/* Only pipes A and B have power sequencers on VLV/CHV. */
2713 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2716 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2718 struct intel_dp *intel_dp;
/* Only eDP ports ever own a power sequencer. */
2721 if (encoder->type != INTEL_OUTPUT_EDP)
2724 intel_dp = enc_to_intel_dp(&encoder->base);
2725 port = dp_to_dig_port(intel_dp)->port;
2727 if (intel_dp->pps_pipe != pipe)
2730 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2731 pipe_name(pipe), port_name(port));
2733 WARN(encoder->connectors_active,
2734 "stealing pipe %c power sequencer from active eDP port %c\n",
2735 pipe_name(pipe), port_name(port));
2737 /* make sure vdd is off before we steal it */
2738 vlv_detach_power_sequencer(intel_dp);
/*
 * vlv_init_panel_power_sequencer() - bind the power sequencer of this
 * encoder's pipe to the port: detach any sequencer this port held,
 * steal the target pipe's sequencer from any other port, then
 * (re)initialize the PPS state and registers. Must hold pps_mutex.
 * No-op for non-eDP or when the right sequencer is already bound.
 */
2742 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2745 struct intel_encoder *encoder = &intel_dig_port->base;
2746 struct drm_device *dev = encoder->base.dev;
2747 struct drm_i915_private *dev_priv = dev->dev_private;
2748 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2750 lockdep_assert_held(&dev_priv->pps_mutex);
2752 if (!is_edp(intel_dp))
/* Already using this pipe's sequencer -- nothing to do. */
2755 if (intel_dp->pps_pipe == crtc->pipe)
2759 * If another power sequencer was being used on this
2760 * port previously make sure to turn off vdd there while
2761 * we still have control of it.
2763 if (intel_dp->pps_pipe != INVALID_PIPE)
2764 vlv_detach_power_sequencer(intel_dp)
2767 * We may be stealing the power
2768 * sequencer from another port.
2770 vlv_steal_power_sequencer(dev, crtc->pipe);
2772 /* now it's all ours */
2773 intel_dp->pps_pipe = crtc->pipe;
2775 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2776 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2778 /* init power sequencer on this pipe and port */
2779 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2780 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * vlv_pre_enable_dp() - pre-enable for VLV: program the PHY PCS
 * registers over DPIO sideband, then run the common enable path
 * (which on VLV also does the port enable and link training).
 * NOTE(review): the lines computing `val` between the DW8 read and the
 * DW8 write (and the `u32 val;` declaration) appear to have been
 * dropped by extraction; code lines below are kept byte-identical.
 */
2783 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2785 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2786 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2787 struct drm_device *dev = encoder->base.dev;
2788 struct drm_i915_private *dev_priv = dev->dev_private;
2789 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2790 enum dpio_channel port = vlv_dport_to_channel(dport);
2791 int pipe = intel_crtc->pipe;
2794 mutex_lock(&dev_priv->dpio_lock);
2796 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2804 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2805 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2807 mutex_unlock(&dev_priv->dpio_lock);
2809 intel_enable_dp(encoder);
/*
 * vlv_dp_pre_pll_enable() - runs before the PLL is enabled on VLV:
 * program the port register, then put the PHY TX lanes into their
 * default reset state and apply the inter-pair skew workaround via
 * DPIO sideband writes.
 */
2812 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2814 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2815 struct drm_device *dev = encoder->base.dev;
2816 struct drm_i915_private *dev_priv = dev->dev_private;
2817 struct intel_crtc *intel_crtc =
2818 to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel port = vlv_dport_to_channel(dport);
2820 int pipe = intel_crtc->pipe;
2822 intel_dp_prepare(encoder);
2824 /* Program Tx lane resets to default */
2825 mutex_lock(&dev_priv->dpio_lock);
2826 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2827 DPIO_PCS_TX_LANE2_RESET |
2828 DPIO_PCS_TX_LANE1_RESET);
2829 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2830 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2831 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2832 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2833 DPIO_PCS_CLK_SOFT_RESET);
2835 /* Fix up inter-pair skew failure */
2836 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2837 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2838 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2839 mutex_unlock(&dev_priv->dpio_lock);
/*
 * chv_pre_enable_dp() - pre-enable for CHV: via DPIO sideband, hand TX
 * FIFO reset control to hardware, deassert the soft data-lane resets,
 * release the TX lane resets, and program per-lane latency (upar)
 * settings; then run the common enable path.
 * NOTE(review): the `u32 val; int data, i;` declarations and the data
 * lane stagger write appear to have been dropped by extraction; code
 * lines below are kept byte-identical.
 */
2842 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2844 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2845 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2846 struct drm_device *dev = encoder->base.dev;
2847 struct drm_i915_private *dev_priv = dev->dev_private;
2848 struct intel_crtc *intel_crtc =
2849 to_intel_crtc(encoder->base.crtc);
2850 enum dpio_channel ch = vlv_dport_to_channel(dport);
2851 int pipe = intel_crtc->pipe;
2855 mutex_lock(&dev_priv->dpio_lock);
2857 /* allow hardware to manage TX FIFO reset source */
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2859 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2862 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2863 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2864 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2866 /* Deassert soft data lane reset*/
2867 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2868 val |= CHV_PCS_REQ_SOFTRESET_EN;
2869 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2871 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2872 val |= CHV_PCS_REQ_SOFTRESET_EN;
2873 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/* Setting the LANE*_RESET bits releases the TX lanes from reset. */
2875 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2876 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2877 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2879 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2880 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2881 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2883 /* Program Tx lane latency optimal setting*/
2884 for (i = 0; i < 4; i++) {
2885 /* Set the upar bit */
2886 data = (i == 1) ? 0x0 : 0x1;
2887 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2888 data << DPIO_UPAR_SHIFT);
2891 /* Data lane stagger programming */
2892 /* FIXME: Fix up value only after power analysis */
2894 mutex_unlock(&dev_priv->dpio_lock);
2896 intel_enable_dp(encoder);
/*
 * chv_dp_pre_pll_enable() - runs before PLL enable on CHV: program the
 * port register, then via DPIO sideband set up the left/right clock
 * buffer distribution for the pipe and select which common-lane clock
 * channel the PCS/CMN blocks use for this port.
 * NOTE(review): the `u32 val;` declaration and the conditional branch
 * lines between the mask-clear and force-set writes appear to have been
 * dropped by extraction; code lines below are kept byte-identical.
 */
2899 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2901 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2902 struct drm_device *dev = encoder->base.dev;
2903 struct drm_i915_private *dev_priv = dev->dev_private;
2904 struct intel_crtc *intel_crtc =
2905 to_intel_crtc(encoder->base.crtc);
2906 enum dpio_channel ch = vlv_dport_to_channel(dport);
2907 enum i915_pipe pipe = intel_crtc->pipe;
2910 intel_dp_prepare(encoder);
2912 mutex_lock(&dev_priv->dpio_lock);
2914 /* program left/right clock distribution */
2915 if (pipe != PIPE_B) {
2916 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2917 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2919 val |= CHV_BUFLEFTENA1_FORCE;
2921 val |= CHV_BUFRIGHTENA1_FORCE;
2922 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2924 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2925 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2927 val |= CHV_BUFLEFTENA2_FORCE;
2929 val |= CHV_BUFRIGHTENA2_FORCE;
2930 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2933 /* program clock channel usage */
2934 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2935 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2937 val &= ~CHV_PCS_USEDCLKCHANNEL;
2939 val |= CHV_PCS_USEDCLKCHANNEL;
2940 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2942 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2943 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2945 val &= ~CHV_PCS_USEDCLKCHANNEL;
2947 val |= CHV_PCS_USEDCLKCHANNEL;
2948 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2951 * This a a bit weird since generally CL
2952 * matches the pipe, but here we need to
2953 * pick the CL based on the port.
2955 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2957 val &= ~CHV_CMN_USEDCLKCHANNEL;
2959 val |= CHV_CMN_USEDCLKCHANNEL;
2960 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2962 mutex_unlock(&dev_priv->dpio_lock);
2966 * Native read with retry for link status and receiver capability reads for
2967 * cases where the sink may still be asleep.
2969 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2970 * supposed to retry 3 times per the spec.
/*
 * NOTE(review): the return-type line, the `ssize_t ret; int i;`
 * declarations, and the success/return handling inside the retry loop
 * appear to have been dropped by extraction; code lines below are kept
 * byte-identical.
 */
2973 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2974 void *buffer, size_t size)
2980 * Sometime we just get the same incorrect byte repeated
2981 * over the entire buffer. Doing just one throw away read
2982 * initially seems to "solve" it.
2984 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
/* Up to 3 attempts, per the DP spec's wake-retry requirement. */
2986 for (i = 0; i < 3; i++) {
2987 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2997 * Fetch AUX CH registers 0x202 - 0x207 which contain
2998 * link status information
/*
 * Returns true only if the full DP_LINK_STATUS_SIZE block was read.
 * NOTE(review): the DP_LANE0_1_STATUS offset and link_status buffer
 * argument lines appear to have been dropped by extraction -- confirm
 * against the original call.
 */
3001 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3003 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3006 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3009 /* These are source-specific values. */
/*
 * intel_dp_voltage_max() - highest voltage-swing level this source
 * supports for the given port/platform, in DPCD
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_* encoding.
 */
3011 intel_dp_voltage_max(struct intel_dp *intel_dp)
3013 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3014 struct drm_i915_private *dev_priv = dev->dev_private;
3015 enum port port = dp_to_dig_port(intel_dp)->port;
/* Gen9: level 3 only for eDP (port A) with the low-vswing VBT flag. */
3017 if (INTEL_INFO(dev)->gen >= 9) {
3018 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
3019 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3020 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3021 } else if (IS_VALLEYVIEW(dev))
3022 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3023 else if (IS_GEN7(dev) && port == PORT_A)
3024 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3025 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3026 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3028 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * intel_dp_pre_emphasis_max() - highest pre-emphasis level the source
 * supports at the given voltage-swing level, per platform. Higher
 * swings allow less pre-emphasis (the two trade off against the
 * maximum total drive capability of the TX).
 * NOTE(review): the return-type line and default-case lines appear to
 * have been dropped by extraction; code lines below are kept
 * byte-identical.
 */
3032 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3034 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3035 enum port port = dp_to_dig_port(intel_dp)->port;
3037 if (INTEL_INFO(dev)->gen >= 9) {
3038 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3040 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3042 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3043 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3044 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3046 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3048 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3050 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3051 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3053 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3054 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3055 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3057 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3062 } else if (IS_VALLEYVIEW(dev)) {
3063 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3065 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3067 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3069 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3070 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3072 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/* Gen7 CPU eDP (port A) has a more limited table. */
3074 } else if (IS_GEN7(dev) && port == PORT_A) {
3075 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3077 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3080 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3082 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3085 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3087 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3089 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3091 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3094 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * intel_vlv_signal_levels() - translate the DPCD train_set request
 * (swing + pre-emphasis) into VLV PHY register values (de-emphasis,
 * pre-emphasis and unique-transition-scale words) and program them via
 * DPIO sideband. TX_DW5 is cleared first and written with 0x80000000
 * last to latch the new settings.
 * NOTE(review): break/default lines in the nested switches appear to
 * have been dropped by extraction; code lines below are kept
 * byte-identical.
 */
3099 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
3101 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3102 struct drm_i915_private *dev_priv = dev->dev_private;
3103 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3104 struct intel_crtc *intel_crtc =
3105 to_intel_crtc(dport->base.base.crtc);
3106 unsigned long demph_reg_value, preemph_reg_value,
3107 uniqtranscale_reg_value;
3108 uint8_t train_set = intel_dp->train_set[0];
3109 enum dpio_channel port = vlv_dport_to_channel(dport);
3110 int pipe = intel_crtc->pipe;
/* Outer switch: requested pre-emphasis; inner: requested swing. */
3112 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3113 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3114 preemph_reg_value = 0x0004000;
3115 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3117 demph_reg_value = 0x2B405555;
3118 uniqtranscale_reg_value = 0x552AB83A;
3120 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3121 demph_reg_value = 0x2B404040;
3122 uniqtranscale_reg_value = 0x5548B83A;
3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3125 demph_reg_value = 0x2B245555;
3126 uniqtranscale_reg_value = 0x5560B83A;
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3129 demph_reg_value = 0x2B405555;
3130 uniqtranscale_reg_value = 0x5598DA3A;
3136 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3137 preemph_reg_value = 0x0002000;
3138 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 demph_reg_value = 0x2B404040;
3141 uniqtranscale_reg_value = 0x5552B83A;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144 demph_reg_value = 0x2B404848;
3145 uniqtranscale_reg_value = 0x5580B83A;
3147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3148 demph_reg_value = 0x2B404040;
3149 uniqtranscale_reg_value = 0x55ADDA3A;
3155 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3156 preemph_reg_value = 0x0000000;
3157 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3159 demph_reg_value = 0x2B305555;
3160 uniqtranscale_reg_value = 0x5570B83A;
3162 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3163 demph_reg_value = 0x2B2B4040;
3164 uniqtranscale_reg_value = 0x55ADDA3A;
3170 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3171 preemph_reg_value = 0x0006000;
3172 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 demph_reg_value = 0x1B405555;
3175 uniqtranscale_reg_value = 0x55ADDA3A;
/* Program the computed values atomically under dpio_lock. */
3185 mutex_lock(&dev_priv->dpio_lock);
3186 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3187 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3188 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3189 uniqtranscale_reg_value);
3190 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3191 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3192 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3193 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3194 mutex_unlock(&dev_priv->dpio_lock);
/*
 * intel_chv_signal_levels() - translate the DPCD train_set request into
 * CHV PHY de-emphasis/margin values and program the PHY via DPIO:
 * clear the swing calculation, set margins, program per-lane swing
 * de-emphasis and margin, handle the special max-swing/no-preemph
 * unique-transition-scale case, then kick off the swing calculation.
 * NOTE(review): break/default lines in the nested switches and the
 * `int i;` declaration appear to have been dropped by extraction; code
 * lines below are kept byte-identical.
 */
3199 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3201 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3202 struct drm_i915_private *dev_priv = dev->dev_private;
3203 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3204 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3205 u32 deemph_reg_value, margin_reg_value, val;
3206 uint8_t train_set = intel_dp->train_set[0];
3207 enum dpio_channel ch = vlv_dport_to_channel(dport);
3208 enum i915_pipe pipe = intel_crtc->pipe;
/* Outer switch: requested pre-emphasis; inner: requested swing. */
3211 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3212 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3213 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3214 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3215 deemph_reg_value = 128;
3216 margin_reg_value = 52;
3218 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3219 deemph_reg_value = 128;
3220 margin_reg_value = 77;
3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3223 deemph_reg_value = 128;
3224 margin_reg_value = 102;
3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3227 deemph_reg_value = 128;
3228 margin_reg_value = 154;
3229 /* FIXME extra to set for 1200 */
3235 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3236 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3238 deemph_reg_value = 85;
3239 margin_reg_value = 78;
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3242 deemph_reg_value = 85;
3243 margin_reg_value = 116;
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3246 deemph_reg_value = 85;
3247 margin_reg_value = 154;
3253 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3254 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3256 deemph_reg_value = 64;
3257 margin_reg_value = 104;
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3260 deemph_reg_value = 64;
3261 margin_reg_value = 154;
3267 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3268 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3270 deemph_reg_value = 43;
3271 margin_reg_value = 154;
3281 mutex_lock(&dev_priv->dpio_lock);
3283 /* Clear calc init */
3284 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3285 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3286 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3287 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3288 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3290 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3291 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3292 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3293 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3294 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* Zero the PCS TX margins before programming per-lane values. */
3296 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3297 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3298 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3301 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3302 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3303 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3306 /* Program swing deemph */
3307 for (i = 0; i < 4; i++) {
3308 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3309 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3310 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3311 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3314 /* Program swing margin */
3315 for (i = 0; i < 4; i++) {
3316 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3317 val &= ~DPIO_SWING_MARGIN000_MASK;
3318 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3319 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3322 /* Disable unique transition scale */
3323 for (i = 0; i < 4; i++) {
3324 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3325 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3326 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
/* Max swing with no pre-emphasis needs the unique transition scale. */
3329 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3330 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3331 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3332 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3335 * The document said it needs to set bit 27 for ch0 and bit 26
3336 * for ch1. Might be a typo in the doc.
3337 * For now, for this unique transition scale selection, set bit
3338 * 27 for ch0 and ch1.
3340 for (i = 0; i < 4; i++) {
3341 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3342 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3343 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3346 for (i = 0; i < 4; i++) {
3347 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3348 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3349 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3350 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3354 /* Start swing calculation */
3355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3356 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3357 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3359 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3360 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3361 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* Bypass the LRC calibration on this common-lane block. */
3364 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3365 val |= DPIO_LRC_BYPASS;
3366 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3368 mutex_unlock(&dev_priv->dpio_lock);
/*
 * intel_get_adjust_train() - compute the next train_set from the sink's
 * per-lane adjust requests in link_status: take the maximum requested
 * voltage/pre-emphasis across the active lanes, clamp to the source's
 * platform maxima (flagging MAX_*_REACHED when clamped), and apply the
 * same value to all four train_set entries.
 * NOTE(review): the `uint8_t v = 0, p = 0;` initializers and the
 * max-accumulation lines inside the loop appear to have been dropped by
 * extraction; code lines below are kept byte-identical.
 */
3374 intel_get_adjust_train(struct intel_dp *intel_dp,
3375 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3380 uint8_t voltage_max;
3381 uint8_t preemph_max;
3383 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3384 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3385 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
/* Clamp to what this source can actually drive. */
3393 voltage_max = intel_dp_voltage_max(intel_dp);
3394 if (v >= voltage_max)
3395 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3397 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3398 if (p >= preemph_max)
3399 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3401 for (lane = 0; lane < 4; lane++)
3402 intel_dp->train_set[lane] = v | p;
3406 intel_gen4_signal_levels(uint8_t train_set)
3408 uint32_t signal_levels = 0;
3410 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3411 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3413 signal_levels |= DP_VOLTAGE_0_4;
3415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3416 signal_levels |= DP_VOLTAGE_0_6;
3418 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3419 signal_levels |= DP_VOLTAGE_0_8;
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3422 signal_levels |= DP_VOLTAGE_1_2;
3425 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3426 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3428 signal_levels |= DP_PRE_EMPHASIS_0;
3430 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3431 signal_levels |= DP_PRE_EMPHASIS_3_5;
3433 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3434 signal_levels |= DP_PRE_EMPHASIS_6;
3436 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3437 signal_levels |= DP_PRE_EMPHASIS_9_5;
3440 return signal_levels;
3443 /* Gen6's DP voltage swing and pre-emphasis control */
3445 intel_gen6_edp_signal_levels(uint8_t train_set)
3447 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3448 DP_TRAIN_PRE_EMPHASIS_MASK);
3449 switch (signal_levels) {
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3452 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3454 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3457 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3460 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3463 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3465 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3466 "0x%x\n", signal_levels);
3467 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3471 /* Gen7's DP voltage swing and pre-emphasis control */
3473 intel_gen7_edp_signal_levels(uint8_t train_set)
3475 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3476 DP_TRAIN_PRE_EMPHASIS_MASK);
3477 switch (signal_levels) {
3478 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3479 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3481 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3486 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3488 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3490 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3491 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3493 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3496 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3497 "0x%x\n", signal_levels);
3498 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3502 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
/*
 * Translate the DPCD-requested voltage swing / pre-emphasis combination in
 * train_set into a HSW DDI buffer translation table index
 * (DDI_BUF_TRANS_SELECT).  Unsupported combinations fall back to entry 0.
 */
3504 intel_hsw_signal_levels(uint8_t train_set)
3506 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3507 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3508 	switch (signal_levels) {
3509 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3510 		return DDI_BUF_TRANS_SELECT(0);
3511 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3512 		return DDI_BUF_TRANS_SELECT(1);
3513 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3514 		return DDI_BUF_TRANS_SELECT(2);
3515 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3516 		return DDI_BUF_TRANS_SELECT(3);
3518 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3519 		return DDI_BUF_TRANS_SELECT(4);
3520 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3521 		return DDI_BUF_TRANS_SELECT(5);
3522 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3523 		return DDI_BUF_TRANS_SELECT(6);
3525 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3526 		return DDI_BUF_TRANS_SELECT(7);
3527 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3528 		return DDI_BUF_TRANS_SELECT(8);
3530 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3531 		return DDI_BUF_TRANS_SELECT(9);
	/* Anything else is out of range for the HSW translation table. */
3533 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3534 			      "0x%x\n", signal_levels);
3535 		return DDI_BUF_TRANS_SELECT(0);
3539 /* Properly updates "DP" with the correct signal levels. */
/*
 * Compute the platform-specific signal-level bits from the first lane's
 * train_set entry and merge them into *DP under the platform's mask.
 * Only the bits covered by "mask" are replaced; the rest of *DP is kept.
 */
3541 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3543 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3544 	enum port port = intel_dig_port->port;
3545 	struct drm_device *dev = intel_dig_port->base.base.dev;
3546 	uint32_t signal_levels, mask;
	/* All lanes are programmed alike; lane 0's request drives the levels. */
3547 	uint8_t train_set = intel_dp->train_set[0];
3549 	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3550 		signal_levels = intel_hsw_signal_levels(train_set);
3551 		mask = DDI_BUF_EMP_MASK;
3552 	} else if (IS_CHERRYVIEW(dev)) {
	/*
	 * NOTE(review): mask is not visibly assigned on the CHV/VLV paths in
	 * this view — confirm against the full file (upstream uses mask = 0
	 * there because the helpers program the PHY directly).
	 */
3553 		signal_levels = intel_chv_signal_levels(intel_dp);
3555 	} else if (IS_VALLEYVIEW(dev)) {
3556 		signal_levels = intel_vlv_signal_levels(intel_dp);
3558 	} else if (IS_GEN7(dev) && port == PORT_A) {
3559 		signal_levels = intel_gen7_edp_signal_levels(train_set);
3560 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3561 	} else if (IS_GEN6(dev) && port == PORT_A) {
3562 		signal_levels = intel_gen6_edp_signal_levels(train_set);
3563 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3565 		signal_levels = intel_gen4_signal_levels(train_set);
3566 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3569 	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3571 	*DP = (*DP & ~mask) | signal_levels;
/*
 * Program the requested training pattern on both the source (output
 * register) and the sink (DPCD TRAINING_PATTERN_SET), then write the
 * per-lane TRAINING_LANEx_SET values in the same AUX transaction —
 * unless the pattern is DISABLE, where the lane bytes must be omitted.
 */
3575 intel_dp_set_link_train(struct intel_dp *intel_dp,
3577 			uint8_t dp_train_pat)
3579 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3580 	struct drm_device *dev = intel_dig_port->base.base.dev;
3581 	struct drm_i915_private *dev_priv = dev->dev_private;
	/* One byte for the pattern plus one per lane. */
3582 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	/* Source side first: update the port register with the new pattern. */
3585 	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3587 	I915_WRITE(intel_dp->output_reg, *DP);
3588 	POSTING_READ(intel_dp->output_reg);
3590 	buf[0] = dp_train_pat;
3591 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3592 	    DP_TRAINING_PATTERN_DISABLE) {
3593 		/* don't write DP_TRAINING_LANEx_SET on disable */
3596 		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3597 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3598 		len = intel_dp->lane_count + 1;
3601 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * Restart training from scratch: zero the per-lane train_set (lowest
 * swing/pre-emphasis), recompute the signal levels into *DP, and program
 * the given training pattern on source and sink.
 */
3608 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3609 			uint8_t dp_train_pat)
3611 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3612 	intel_dp_set_signal_levels(intel_dp, DP);
3613 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * Mid-training adjustment: derive new swing/pre-emphasis from the sink's
 * link_status, reprogram the source's signal levels, then push the new
 * per-lane values to DPCD TRAINING_LANE0_SET.  Returns true when all
 * lane bytes were written.
 */
3617 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3618 			const uint8_t link_status[DP_LINK_STATUS_SIZE])
3620 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 	struct drm_device *dev = intel_dig_port->base.base.dev;
3622 	struct drm_i915_private *dev_priv = dev->dev_private;
3625 	intel_get_adjust_train(intel_dp, link_status);
3626 	intel_dp_set_signal_levels(intel_dp, DP);
3628 	I915_WRITE(intel_dp->output_reg, *DP);
3629 	POSTING_READ(intel_dp->output_reg);
3631 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3632 				intel_dp->train_set, intel_dp->lane_count);
3634 	return ret == intel_dp->lane_count;
/*
 * Switch the DDI transport to idle-pattern transmission and, where
 * required, wait for the hardware to report the idle pattern done.
 */
3637 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3639 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3640 	struct drm_device *dev = intel_dig_port->base.base.dev;
3641 	struct drm_i915_private *dev_priv = dev->dev_private;
3642 	enum port port = intel_dig_port->port;
3648 	val = I915_READ(DP_TP_CTL(port));
3649 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3650 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3651 	I915_WRITE(DP_TP_CTL(port), val);
3654 	 * On PORT_A we can have only eDP in SST mode. There the only reason
3655 	 * we need to set idle transmission mode is to work around a HW issue
3656 	 * where we enable the pipe while not in idle link-training mode.
3657 	 * In this case there is requirement to wait for a minimum number of
3658 	 * idle patterns to be sent.
3663 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3665 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3668 /* Enable corresponding port and start training pattern 1 */
/*
 * Link-training phase 1 (clock recovery): write the link configuration
 * (bandwidth, lane count, optional link-rate-select, downspread/8b10b),
 * start pattern 1, then loop adjusting voltage swing / pre-emphasis per
 * the sink's feedback until clock recovery succeeds or the retry limits
 * (5 same-voltage tries, 5 full max-voltage restarts) are exhausted.
 */
3670 intel_dp_start_link_train(struct intel_dp *intel_dp)
3672 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3673 	struct drm_device *dev = encoder->dev;
3676 	int voltage_tries, loop_tries;
3677 	uint32_t DP = intel_dp->DP;
3678 	uint8_t link_config[2];
3681 		intel_ddi_prepare_link_retrain(encoder);
3683 	/* Write the link configuration data */
3684 	link_config[0] = intel_dp->link_bw;
3685 	link_config[1] = intel_dp->lane_count;
3686 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3687 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3688 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* eDP 1.4 sinks select the rate via LINK_RATE_SET instead of LINK_BW. */
3689 	if (intel_dp->num_sink_rates)
3690 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3691 				&intel_dp->rate_select, 1);
3694 	link_config[1] = DP_SET_ANSI_8B10B;
3695 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3699 	/* clock recovery */
3700 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3701 				       DP_TRAINING_PATTERN_1 |
3702 				       DP_LINK_SCRAMBLING_DISABLE)) {
3703 		DRM_ERROR("failed to enable link training\n");
3711 		uint8_t link_status[DP_LINK_STATUS_SIZE];
	/* DPCD-mandated delay before reading back lane status. */
3713 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3714 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3715 			DRM_ERROR("failed to get link status\n");
3719 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3720 			DRM_DEBUG_KMS("clock recovery OK\n");
3724 		/* Check to see if we've tried the max voltage */
3725 		for (i = 0; i < intel_dp->lane_count; i++)
3726 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3728 		if (i == intel_dp->lane_count) {
3730 			if (loop_tries == 5) {
3731 				DRM_ERROR("too many full retries, give up\n");
	/* All lanes maxed out: restart from the lowest levels. */
3734 			intel_dp_reset_link_train(intel_dp, &DP,
3735 						  DP_TRAINING_PATTERN_1 |
3736 						  DP_LINK_SCRAMBLING_DISABLE);
3741 		/* Check to see if we've tried the same voltage 5 times */
3742 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3744 			if (voltage_tries == 5) {
3745 				DRM_ERROR("too many voltage retries, give up\n");
3750 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3752 		/* Update training set as requested by target */
3753 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3754 			DRM_ERROR("failed to update link training\n");
/*
 * Link-training phase 2 (channel equalization): program pattern 2 (or 3
 * for HBR2 / TPS3-capable sinks) and iterate on the sink's feedback.
 * If clock recovery is lost, restart phase 1; after 5 failed EQ tries,
 * also fall back to a full retrain.  Ends by putting the link into
 * idle-pattern transmission.
 */
3763 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3765 	bool channel_eq = false;
3766 	int tries, cr_tries;
3767 	uint32_t DP = intel_dp->DP;
3768 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3770 	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3771 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3772 		training_pattern = DP_TRAINING_PATTERN_3;
3774 	/* channel equalization */
3775 	if (!intel_dp_set_link_train(intel_dp, &DP,
3777 				     DP_LINK_SCRAMBLING_DISABLE)) {
3778 		DRM_ERROR("failed to start channel equalization\n");
3786 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3789 			DRM_ERROR("failed to train DP, aborting\n");
	/* DPCD-mandated delay before polling equalization status. */
3793 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3794 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3795 			DRM_ERROR("failed to get link status\n");
3799 		/* Make sure clock is still ok */
3800 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3801 			intel_dp_start_link_train(intel_dp);
3802 			intel_dp_set_link_train(intel_dp, &DP,
3804 						DP_LINK_SCRAMBLING_DISABLE);
3809 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3814 		/* Try 5 times, then try clock recovery if that fails */
3816 			intel_dp_start_link_train(intel_dp);
3817 			intel_dp_set_link_train(intel_dp, &DP,
3819 						DP_LINK_SCRAMBLING_DISABLE);
3825 		/* Update training set as requested by target */
3826 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3827 			DRM_ERROR("failed to update link training\n");
3833 	intel_dp_set_idle_link_train(intel_dp);
3838 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End link training: tell source and sink to leave training mode. */
3842 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3844 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3845 				DP_TRAINING_PATTERN_DISABLE);
/*
 * Power down the DP link on non-DDI platforms: put the port into the
 * idle training pattern, apply the IBX transcoder-B-select workaround,
 * then disable audio and the port itself and honour the panel's
 * power-down delay.  No-op (with a WARN) on DDI hardware or if the
 * port is already disabled.
 */
3849 intel_dp_link_down(struct intel_dp *intel_dp)
3851 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3852 	enum port port = intel_dig_port->port;
3853 	struct drm_device *dev = intel_dig_port->base.base.dev;
3854 	struct drm_i915_private *dev_priv = dev->dev_private;
3855 	uint32_t DP = intel_dp->DP;
	/* DDI ports are torn down elsewhere; reaching here is a driver bug. */
3857 	if (WARN_ON(HAS_DDI(dev)))
3860 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3863 	DRM_DEBUG_KMS("\n");
3865 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3866 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3867 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3869 		if (IS_CHERRYVIEW(dev))
3870 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3872 			DP &= ~DP_LINK_TRAIN_MASK;
3873 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3875 	POSTING_READ(intel_dp->output_reg);
3877 	if (HAS_PCH_IBX(dev) &&
3878 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3879 		/* Hardware workaround: leaving our transcoder select
3880 		 * set to transcoder B while it's off will prevent the
3881 		 * corresponding HDMI output on transcoder A.
3883 		 * Combine this with another hardware workaround:
3884 		 * transcoder select bit can only be cleared while the
3887 		DP &= ~DP_PIPEB_SELECT;
3888 		I915_WRITE(intel_dp->output_reg, DP);
3889 		POSTING_READ(intel_dp->output_reg);
3892 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3893 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3894 	POSTING_READ(intel_dp->output_reg);
	/* Respect the panel's required power-down time before returning. */
3895 	msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capability block and derive
 * feature flags from it: PSR support (eDP only), TPS3 capability,
 * eDP 1.4 intermediate link rates, and downstream-port info.  Returns
 * false if the AUX read fails, the DPCD is absent, or downstream port
 * status cannot be fetched.
 */
3899 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3901 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3902 	struct drm_device *dev = dig_port->base.base.dev;
3903 	struct drm_i915_private *dev_priv = dev->dev_private;
3906 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3907 				    sizeof(intel_dp->dpcd)) < 0)
3908 		return false; /* aux transfer failed */
3910 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3912 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3913 		return false; /* DPCD not present */
3915 	/* Check if the panel supports PSR */
3916 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3917 	if (is_edp(intel_dp)) {
3918 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3920 					sizeof(intel_dp->psr_dpcd));
3921 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3922 			dev_priv->psr.sink_support = true;
3923 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3927 	/* Training Pattern 3 support, both source and sink */
3928 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3929 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3930 	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3931 		intel_dp->use_tps3 = true;
3932 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3934 		intel_dp->use_tps3 = false;
3936 	/* Intermediate frequency support */
3937 	if (is_edp(intel_dp) &&
3938 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3939 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3940 	    (rev >= 0x03)) { /* eDp v1.4 or higher */
3941 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3944 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3945 				DP_SUPPORTED_LINK_RATES,
3947 				sizeof(sink_rates));
3949 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3950 			int val = le16_to_cpu(sink_rates[i]);
3955 			/* Value read is in kHz while drm clock is saved in deca-kHz */
3956 			intel_dp->sink_rates[i] = (val * 200) / 10;
3958 		intel_dp->num_sink_rates = i;
3961 	intel_dp_print_rates(intel_dp);
3963 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3964 	      DP_DWN_STRM_PORT_PRESENT))
3965 		return true; /* native DP sink */
3967 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3968 		return true; /* no per-port downstream info */
3970 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3971 				    intel_dp->downstream_ports,
3972 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3973 		return false; /* downstream port status fetch failed */
/*
 * Best-effort: read and log the sink and branch device OUIs for
 * debugging.  Skipped when the sink does not advertise OUI support;
 * failed reads are silently ignored.
 */
3979 intel_dp_probe_oui(struct intel_dp *intel_dp)
3983 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3986 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3987 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3988 			      buf[0], buf[1], buf[2]);
3990 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3991 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3992 			      buf[0], buf[1], buf[2]);
/*
 * Probe the sink's MST capability (requires DPCD >= 1.2 and the source
 * to allow MST), update intel_dp->is_mst accordingly, propagate the
 * state to the MST topology manager, and return whether MST is active.
 */
3997 intel_dp_probe_mst(struct intel_dp *intel_dp)
4001 	if (!intel_dp->can_mst)
4004 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4007 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4008 		if (buf[0] & DP_MST_CAP) {
4009 			DRM_DEBUG_KMS("Sink is MST capable\n");
4010 			intel_dp->is_mst = true;
4012 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4013 			intel_dp->is_mst = false;
4017 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4018 	return intel_dp->is_mst;
/*
 * Ask the sink to compute a frame CRC (DP test-sink feature): start CRC
 * generation, wait up to 6 vblanks for TEST_COUNT to advance, read the
 * six CRC bytes into crc[], then stop CRC generation.  Returns 0 on
 * success or a negative error code.
 */
4022 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4024 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4025 	struct drm_device *dev = intel_dig_port->base.base.dev;
4026 	struct intel_crtc *intel_crtc =
4027 		to_intel_crtc(intel_dig_port->base.base.crtc);
4032 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4035 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4038 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4041 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4042 				buf | DP_TEST_SINK_START) < 0)
4045 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
	/* Remember the current CRC count so we can tell when a new one lands. */
4047 	test_crc_count = buf & DP_TEST_COUNT_MASK;
4050 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4051 				      DP_TEST_SINK_MISC, &buf) < 0)
4053 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4054 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4056 	if (attempts == 0) {
4057 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4061 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4064 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4066 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4067 			       buf & ~DP_TEST_SINK_START) < 0)
/*
 * Read the sink's DEVICE_SERVICE_IRQ_VECTOR byte.  Returns true when
 * exactly one byte was transferred.
 */
4074 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4076 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4077 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4078 				       sink_irq_vector, 1) == 1;
/* Automated-test requests are not implemented; NAK them all. */
4082 intel_dp_handle_test_request(struct intel_dp *intel_dp)
4084 	/* NAK by default */
4085 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
/*
 * Service MST sink events: read the ESI (event status indicator) block,
 * retrain the link if channel EQ has been lost on active MST links, and
 * hand HPD IRQs to the MST topology manager, acking handled events
 * (retrying the ack up to 3 times).  If the ESI read fails, drop out of
 * MST mode and emit a hotplug event so userspace re-probes.
 */
4090 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4094 	if (intel_dp->is_mst) {
4099 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4103 		/* check link status - esi[10] = 0x200c */
4104 		if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4105 			DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4106 			intel_dp_start_link_train(intel_dp);
4107 			intel_dp_complete_link_train(intel_dp);
4108 			intel_dp_stop_link_train(intel_dp);
4111 		DRM_DEBUG_KMS("got esi %3ph\n", esi);
4112 		ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4115 			for (retry = 0; retry < 3; retry++) {
4117 				wret = drm_dp_dpcd_write(&intel_dp->aux,
4118 							 DP_SINK_COUNT_ESI+1,
4125 			bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4127 				DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
	/* ESI unreadable: assume the MST device vanished and fall back to SST. */
4135 		struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4136 		DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4137 		intel_dp->is_mst = false;
4138 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4139 		/* send a hotplug event */
4140 		drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4148  * According to DP spec
4151  * 2. Configure link according to Receiver Capabilities
4152  * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4153  * 4. Check link status on receipt of hot-plug interrupt
/*
 * Validate the link after a hotplug/IRQ while the output is active:
 * re-read link status and DPCD, clear and dispatch any sink service
 * IRQs (automated test requests are handled, CP/sink-specific ones only
 * logged), and retrain the link if channel EQ is no longer ok.
 * Caller must hold connection_mutex (asserted below).
 */
4156 intel_dp_check_link_status(struct intel_dp *intel_dp)
4158 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4159 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4161 	u8 link_status[DP_LINK_STATUS_SIZE];
4163 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	/* Nothing to do when the output or its pipe is not active. */
4165 	if (!intel_encoder->connectors_active)
4168 	if (WARN_ON(!intel_encoder->base.crtc))
4171 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4174 	/* Try to read receiver status if the link appears to be up */
4175 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4179 	/* Now read the DPCD to see if it's actually running */
4180 	if (!intel_dp_get_dpcd(intel_dp)) {
4184 	/* Try to read the source of the interrupt */
4185 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4186 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4187 		/* Clear interrupt source */
4188 		drm_dp_dpcd_writeb(&intel_dp->aux,
4189 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4192 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4193 			intel_dp_handle_test_request(intel_dp);
4194 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4195 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4198 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4199 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4200 			      intel_encoder->base.name);
4201 		intel_dp_start_link_train(intel_dp);
4202 		intel_dp_complete_link_train(intel_dp);
4203 		intel_dp_stop_link_train(intel_dp);
4207 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from the DPCD: native sinks are connected;
 * HPD-capable branch devices report via SINK_COUNT; otherwise probe DDC
 * and finally treat VGA/non-EDID legacy downstream ports as "unknown".
 */
4208 static enum drm_connector_status
4209 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4211 	uint8_t *dpcd = intel_dp->dpcd;
4214 	if (!intel_dp_get_dpcd(intel_dp))
4215 		return connector_status_disconnected;
4217 	/* if there's no downstream port, we're done */
4218 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4219 		return connector_status_connected;
4221 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4222 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4223 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4226 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4228 			return connector_status_unknown;
4230 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4231 					      : connector_status_disconnected;
4234 	/* If no HPD, poke DDC gently */
4235 	if (drm_probe_ddc(intel_dp->aux.ddc))
4236 		return connector_status_connected;
4238 	/* Well we tried, say unknown for unreliable port types */
4239 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4240 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4241 		if (type == DP_DS_PORT_TYPE_VGA ||
4242 		    type == DP_DS_PORT_TYPE_NON_EDID)
4243 			return connector_status_unknown;
	/* Pre-1.1 DPCD: only the coarse downstream-port type is available. */
4245 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4246 			DP_DWN_STRM_PORT_TYPE_MASK;
4247 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4248 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4249 			return connector_status_unknown;
4252 	/* Anything else is out of spec, warn and ignore */
4253 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4254 	return connector_status_disconnected;
4257 static enum drm_connector_status
/*
 * eDP panels cannot be unplugged: report the lid/panel state and treat
 * "unknown" as connected.
 */
4258 edp_detect(struct intel_dp *intel_dp)
4260 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4261 	enum drm_connector_status status;
4263 	status = intel_panel_detect(dev);
4264 	if (status == connector_status_unknown)
4265 		status = connector_status_connected;
4270 static enum drm_connector_status
/*
 * PCH-split platforms: use the IBX live-status bit as a fast
 * disconnect check, then fall through to DPCD-based detection.
 */
4271 ironlake_dp_detect(struct intel_dp *intel_dp)
4273 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4274 	struct drm_i915_private *dev_priv = dev->dev_private;
4275 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4277 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4278 		return connector_status_disconnected;
4280 	return intel_dp_detect_dpcd(intel_dp);
/*
 * Sample the PORT_HOTPLUG_STAT live-status bit for the given port
 * (separate bit definitions for VLV vs. G4X-class hardware).
 * Returns 0 when the bit is clear (disconnected); other return values
 * are established outside the visible lines.
 */
4283 static int g4x_digital_port_connected(struct drm_device *dev,
4284 				       struct intel_digital_port *intel_dig_port)
4286 	struct drm_i915_private *dev_priv = dev->dev_private;
4289 	if (IS_VALLEYVIEW(dev)) {
4290 		switch (intel_dig_port->port) {
4292 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4295 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4298 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4304 		switch (intel_dig_port->port) {
4306 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4309 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4312 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4319 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4324 static enum drm_connector_status
/*
 * G4X-class detection: eDP reports the panel state, external DP checks
 * the hotplug live-status bit before DPCD-based detection.
 */
4325 g4x_dp_detect(struct intel_dp *intel_dp)
4327 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4328 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4331 	/* Can't disconnect eDP, but you can close the lid... */
4332 	if (is_edp(intel_dp)) {
4333 		enum drm_connector_status status;
4335 		status = intel_panel_detect(dev);
4336 		if (status == connector_status_unknown)
4337 			status = connector_status_connected;
4341 	ret = g4x_digital_port_connected(dev, intel_dig_port);
4343 		return connector_status_unknown;
4345 		return connector_status_disconnected;
4347 	return intel_dp_detect_dpcd(intel_dp);
4350 static struct edid *
/*
 * Return an EDID for the connector: duplicate the cached one if present
 * (NULL if the cache holds an error marker), otherwise read it over the
 * AUX DDC channel.  Caller owns the returned EDID.
 */
4351 intel_dp_get_edid(struct intel_dp *intel_dp)
4353 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4355 	/* use cached edid if we have one */
4356 	if (intel_connector->edid) {
4358 		if (IS_ERR(intel_connector->edid))
4361 		return drm_edid_duplicate(intel_connector->edid);
4363 	return drm_get_edid(&intel_connector->base,
/*
 * Fetch and stash the EDID on the connector and refresh has_audio:
 * honour a forced audio setting, otherwise derive it from the EDID.
 */
4368 intel_dp_set_edid(struct intel_dp *intel_dp)
4370 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4373 	edid = intel_dp_get_edid(intel_dp);
4374 	intel_connector->detect_edid = edid;
4376 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4377 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4379 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached detection EDID and clear the derived audio flag. */
4383 intel_dp_unset_edid(struct intel_dp *intel_dp)
4385 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4387 	kfree(intel_connector->detect_edid);
4388 	intel_connector->detect_edid = NULL;
4390 	intel_dp->has_audio = false;
4393 static enum intel_display_power_domain
/*
 * Take a runtime-PM reference on the port's power domain and return the
 * domain so the caller can release it with intel_dp_power_put().
 */
4394 intel_dp_power_get(struct intel_dp *dp)
4396 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4397 	enum intel_display_power_domain power_domain;
4399 	power_domain = intel_display_port_power_domain(encoder);
4400 	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4402 	return power_domain;
/* Release the power-domain reference taken by intel_dp_power_get(). */
4406 intel_dp_power_put(struct intel_dp *dp,
4407 		   enum intel_display_power_domain power_domain)
4409 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4410 	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4413 static enum drm_connector_status
/*
 * drm_connector_funcs.detect: determine whether a sink is present.
 * MST connectors always read as disconnected from the base connector's
 * point of view.  Otherwise, under a power-domain reference, run the
 * platform detect path, probe OUI and MST capability, and cache the
 * EDID on success.
 */
4414 intel_dp_detect(struct drm_connector *connector, bool force)
4416 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4417 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4418 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4419 	struct drm_device *dev = connector->dev;
4420 	enum drm_connector_status status;
4421 	enum intel_display_power_domain power_domain;
4423 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4424 		      connector->base.id, connector->name);
4425 	intel_dp_unset_edid(intel_dp);
4427 	if (intel_dp->is_mst) {
4428 		/* MST devices are disconnected from a monitor POV */
4429 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4430 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4431 		return connector_status_disconnected;
4434 	power_domain = intel_dp_power_get(intel_dp);
4436 	/* Can't disconnect eDP, but you can close the lid... */
4437 	if (is_edp(intel_dp))
4438 		status = edp_detect(intel_dp);
4439 	else if (HAS_PCH_SPLIT(dev))
4440 		status = ironlake_dp_detect(intel_dp);
4442 		status = g4x_dp_detect(intel_dp);
4443 	if (status != connector_status_connected)
4446 	intel_dp_probe_oui(intel_dp);
4449 	ret = intel_dp_probe_mst(intel_dp);
4451 		/* if we are in MST mode then this connector
4452 		   won't appear connected or have anything with EDID on it */
4453 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4454 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4455 		status = connector_status_disconnected;
4460 	intel_dp_set_edid(intel_dp);
4462 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4463 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4464 	status = connector_status_connected;
4467 	intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector_funcs.force: refresh the cached EDID for a connector
 * userspace has forced to "connected", under a power-domain reference.
 * Nothing to do if the connector is not marked connected.
 */
4472 intel_dp_force(struct drm_connector *connector)
4474 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4475 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4476 	enum intel_display_power_domain power_domain;
4478 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4479 		      connector->base.id, connector->name);
4480 	intel_dp_unset_edid(intel_dp);
4482 	if (connector->status != connector_status_connected)
4485 	power_domain = intel_dp_power_get(intel_dp);
4487 	intel_dp_set_edid(intel_dp);
4489 	intel_dp_power_put(intel_dp, power_domain);
4491 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4492 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes: populate modes from the cached
 * detection EDID; for eDP without an EDID, fall back to the panel's
 * fixed mode.
 */
4495 static int intel_dp_get_modes(struct drm_connector *connector)
4497 	struct intel_connector *intel_connector = to_intel_connector(connector);
4500 	edid = intel_connector->detect_edid;
4502 		int ret = intel_connector_update_modes(connector, edid);
4507 	/* if eDP has no EDID, fall back to fixed mode */
4508 	if (is_edp(intel_attached_dp(connector)) &&
4509 	    intel_connector->panel.fixed_mode) {
4510 		struct drm_display_mode *mode;
4512 		mode = drm_mode_duplicate(connector->dev,
4513 					  intel_connector->panel.fixed_mode);
4515 			drm_mode_probed_add(connector, mode);
/*
 * Report whether the cached detection EDID advertises audio support;
 * false when no EDID is cached.
 */
4524 intel_dp_detect_audio(struct drm_connector *connector)
4526 	bool has_audio = false;
4529 	edid = to_intel_connector(connector)->detect_edid;
4531 		has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property: handle the force-audio,
 * broadcast-RGB and (eDP-only) scaling-mode properties.  When an
 * accepted change affects the output, the active CRTC's mode is
 * restored to apply it.
 */
4537 intel_dp_set_property(struct drm_connector *connector,
4538 		      struct drm_property *property,
4541 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4542 	struct intel_connector *intel_connector = to_intel_connector(connector);
4543 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4544 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4547 	ret = drm_object_property_set_value(&connector->base, property, val);
4551 	if (property == dev_priv->force_audio_property) {
4555 		if (i == intel_dp->force_audio)
4558 		intel_dp->force_audio = i;
4560 		if (i == HDMI_AUDIO_AUTO)
4561 			has_audio = intel_dp_detect_audio(connector);
4563 			has_audio = (i == HDMI_AUDIO_ON);
4565 		if (has_audio == intel_dp->has_audio)
4568 		intel_dp->has_audio = has_audio;
4572 	if (property == dev_priv->broadcast_rgb_property) {
4573 		bool old_auto = intel_dp->color_range_auto;
4574 		uint32_t old_range = intel_dp->color_range;
4577 		case INTEL_BROADCAST_RGB_AUTO:
4578 			intel_dp->color_range_auto = true;
4580 		case INTEL_BROADCAST_RGB_FULL:
4581 			intel_dp->color_range_auto = false;
4582 			intel_dp->color_range = 0;
4584 		case INTEL_BROADCAST_RGB_LIMITED:
4585 			intel_dp->color_range_auto = false;
4586 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
	/* No effective change: skip the modeset below. */
4592 		if (old_auto == intel_dp->color_range_auto &&
4593 		    old_range == intel_dp->color_range)
4599 	if (is_edp(intel_dp) &&
4600 	    property == connector->dev->mode_config.scaling_mode_property) {
4601 		if (val == DRM_MODE_SCALE_NONE) {
4602 			DRM_DEBUG_KMS("no scaling not supported\n");
4606 		if (intel_connector->panel.fitting_mode == val) {
4607 			/* the eDP scaling property is not changed */
4610 		intel_connector->panel.fitting_mode = val;
4618 	if (intel_encoder->base.crtc)
4619 		intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy: free cached EDIDs, tear down the eDP
 * panel state (identified by connector type, since the encoder may
 * already be gone), and clean up the DRM connector.
 */
4625 intel_dp_connector_destroy(struct drm_connector *connector)
4627 	struct intel_connector *intel_connector = to_intel_connector(connector);
4629 	kfree(intel_connector->detect_edid);
4631 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4632 		kfree(intel_connector->edid);
4634 	/* Can't call is_edp() since the encoder may have been destroyed
4636 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4637 		intel_panel_fini(&intel_connector->panel);
4639 	drm_connector_cleanup(connector);
/*
 * drm_encoder_funcs.destroy: unregister the AUX channel, tear down any
 * MST state, force panel VDD off for eDP (cancelling the delayed-off
 * work first), unregister the eDP reboot notifier, and free the port.
 */
4643 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4645 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4646 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4648 	drm_dp_aux_unregister(&intel_dp->aux);
4649 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4650 	if (is_edp(intel_dp)) {
4651 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4653 		 * vdd might still be enabled do to the delayed vdd off.
4654 		 * Make sure vdd is actually turned off here.
4657 		edp_panel_vdd_off_sync(intel_dp);
4658 		pps_unlock(intel_dp);
4661 		if (intel_dp->edp_notifier.notifier_call) {
4662 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4663 			intel_dp->edp_notifier.notifier_call = NULL;
4667 	drm_encoder_cleanup(encoder);
4668 	kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, cancel the delayed VDD-off work and force
 * panel VDD off synchronously so it is not left on across suspend.
 */
4671 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4673 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4675 	if (!is_edp(intel_dp))
4679 	 * vdd might still be enabled do to the delayed vdd off.
4680 	 * Make sure vdd is actually turned off here.
4682 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4684 	edp_panel_vdd_off_sync(intel_dp);
4685 	pps_unlock(intel_dp);
/*
 * Align driver state with a VDD bit the BIOS left enabled at boot or
 * resume: take the matching power-domain reference and schedule the
 * delayed VDD off so the reference is eventually dropped.
 * Caller must hold pps_mutex (asserted below).
 */
4688 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4690 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4691 	struct drm_device *dev = intel_dig_port->base.base.dev;
4692 	struct drm_i915_private *dev_priv = dev->dev_private;
4693 	enum intel_display_power_domain power_domain;
4695 	lockdep_assert_held(&dev_priv->pps_mutex);
4697 	if (!edp_have_panel_vdd(intel_dp))
4701 	 * The VDD bit needs a power domain reference, so if the bit is
4702 	 * already enabled when we boot or resume, grab this reference and
4703 	 * schedule a vdd off, so we don't hold on to the reference
4706 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4707 	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4708 	intel_display_power_get(dev_priv, power_domain);
4710 	edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset: eDP-only resume fixup.  Re-reads the power
 * sequencer assignment on VLV (the BIOS may have changed it) and
 * sanitizes a BIOS-enabled VDD bit.
 */
4713 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4715 	struct intel_dp *intel_dp;
4717 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4720 	intel_dp = enc_to_intel_dp(encoder);
4725 	 * Read out the current power sequencer assignment,
4726 	 * in case the BIOS did something with it.
4728 	if (IS_VALLEYVIEW(encoder->dev))
4729 		vlv_initial_power_sequencer_setup(intel_dp);
4731 	intel_edp_panel_vdd_sanitize(intel_dp);
4733 	pps_unlock(intel_dp);
/* DRM vtables wiring the DP connector and encoder callbacks above. */
4736 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4737 	.dpms = intel_connector_dpms,
4738 	.detect = intel_dp_detect,
4739 	.force = intel_dp_force,
4740 	.fill_modes = drm_helper_probe_single_connector_modes,
4741 	.set_property = intel_dp_set_property,
4742 	.atomic_get_property = intel_connector_atomic_get_property,
4743 	.destroy = intel_dp_connector_destroy,
4744 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4745 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4748 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4749 	.get_modes = intel_dp_get_modes,
4750 	.mode_valid = intel_dp_mode_valid,
4751 	.best_encoder = intel_best_encoder,
4754 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4755 	.reset = intel_dp_encoder_reset,
4756 	.destroy = intel_dp_encoder_destroy,
/* Hot-plug callback; body not visible in this view. */
4760 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/*
 * Hotplug IRQ handler for a DP digital port.  @long_hpd distinguishes a
 * long pulse (physical plug/unplug — re-probe the sink) from a short pulse
 * (sink-generated IRQ — service MST events or re-check link status).
 * Runs with a display power reference held for the port's power domain.
 * NOTE(review): return type, braces and goto/return lines are elided in
 * this dump; comments only, code byte-identical.
 */
4766 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4768 struct intel_dp *intel_dp = &intel_dig_port->dp;
4769 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4770 struct drm_device *dev = intel_dig_port->base.base.dev;
4771 struct drm_i915_private *dev_priv = dev->dev_private;
4772 enum intel_display_power_domain power_domain;
/* Re-latch the encoder type to DP unless it is (and stays) eDP. */
4775 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4776 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4778 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4780 * vdd off can generate a long pulse on eDP which
4781 * would require vdd on to handle it, and thus we
4782 * would end up in an endless cycle of
4783 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4785 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4786 port_name(intel_dig_port->port));
4790 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4791 port_name(intel_dig_port->port),
4792 long_hpd ? "long" : "short");
4794 power_domain = intel_display_port_power_domain(intel_encoder);
4795 intel_display_power_get(dev_priv, power_domain);
/* Long pulse: verify the port is still physically connected. */
4800 if (HAS_PCH_SPLIT(dev)) {
4801 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4804 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
/* Sink gone or unreadable: DPCD read failing ends the long-pulse path. */
4808 if (!intel_dp_get_dpcd(intel_dp)) {
4812 intel_dp_probe_oui(intel_dp);
4815 if (!intel_dp_probe_mst(intel_dp))
/* Short pulse while in MST mode: service sink-side MST IRQs. */
4820 if (intel_dp->is_mst) {
4822 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
/* Short pulse in SST mode: re-validate link training state now. */
4827 if (!intel_dp->is_mst) {
4829 * we'll check the link status via the normal hot plug path later -
4830 * but for short hpds we should check it now
4832 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4833 intel_dp_check_link_status(intel_dp);
4834 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4840 /* if we were in MST mode, and device is not there get out of MST mode */
4841 if (intel_dp->is_mst) {
4842 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4843 intel_dp->is_mst = false;
4845 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
/* Drop the display power reference taken above. */
4849 intel_display_power_put(dev_priv, power_domain);
4854 /* Return which DP Port should be selected for Transcoder DP control */
/*
 * Walks the encoders attached to @crtc and returns the output register of
 * the first DP/eDP encoder found.  The fallthrough value when no DP encoder
 * is attached is elided in this dump — presumably a sentinel; confirm.
 */
4856 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4858 struct drm_device *dev = crtc->dev;
4859 struct intel_encoder *intel_encoder;
4860 struct intel_dp *intel_dp;
4862 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4863 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4865 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4866 intel_encoder->type == INTEL_OUTPUT_EDP)
4867 return intel_dp->output_reg;
4873 /* check the VBT to see whether the eDP is on DP-D port */
/*
 * Returns true when the VBT child-device table marks the given DP port as
 * an eDP panel.  Ports A/E and an empty child table are handled by the
 * (elided) early-return paths.
 */
4874 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4876 struct drm_i915_private *dev_priv = dev->dev_private;
4877 union child_device_config *p_child;
/* Map driver port enum -> VBT DVO port id; only B/C/D can be VBT eDP here. */
4879 static const short port_mapping[] = {
4880 [PORT_B] = PORT_IDPB,
4881 [PORT_C] = PORT_IDPC,
4882 [PORT_D] = PORT_IDPD,
4888 if (!dev_priv->vbt.child_dev_num)
4891 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4892 p_child = dev_priv->vbt.child_dev + i;
/* Match both the port and the eDP bits of the VBT device type. */
4894 if (p_child->common.dvo_port == port_mapping[port] &&
4895 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4896 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the connector properties every DP connector gets (force-audio,
 * broadcast RGB), plus the panel-fitter scaling property for eDP, which
 * defaults to aspect-preserving scaling.
 */
4903 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4905 struct intel_connector *intel_connector = to_intel_connector(connector);
4907 intel_attach_force_audio_property(connector);
4908 intel_attach_broadcast_rgb_property(connector);
/* Let the driver pick full vs. limited RGB range automatically. */
4909 intel_dp->color_range_auto = true;
4911 if (is_edp(intel_dp)) {
4912 drm_mode_create_scaling_mode_property(connector->dev);
4913 drm_object_attach_property(
4915 connector->dev->mode_config.scaling_mode_property,
4916 DRM_MODE_SCALE_ASPECT);
4917 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the eDP power-sequencing timestamps with "now" so the delay
 * bookkeeping (power cycle / power on / backlight off waits) starts from a
 * known point at init.
 */
4921 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4923 intel_dp->last_power_cycle = jiffies;
4924 intel_dp->last_power_on = jiffies;
4925 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power sequencing delays (T1-T3, T8, T9, T10,
 * T11-T12) and cache them in intel_dp->pps_delays plus the derived
 * millisecond fields.  For each delay the max of the current hardware
 * register value and the VBT value is used; if both are zero we fall back
 * to the eDP spec limit.  Idempotent: returns early once t11_t12 is set.
 * Must be called under pps_mutex (asserted below).
 */
4929 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4930 struct intel_dp *intel_dp)
4932 struct drm_i915_private *dev_priv = dev->dev_private;
4933 struct edp_power_seq cur, vbt, spec,
4934 *final = &intel_dp->pps_delays;
4935 u32 pp_on, pp_off, pp_div, pp;
4936 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4938 lockdep_assert_held(&dev_priv->pps_mutex);
4940 /* already initialized? */
4941 if (final->t11_t12 != 0)
/* Pick the PPS register bank: PCH on split platforms, per-pipe on VLV. */
4944 if (HAS_PCH_SPLIT(dev)) {
4945 pp_ctrl_reg = PCH_PP_CONTROL;
4946 pp_on_reg = PCH_PP_ON_DELAYS;
4947 pp_off_reg = PCH_PP_OFF_DELAYS;
4948 pp_div_reg = PCH_PP_DIVISOR;
4950 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4952 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4953 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4954 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4955 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4958 /* Workaround: Need to write PP_CONTROL with the unlock key as
4959 * the very first thing. */
4960 pp = ironlake_get_pp_control(intel_dp);
4961 I915_WRITE(pp_ctrl_reg, pp);
4963 pp_on = I915_READ(pp_on_reg);
4964 pp_off = I915_READ(pp_off_reg);
4965 pp_div = I915_READ(pp_div_reg);
4967 /* Pull timing values out of registers */
4968 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4969 PANEL_POWER_UP_DELAY_SHIFT;
4971 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4972 PANEL_LIGHT_ON_DELAY_SHIFT;
4974 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4975 PANEL_LIGHT_OFF_DELAY_SHIFT;
4977 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4978 PANEL_POWER_DOWN_DELAY_SHIFT;
/* Register field is in 100ms units; scale to the common 100us units. */
4980 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4981 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4983 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4984 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4986 vbt = dev_priv->vbt.edp_pps;
4988 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4989 * our hw here, which are all in 100usec. */
4990 spec.t1_t3 = 210 * 10;
4991 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4992 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4993 spec.t10 = 500 * 10;
4994 /* This one is special and actually in units of 100ms, but zero
4995 * based in the hw (so we need to add 100 ms). But the sw vbt
4996 * table multiplies it with 1000 to make it in units of 100usec,
4998 spec.t11_t12 = (510 + 100) * 10;
5000 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5001 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5003 /* Use the max of the register settings and vbt. If both are
5004 * unset, fall back to the spec limits. */
5005 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5007 max(cur.field, vbt.field))
5008 assign_final(t1_t3);
5012 assign_final(t11_t12);
/* Delays above are in 100us units; convert to ms, rounding up. */
5015 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5016 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5017 intel_dp->backlight_on_delay = get_delay(t8);
5018 intel_dp->backlight_off_delay = get_delay(t9);
5019 intel_dp->panel_power_down_delay = get_delay(t10);
5020 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5023 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5024 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5025 intel_dp->panel_power_cycle_delay);
5027 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5028 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the panel power sequencer hardware registers from the delays
 * cached in intel_dp->pps_delays, including the reference-clock divisor
 * and (where the hardware has one) the panel port-select field.
 * Must be called under pps_mutex (asserted below).
 */
5032 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5033 struct intel_dp *intel_dp)
5035 struct drm_i915_private *dev_priv = dev->dev_private;
5036 u32 pp_on, pp_off, pp_div, port_sel = 0;
/* Raw clock (kHz domain) feeding the PPS divider differs PCH vs. not. */
5037 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5038 int pp_on_reg, pp_off_reg, pp_div_reg;
5039 enum port port = dp_to_dig_port(intel_dp)->port;
5040 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5042 lockdep_assert_held(&dev_priv->pps_mutex);
/* Same register-bank selection as the sequencer readout above. */
5044 if (HAS_PCH_SPLIT(dev)) {
5045 pp_on_reg = PCH_PP_ON_DELAYS;
5046 pp_off_reg = PCH_PP_OFF_DELAYS;
5047 pp_div_reg = PCH_PP_DIVISOR;
5049 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5051 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5052 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5053 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5057 * And finally store the new values in the power sequencer. The
5058 * backlight delays are set to 1 because we do manual waits on them. For
5059 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5060 * we'll end up waiting for the backlight off delay twice: once when we
5061 * do the manual sleep, and once when we disable the panel and wait for
5062 * the PP_STATUS bit to become zero.
5064 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5065 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5066 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5067 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5068 /* Compute the divisor for the pp clock, simply match the Bspec
5070 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
/* t11_t12 is stored in 100us units; the register wants 100ms units. */
5071 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5072 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5074 /* Haswell doesn't have any port selection bits for the panel
5075 * power sequencer any more. */
5076 if (IS_VALLEYVIEW(dev)) {
5077 port_sel = PANEL_PORT_SELECT_VLV(port);
5078 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5080 port_sel = PANEL_PORT_SELECT_DPA;
5082 port_sel = PANEL_PORT_SELECT_DPD;
5087 I915_WRITE(pp_on_reg, pp_on);
5088 I915_WRITE(pp_off_reg, pp_off);
5089 I915_WRITE(pp_div_reg, pp_div);
5091 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5092 I915_READ(pp_on_reg),
5093 I915_READ(pp_off_reg),
5094 I915_READ(pp_div_reg));
5098 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5100 * @refresh_rate: RR to be programmed
5102 * This function gets called when refresh rate (RR) has to be changed from
5103 * one frequency to another. Switches can be between high and low RR
5104 * supported by the panel or to any other RR based on media playback (in
5105 * this case, RR value needs to be passed from user space).
5107 * The caller of this function needs to take a lock on dev_priv->drrs.
5109 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5111 struct drm_i915_private *dev_priv = dev->dev_private;
5112 struct intel_encoder *encoder;
5113 struct intel_digital_port *dig_port = NULL;
5114 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5115 struct intel_crtc_state *config = NULL;
5116 struct intel_crtc *intel_crtc = NULL;
/* Default to the panel's native (high) refresh rate. */
5118 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5120 if (refresh_rate <= 0) {
5121 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5125 if (intel_dp == NULL) {
5126 DRM_DEBUG_KMS("DRRS not supported.\n");
5131 * FIXME: This needs proper synchronization with psr state for some
5132 * platforms that cannot have PSR and DRRS enabled at the same time.
5135 dig_port = dp_to_dig_port(intel_dp);
5136 encoder = &dig_port->base;
5137 intel_crtc = to_intel_crtc(encoder->base.crtc);
5140 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5144 config = intel_crtc->config;
5146 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5147 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matching the downclock mode means switch to low RR. */
5151 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5153 index = DRRS_LOW_RR;
5155 if (index == dev_priv->drrs.refresh_rate_type) {
5157 "DRRS requested for previously set RR...ignoring\n");
5161 if (!intel_crtc->active) {
5162 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch RR by selecting the M1/N1 or M2/N2 link
 * M/N values; gen7: toggle the PIPECONF EDP RR mode-switch bit. */
5166 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5169 intel_dp_set_m_n(intel_crtc, M1_N1);
5172 intel_dp_set_m_n(intel_crtc, M2_N2);
5176 DRM_ERROR("Unsupported refreshrate type\n");
5178 } else if (INTEL_INFO(dev)->gen > 6) {
5179 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5180 val = I915_READ(reg);
5182 if (index > DRRS_HIGH_RR) {
5183 if (IS_VALLEYVIEW(dev))
5184 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5186 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5188 if (IS_VALLEYVIEW(dev))
5189 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5191 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5193 I915_WRITE(reg, val);
5196 dev_priv->drrs.refresh_rate_type = index;
5198 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5202 * intel_edp_drrs_enable - init drrs struct if supported
5203 * @intel_dp: DP struct
5205 * Initializes frontbuffer_bits and drrs.dp
5207 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5209 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5210 struct drm_i915_private *dev_priv = dev->dev_private;
5211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5212 struct drm_crtc *crtc = dig_port->base.base.crtc;
5213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5215 if (!intel_crtc->config->has_drrs) {
5216 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5220 mutex_lock(&dev_priv->drrs.mutex);
/* Double-enable is a driver bug; drrs.dp must be NULL here. */
5221 if (WARN_ON(dev_priv->drrs.dp)) {
5222 DRM_ERROR("DRRS already enabled\n");
5226 dev_priv->drrs.busy_frontbuffer_bits = 0;
5228 dev_priv->drrs.dp = intel_dp;
5231 mutex_unlock(&dev_priv->drrs.mutex);
5235 * intel_edp_drrs_disable - Disable DRRS
5236 * @intel_dp: DP struct
 *
 * Restores the panel's fixed (high) refresh rate if currently downclocked,
 * detaches DRRS from this DP, and cancels the pending downclock work.
5239 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5241 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5242 struct drm_i915_private *dev_priv = dev->dev_private;
5243 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5244 struct drm_crtc *crtc = dig_port->base.base.crtc;
5245 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5247 if (!intel_crtc->config->has_drrs)
5250 mutex_lock(&dev_priv->drrs.mutex);
5251 if (!dev_priv->drrs.dp) {
5252 mutex_unlock(&dev_priv->drrs.mutex);
/* If we are in low RR, go back to the fixed mode's rate before teardown. */
5256 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5257 intel_dp_set_drrs_state(dev_priv->dev,
5258 intel_dp->attached_connector->panel.
5259 fixed_mode->vrefresh);
5261 dev_priv->drrs.dp = NULL;
5262 mutex_unlock(&dev_priv->drrs.mutex);
/* Cancel outside the mutex: the work itself takes drrs.mutex. */
5264 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed work that drops the panel to its low refresh rate once the
 * screen has been idle (no busy frontbuffer bits) for the scheduled
 * timeout.  Races with invalidate are resolved under drrs.mutex.
 */
5267 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5269 struct drm_i915_private *dev_priv =
5270 container_of(work, typeof(*dev_priv), drrs.work.work);
5271 struct intel_dp *intel_dp;
5273 mutex_lock(&dev_priv->drrs.mutex);
5275 intel_dp = dev_priv->drrs.dp;
5281 * The delayed work can race with an invalidate hence we need to
/* Still-dirty frontbuffers mean activity: stay at high RR. */
5285 if (dev_priv->drrs.busy_frontbuffer_bits)
5288 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5289 intel_dp_set_drrs_state(dev_priv->dev,
5290 intel_dp->attached_connector->panel.
5291 downclock_mode->vrefresh);
5294 mutex_unlock(&dev_priv->drrs.mutex);
5298 * intel_edp_drrs_invalidate - Invalidate DRRS
5300 * @frontbuffer_bits: frontbuffer plane tracking bits
5302 * When there is a disturbance on screen (due to cursor movement/time
5303 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * the high refresh rate and mark the relevant frontbuffers busy.
5306 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5308 void intel_edp_drrs_invalidate(struct drm_device *dev,
5309 unsigned frontbuffer_bits)
5311 struct drm_i915_private *dev_priv = dev->dev_private;
5312 struct drm_crtc *crtc;
5313 enum i915_pipe pipe;
5315 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Stop any pending downclock; new activity restarts the idle timer. */
5318 cancel_delayed_work(&dev_priv->drrs.work);
5320 mutex_lock(&dev_priv->drrs.mutex);
5321 if (!dev_priv->drrs.dp) {
5322 mutex_unlock(&dev_priv->drrs.mutex);
5326 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5327 pipe = to_intel_crtc(crtc)->pipe;
/* Activity while downclocked: snap back to the fixed high RR. */
5329 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5330 intel_dp_set_drrs_state(dev_priv->dev,
5331 dev_priv->drrs.dp->attached_connector->panel.
5332 fixed_mode->vrefresh);
/* Only track frontbuffer bits belonging to the DRRS pipe. */
5335 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5337 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5338 mutex_unlock(&dev_priv->drrs.mutex);
5342 * intel_edp_drrs_flush - Flush DRRS
5344 * @frontbuffer_bits: frontbuffer plane tracking bits
5346 * When there is no movement on screen, DRRS work can be scheduled.
5347 * This DRRS work is responsible for setting relevant registers after a
5348 * timeout of 1 second.
5350 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5352 void intel_edp_drrs_flush(struct drm_device *dev,
5353 unsigned frontbuffer_bits)
5355 struct drm_i915_private *dev_priv = dev->dev_private;
5356 struct drm_crtc *crtc;
5357 enum i915_pipe pipe;
5359 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Restart the idle timer from this flush. */
5362 cancel_delayed_work(&dev_priv->drrs.work);
5364 mutex_lock(&dev_priv->drrs.mutex);
5365 if (!dev_priv->drrs.dp) {
5366 mutex_unlock(&dev_priv->drrs.mutex);
5370 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5371 pipe = to_intel_crtc(crtc)->pipe;
/* These frontbuffers are now clean; clear their busy bits. */
5372 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* Everything idle and still at high RR: arm the 1s downclock work. */
5374 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5375 !dev_priv->drrs.busy_frontbuffer_bits)
5376 schedule_delayed_work(&dev_priv->drrs.work,
5377 msecs_to_jiffies(1000));
5378 mutex_unlock(&dev_priv->drrs.mutex);
5382 * DOC: Display Refresh Rate Switching (DRRS)
5384 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5385 * which enables swtching between low and high refresh rates,
5386 * dynamically, based on the usage scenario. This feature is applicable
5387 * for internal panels.
5389 * Indication that the panel supports DRRS is given by the panel EDID, which
5390 * would list multiple refresh rates for one resolution.
5392 * DRRS is of 2 types - static and seamless.
5393 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5394 * (may appear as a blink on screen) and is used in dock-undock scenario.
5395 * Seamless DRRS involves changing RR without any visual effect to the user
5396 * and can be used during normal system usage. This is done by programming
5397 * certain registers.
5399 * Support for static/seamless DRRS may be indicated in the VBT based on
5400 * inputs from the panel spec.
5402 * DRRS saves power by switching to low RR based on usage scenarios.
5405 * The implementation is based on frontbuffer tracking implementation.
5406 * When there is a disturbance on the screen triggered by user activity or a
5407 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5408 * When there is no movement on screen, after a timeout of 1 second, a switch
5409 * to low RR is made.
5410 * For integration with frontbuffer tracking code,
5411 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5413 * DRRS can be further extended to support other internal panels and also
5414 * the scenario of video playback wherein RR is set based on the rate
5415 * requested by userspace.
5419 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5420 * @intel_connector: eDP connector
5421 * @fixed_mode: preferred mode of panel
5423 * This function is called only once at driver load to initialize basic
 * DRRS state (delayed work, mutex) and probe for downclock support.
 *
 * Returns:
5427 * Downclock mode if panel supports it, else return NULL.
5428 * DRRS support is determined by the presence of downclock mode (apart
5429 * from VBT setting).
5431 static struct drm_display_mode *
5432 intel_dp_drrs_init(struct intel_connector *intel_connector,
5433 struct drm_display_mode *fixed_mode)
5435 struct drm_connector *connector = &intel_connector->base;
5436 struct drm_device *dev = connector->dev;
5437 struct drm_i915_private *dev_priv = dev->dev_private;
5438 struct drm_display_mode *downclock_mode = NULL;
5440 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
/* DragonFly locking primitive standing in for mutex_init(). */
5441 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5443 if (INTEL_INFO(dev)->gen <= 6) {
5444 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5448 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5449 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* EDID-derived lower-refresh variant of fixed_mode, if the panel has one. */
5453 downclock_mode = intel_find_panel_downclock
5454 (dev, fixed_mode, connector);
5456 if (!downclock_mode) {
5457 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n")
5461 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5463 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5464 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5465 return downclock_mode;
/*
 * One-time eDP connector setup: cache DPCD and EDID, program the power
 * sequencer registers, pick the fixed panel mode (EDID preferred, VBT
 * fallback), probe DRRS, and set up the panel/backlight.  Returns true on
 * success; non-eDP connectors take the (elided) early-success path.
 * NOTE(review): braces, gotos and some declarations are elided in this
 * dump; comments only, code byte-identical.
 */
5468 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5469 struct intel_connector *intel_connector)
5471 struct drm_connector *connector = &intel_connector->base;
5472 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5473 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5474 struct drm_device *dev = intel_encoder->base.dev;
5475 struct drm_i915_private *dev_priv = dev->dev_private;
5476 struct drm_display_mode *fixed_mode = NULL;
5477 struct drm_display_mode *downclock_mode = NULL;
5479 struct drm_display_mode *scan;
5481 enum i915_pipe pipe = INVALID_PIPE;
5483 if (!is_edp(intel_dp))
5487 intel_edp_panel_vdd_sanitize(intel_dp);
5488 pps_unlock(intel_dp);
5490 /* Cache DPCD and EDID for edp. */
5491 has_dpcd = intel_dp_get_dpcd(intel_dp);
5494 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5495 dev_priv->no_aux_handshake =
5496 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5497 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5499 /* if this fails, presume the device is a ghost */
5500 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5504 /* We now know it's not a ghost, init power sequence regs. */
5506 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5507 pps_unlock(intel_dp);
5509 mutex_lock(&dev->mode_config.mutex);
5510 edid = drm_get_edid(connector, intel_dp->aux.ddc);
5512 if (drm_add_edid_modes(connector, edid)) {
5513 drm_mode_connector_update_edid_property(connector,
5515 drm_edid_to_eld(connector, edid);
/* Keep a sentinel so later code can tell "bad EDID" from "no EDID". */
5518 edid = ERR_PTR(-EINVAL);
5521 edid = ERR_PTR(-ENOENT);
5523 intel_connector->edid = edid;
5525 /* prefer fixed mode from EDID if available */
5526 list_for_each_entry(scan, &connector->probed_modes, head) {
5527 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5528 fixed_mode = drm_mode_duplicate(dev, scan);
/* DRRS probing wants the fixed mode to find its downclock twin. */
5529 downclock_mode = intel_dp_drrs_init(
5530 intel_connector, fixed_mode);
5535 /* fallback to VBT if available for eDP */
5536 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5537 fixed_mode = drm_mode_duplicate(dev,
5538 dev_priv->vbt.lfp_lvds_vbt_mode);
5540 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5542 mutex_unlock(&dev->mode_config.mutex);
5544 if (IS_VALLEYVIEW(dev)) {
5546 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5547 register_reboot_notifier(&intel_dp->edp_notifier);
5551 * Figure out the current pipe for the initial backlight setup.
5552 * If the current pipe isn't valid, try the PPS pipe, and if that
5553 * fails just assume pipe A.
5555 if (IS_CHERRYVIEW(dev))
5556 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5558 pipe = PORT_TO_PIPE(intel_dp->DP);
5560 if (pipe != PIPE_A && pipe != PIPE_B)
5561 pipe = intel_dp->pps_pipe;
5563 if (pipe != PIPE_A && pipe != PIPE_B)
5566 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5570 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5571 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5572 intel_panel_setup_backlight(connector, pipe);
/*
 * Wire up a DP/eDP connector for an already-created digital port: select
 * per-generation AUX vfuncs, create and register the DRM connector, set
 * the hotplug pin, initialize the PPS (for eDP), AUX and MST, and attach
 * connector properties.  Returns false (via elided error paths) if eDP
 * connector init fails, after tearing down what was set up here.
 * NOTE(review): braces, returns and the hpd_pin switch cases' port labels
 * are partially elided in this dump; comments only, code byte-identical.
 */
5578 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5579 struct intel_connector *intel_connector)
5581 struct drm_connector *connector = &intel_connector->base;
5582 struct intel_dp *intel_dp = &intel_dig_port->dp;
5583 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5584 struct drm_device *dev = intel_encoder->base.dev;
5585 struct drm_i915_private *dev_priv = dev->dev_private;
5586 enum port port = intel_dig_port->port;
5589 intel_dp->pps_pipe = INVALID_PIPE;
5591 /* intel_dp vfuncs */
/* AUX clock divider source differs per generation/platform. */
5592 if (INTEL_INFO(dev)->gen >= 9)
5593 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5594 else if (IS_VALLEYVIEW(dev))
5595 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5596 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5597 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5598 else if (HAS_PCH_SPLIT(dev))
5599 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5601 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5603 if (INTEL_INFO(dev)->gen >= 9)
5604 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5606 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5608 /* Preserve the current hw state. */
5609 intel_dp->DP = I915_READ(intel_dp->output_reg);
5610 intel_dp->attached_connector = intel_connector;
5612 if (intel_dp_is_edp(dev, port))
5613 type = DRM_MODE_CONNECTOR_eDP;
5615 type = DRM_MODE_CONNECTOR_DisplayPort;
5618 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5619 * for DP the encoder type can be set by the caller to
5620 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5622 if (type == DRM_MODE_CONNECTOR_eDP)
5623 intel_encoder->type = INTEL_OUTPUT_EDP;
5625 /* eDP only on port B and/or C on vlv/chv */
5626 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5627 port != PORT_B && port != PORT_C))
5630 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5631 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5634 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5635 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5637 connector->interlace_allowed = true;
5638 connector->doublescan_allowed = 0;
5640 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5641 edp_panel_vdd_work);
5643 intel_connector_attach_encoder(intel_connector, intel_encoder);
5644 drm_connector_register(connector);
/* DDI platforms read connector hw state through the DDI helper. */
5647 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5649 intel_connector->get_hw_state = intel_connector_get_hw_state;
5650 intel_connector->unregister = intel_dp_connector_unregister;
5652 /* Set up the hotplug pin. */
5655 intel_encoder->hpd_pin = HPD_PORT_A;
5658 intel_encoder->hpd_pin = HPD_PORT_B;
5661 intel_encoder->hpd_pin = HPD_PORT_C;
5664 intel_encoder->hpd_pin = HPD_PORT_D;
5670 if (is_edp(intel_dp)) {
5672 intel_dp_init_panel_power_timestamps(intel_dp);
5673 if (IS_VALLEYVIEW(dev))
5674 vlv_initial_power_sequencer_setup(intel_dp);
5676 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5677 pps_unlock(intel_dp);
5680 intel_dp_aux_init(intel_dp, intel_connector);
5682 /* init MST on ports that can support it */
5683 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5684 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5685 intel_dp_mst_encoder_init(intel_dig_port,
5686 intel_connector->base.base.id);
/* eDP probe failed ("ghost" panel): unwind everything set up above. */
5690 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5691 drm_dp_aux_unregister(&intel_dp->aux);
5692 if (is_edp(intel_dp)) {
5693 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5695 * vdd might still be enabled do to the delayed vdd off.
5696 * Make sure vdd is actually turned off here.
5699 edp_panel_vdd_off_sync(intel_dp);
5700 pps_unlock(intel_dp);
5702 drm_connector_unregister(connector);
5703 drm_connector_cleanup(connector);
5707 intel_dp_add_properties(intel_dp, connector);
5709 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5710 * 0xd. Failure to do so will result in spurious interrupts being
5711 * generated on the port when a cable is not attached.
5713 if (IS_G4X(dev) && !IS_GM45(dev)) {
5714 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5715 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
/*
 * Top-level entry: allocate and register a DP encoder + connector pair for
 * @port driving @output_reg.  Installs per-platform enable/disable hooks
 * (CHV / VLV / g4x+ variants), records the port for hotplug IRQ dispatch,
 * and hands off to intel_dp_init_connector(); frees everything on failure.
 * NOTE(review): braces/returns are elided in this dump; comments only.
 */
5722 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5724 struct drm_i915_private *dev_priv = dev->dev_private;
5725 struct intel_digital_port *intel_dig_port;
5726 struct intel_encoder *intel_encoder;
5727 struct drm_encoder *encoder;
5728 struct intel_connector *intel_connector;
5730 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5731 if (!intel_dig_port)
5734 intel_connector = intel_connector_alloc();
5735 if (!intel_connector) {
5736 kfree(intel_dig_port);
5740 intel_encoder = &intel_dig_port->base;
5741 encoder = &intel_encoder->base;
5743 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5744 DRM_MODE_ENCODER_TMDS);
5746 intel_encoder->compute_config = intel_dp_compute_config;
5747 intel_encoder->disable = intel_disable_dp;
5748 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5749 intel_encoder->get_config = intel_dp_get_config;
5750 intel_encoder->suspend = intel_dp_encoder_suspend;
/* Platform-specific enable sequencing: CHV, then VLV, then the rest. */
5751 if (IS_CHERRYVIEW(dev)) {
5752 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5753 intel_encoder->pre_enable = chv_pre_enable_dp;
5754 intel_encoder->enable = vlv_enable_dp;
5755 intel_encoder->post_disable = chv_post_disable_dp;
5756 } else if (IS_VALLEYVIEW(dev)) {
5757 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5758 intel_encoder->pre_enable = vlv_pre_enable_dp;
5759 intel_encoder->enable = vlv_enable_dp;
5760 intel_encoder->post_disable = vlv_post_disable_dp;
5762 intel_encoder->pre_enable = g4x_pre_enable_dp;
5763 intel_encoder->enable = g4x_enable_dp;
5764 if (INTEL_INFO(dev)->gen >= 5)
5765 intel_encoder->post_disable = ilk_post_disable_dp;
5768 intel_dig_port->port = port;
5769 intel_dig_port->dp.output_reg = output_reg;
5771 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/* CRTC routing: CHV port D is pipe C only; others per the elided cases. */
5772 if (IS_CHERRYVIEW(dev)) {
5774 intel_encoder->crtc_mask = 1 << 2;
5776 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5778 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5780 intel_encoder->cloneable = 0;
5781 intel_encoder->hot_plug = intel_dp_hot_plug;
/* Route hotplug IRQs for this port to intel_dp_hpd_pulse(). */
5783 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5784 dev_priv->hpd_irq_port[port] = intel_dig_port;
5786 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5787 drm_encoder_cleanup(encoder);
5788 kfree(intel_dig_port);
5789 kfree(intel_connector);
/*
 * System-suspend hook: quiesce the MST topology manager on every
 * MST-capable, currently-MST DP port so sideband traffic stops cleanly.
 */
5794 void intel_dp_mst_suspend(struct drm_device *dev)
5796 struct drm_i915_private *dev_priv = dev->dev_private;
5800 for (i = 0; i < I915_MAX_PORTS; i++) {
5801 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5802 if (!intel_dig_port)
5805 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5806 if (!intel_dig_port->dp.can_mst)
5808 if (intel_dig_port->dp.is_mst)
5809 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5815 void intel_dp_mst_resume(struct drm_device *dev)
5817 struct drm_i915_private *dev_priv = dev->dev_private;
5820 for (i = 0; i < I915_MAX_PORTS; i++) {
5821 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5822 if (!intel_dig_port)
5824 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5828 if (!intel_dig_port->dp.can_mst)
5831 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5833 intel_dp_check_mst_status(&intel_dig_port->dp);