/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =

void r600_dpm_print_class_info(u32 class, u32 class2)
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
	if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
	if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
		printk("limited_pwr ");
	if (class & ATOM_PPLIB_CLASSIFICATION_REST)
	if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
	if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
	if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
	if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
		printk("limited_pwr2 ");
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)

void r600_dpm_print_cap_info(u32 caps)
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
	if (rps == rdev->pm.dpm.requested_ps)
	if (rps == rdev->pm.dpm.boot_ps)

u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;

	return vblank_time_us;
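
/*
 * Worked example (assumed mode, for illustration only): with
 * crtc_htotal = 2200 and a 148500 kHz pixel clock, line_time_us =
 * 2200 * 1000 / 148500 ~= 14 us; if crtc_vblank_end - crtc_vdisplay
 * is 45 lines and there is no vertical border, vblank_time_us ~=
 * 45 * 14 = 630 us, which is the window DPM has for reclocking
 * without visible artifacts.
 */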

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			vrefresh = radeon_crtc->hw_mode.vrefresh;

void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
	i_c = (i * r_c) / 100;
	*p = i_c / (1 << (2 * (*u)));

int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
	if ((fl == 0) || (fh == 0) || (fl > fh))
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	ah = ((a * t) + 5000) / 10000;
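
/*
 * Helper math used by the DPM state machine: r600_calculate_u_and_p
 * scales the interval i by r_c percent and derives a shift u and
 * period p such that p = i_c >> (2 * u); r600_calculate_at rejects a
 * zero or inverted fl/fh pair and splits the sampling period t into
 * the *tl/*th thresholds, weighting the hysteresis percentage h by
 * the high/low clock ratio k.
 */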

void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
	WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

	WREG32(CG_RLC_REQ_AND_RSP, 0x2);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)

	WREG32(CG_RLC_REQ_AND_RSP, 0x0);

	WREG32(GRBM_PWR_CNTL, 0x1);
	RREG32(GRBM_PWR_CNTL);
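
/*
 * Enabling gfx clock gating only sets DYN_GFX_CLK_OFF_EN; disabling
 * clears it, posts request type 2 to the RLC via CG_RLC_REQ_AND_RSP,
 * polls (bounded by rdev->usec_timeout) until the response type field
 * reads 1, clears the request, then writes GRBM_PWR_CNTL and reads it
 * back to flush the posted write.
 */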

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);

void r600_enable_acpi_pm(struct radeon_device *rdev)
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
	WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
	WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);

void r600_wait_for_spll_change(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
	WREG32(CG_BSP, BSP(p) | BSU(u));

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));

void r600_select_td(struct radeon_device *rdev,
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)

void r600_set_tpu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);

void r600_set_tpc(struct radeon_device *rdev, u32 c)
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);

void r600_set_sstu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);

void r600_set_sst(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);

void r600_set_git(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);

void r600_set_fctu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);

void r600_set_fct(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 0, ~STEP_0_SPLL_ENTRY_VALID);

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						    u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 0, ~STEP_0_SPLL_STEP_ENABLE);

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						  u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
		 0, ~STEP_0_POST_DIV_EN);

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					       u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						    u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));

void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
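
/*
 * ix = 3 - (3 & index) reverses the power-level index into the
 * register slot (0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0), so the levels are
 * programmed in reverse register order.  The low 32 bits of 'pins' go
 * to the per-level lower GPIO register, while bits 32..34 are merged
 * into the 3-bit field of VID_UPPER_GPIO_CNTL selected by the same ix.
 */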

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
	gpio = RREG32(GPIOPAD_MASK);
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	WREG32(GPIOPAD_A, gpio);

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
		 ~CTXSW_FREQ_STATE_ENABLE);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
		 ~CTXSW_FREQ_STATE_ENABLE);

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
	u32 ix = 3 - (3 & index);

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
	u32 ix = 3 - (3 & index);

	tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)

void r600_start_dpm(struct radeon_device *rdev)
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
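
/*
 * Start-up ordering: engine and memory clock control are parked off
 * before GLOBAL_PWRMGT_EN is set, both CRTCs are allowed to reach
 * vblank, and the SPLL is bounced through bypass twice (waiting for
 * SPLL_CHG_STATUS after each toggle) before clock control is handed
 * back to the power-management hardware.
 */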

void r600_stop_dpm(struct radeon_device *rdev)
	r600_dynamicpm_enable(rdev, false);

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)

void r600_dpm_post_set_power_state(struct radeon_device *rdev)

bool r600_is_uvd_state(u32 class, u32 class2)
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)

int r600_set_thermal_temperature_range(struct radeon_device *rdev,
				       int min_temp, int max_temp)
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;
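
/*
 * Temperatures are tracked in millidegrees Celsius, so the default
 * [0, 255000] range is clamped to the caller's limits and divided by
 * 1000 when programmed into the DIG_THERM_* fields, which take whole
 * degrees; e.g. max_temp = 120000 programs DIG_THERM_INTH(120).
 */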

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};

static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));

	radeon_table->count = atom_table->ucNumEntries;
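
/*
 * Each ATOM record packs a 24-bit clock as a little-endian 16-bit low
 * word plus an 8-bit high byte: usClockLow = 0x5000 with ucClockHigh
 * = 0x01 gives clk = 0x15000 = 86016 (assuming the usual 10 kHz clock
 * units, roughly 860 MHz).  The records are advanced byte-wise
 * because the ATOM tables are packed.
 */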
822 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
823 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
824 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
825 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
826 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
827 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
828 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
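
/*
 * Each extended-header revision appends one 16-bit table offset, so
 * the header grows by two bytes per revision; comparing usSize
 * against these constants below tells us which optional tables (VCE,
 * UVD, SAMU, PPM, ACP, PowerTune) the VBIOS actually provides.
 */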

int r600_parse_extended_power_table(struct radeon_device *rdev)
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
	power_info = (union power_info *)((uint8_t*)mode_info->atom_context->bios + data_offset);

	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)((uint8_t*)mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,

		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);

		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);

		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);

		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);

		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =

	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				((uint8_t*)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;

	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				VCEClockInfo *vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
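
		/*
		 * VBIOS layout note: usVCETableOffset + 1 skips what appears
		 * to be a one-byte table revision, and the limits table then
		 * follows the array's own one-byte entry count plus
		 * ucNumEntries * sizeof(VCEClockInfo) worth of records, which
		 * is what the "+ 1 + 1 + ..." pointer arithmetic above steps
		 * over.  Each limit record refers back into the array via
		 * ucVCEClockInfoIndex instead of embedding the clocks itself.
		 */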
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));

		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));

		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);

		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));

		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);

void r600_free_extended_power_table(struct radeon_device *rdev)
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);

enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
			return RADEON_PCIE_GEN1;
	return RADEON_PCIE_GEN1;
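
/*
 * Selection logic: an explicit asic_gen request wins outright;
 * otherwise default_gen is granted only if the matching speed bit
 * (DRM_PCIE_SPEED_80 for gen3, DRM_PCIE_SPEED_50 for gen2) is present
 * in sys_mask, and everything else falls back to gen1.
 */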

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
	switch (asic_lanes) {
		return default_lanes;

u8 r600_encode_pci_lane_width(u32 lanes)
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	return encoded_lanes[lanes];
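
/*
 * The lookup table maps a physical lane count to the encoding the
 * PCIE link-width field expects: 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4,
 * 12 -> 5, 16 -> 6, with unsupported counts encoding to 0; the table
 * has 17 entries, covering lane counts 0 through 16.
 */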