/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include <uapi_drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
43 MODULE_FIRMWARE("radeon/R600_pfp.bin");
44 MODULE_FIRMWARE("radeon/R600_me.bin");
45 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
46 MODULE_FIRMWARE("radeon/RV610_me.bin");
47 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
48 MODULE_FIRMWARE("radeon/RV630_me.bin");
49 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
50 MODULE_FIRMWARE("radeon/RV620_me.bin");
51 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
52 MODULE_FIRMWARE("radeon/RV635_me.bin");
53 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
54 MODULE_FIRMWARE("radeon/RV670_me.bin");
55 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
56 MODULE_FIRMWARE("radeon/RS780_me.bin");
57 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV770_me.bin");
59 MODULE_FIRMWARE("radeon/RV770_smc.bin");
60 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV730_me.bin");
62 MODULE_FIRMWARE("radeon/RV730_smc.bin");
63 MODULE_FIRMWARE("radeon/RV740_smc.bin");
64 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
65 MODULE_FIRMWARE("radeon/RV710_me.bin");
66 MODULE_FIRMWARE("radeon/RV710_smc.bin");
67 MODULE_FIRMWARE("radeon/R600_rlc.bin");
68 MODULE_FIRMWARE("radeon/R700_rlc.bin");
69 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
70 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
71 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
72 MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
73 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
74 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
75 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
76 MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
77 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
78 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
79 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
81 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
82 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
85 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
86 MODULE_FIRMWARE("radeon/PALM_me.bin");
87 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
88 MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
89 MODULE_FIRMWARE("radeon/SUMO_me.bin");
90 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
91 MODULE_FIRMWARE("radeon/SUMO2_me.bin");
92 MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
93 MODULE_FIRMWARE("radeon/OLAND_me.bin");
94 MODULE_FIRMWARE("radeon/OLAND_ce.bin");
95 MODULE_FIRMWARE("radeon/OLAND_mc.bin");
96 MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
static void r600_gpu_init(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/*
 * Indirect registers accessor
 */
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}

void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}

void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
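
/*
 * Note on the accessor pairs above: they follow the usual index/data
 * register idiom, where the target offset is first written to an INDEX
 * register and the payload is then moved through the paired DATA
 * register. The spinlock is what makes the two-register sequence atomic
 * against concurrent accessors; without it another thread could retarget
 * the INDEX register between the two operations.
 */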
/**
 * r600_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int r600_get_allowed_info_register(struct radeon_device *rdev,
				   u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case R_000E50_SRBM_STATUS:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
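
/*
 * A worked example of the post-divider split programmed above, roughly:
 * the SW_HILEN/SW_LOLEN fields hold the high and low phase lengths of
 * each post divider, so vclk_div = 5 is written as HILEN = 5 >> 1 = 2
 * and LOLEN = 2 + (5 & 1) = 3, i.e. 2 + 3 = 5 clock phases per period;
 * even dividers split into two equal halves.
 */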
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
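
/*
 * Decoding example for the above: a raw ASIC_T field of 0x32 is 50
 * degrees C, returned as 50000 millidegrees; values with bit 8 set are
 * negative, e.g. 0x1F6 -> 0xF6 - 256 = -10 degrees C (-10000
 * millidegrees).
 */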
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
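
/*
 * Rough summary of the selection logic above: IGPs and R600 switch
 * between whole power states (the clock mode stays 0), while later
 * asics keep a single power state where possible and step through its
 * clock modes instead; states flagged SINGLE_DISPLAY_ONLY or
 * NO_DISPLAY are skipped when they conflict with the number of active
 * crtcs.
 */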
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
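
/*
 * Note: DCE3 parts route hotplug sensing through the DC_HPDn pad
 * registers (with pads 5 and 6 only present on DCE 3.2), while
 * pre-DCE3 asics expose just three DC_HOT_PLUG_DETECTn blocks; that is
 * why the two register families are handled separately here and in the
 * polarity/init/fini helpers below.
 */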
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		volatile uint32_t *ptr = rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = *ptr;
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
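
/*
 * The invalidation request above completes through
 * VM_CONTEXT0_REQUEST_RESPONSE: the loop treats a response type of 2 as
 * a failed flush (warned once) and any other non-zero response as
 * success, polling once per microsecond up to rdev->usec_timeout.
 */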
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
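
/*
 * Note: 0x3F00 masks bits 8-13 of SRBM_STATUS, which correspond to the
 * memory controller busy flags (cf. the MCB/MCD* checks in
 * r600_gpu_check_soft_reset() below); the MC is reported idle only once
 * all of them have cleared.
 */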
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
	       S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place VRAM at the same place as it is in the CPU
 * (PCI) address space, as some GPUs seem to have issues when we
 * reprogram it to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function will never fail; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			 mc->mc_vram_size >> 20, mc->vram_start,
			 mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
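
/*
 * Placement example for the AGP case above: with a 256M AGP aperture at
 * [0x10000000, 0x1FFFFFFF], size_bf is 256M and size_af is the rest of
 * the address space, so VRAM is placed right after the aperture at
 * gtt_end + 1; were the aperture near the top of the address space
 * instead, VRAM would be packed directly below gtt_start.
 */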
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						 (unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;
	void *vram_scratch_ptr_ptr;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr;
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   vram_scratch_ptr_ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}

static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}

static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	mdelay(50);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_disable_busmaster(rdev->pdev->dev.bsddev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	tmp = RREG32(BIF_SCRATCH0);

	/* reset */
	radeon_pci_config_reset(rdev);
	mdelay(1);

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	mdelay(1);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

int r600_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		r600_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		r600_gpu_pci_config_reset(rdev);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
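/*
 * Editor's sketch (illustrative only, not driver code): how a polling
 * caller could use the lockup check above.  work_done() is a hypothetical
 * predicate; real callers reach this hook through the asic function table
 * from the fence wait path.
 */
#if 0
	while (!work_done(rdev)) {
		if (r600_gfx_is_lockup(rdev, ring))
			return -ETIMEDOUT;	/* escalate to a GPU reset */
		msleep(10);
	}
#endif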
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
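/*
 * Editor's note (illustrative worked example): assume tiling_pipe_num = 2
 * (so 4 rendering pipes), max_rb_num = 4, total_max_rb_num = 8 and RB0
 * disabled (disabled_rb_mask = 0x1).  Masking in the nonexistent RBs gives
 * 0x1 | 0xf0 = 0xf1 (5 bits set), so req_rb_num = 8 - 5 = 3,
 * pipe_rb_ratio = 4 / 3 = 1 and pipe_rb_remain = 1.  Walking RB3..RB0 with
 * rb_num_width = 2 emits backend ids 3, 3, 2, 1 (RB3 absorbs the
 * remainder, RB0 is skipped), i.e. data = 0b11111001 = 0xf9.
 */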
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = rdev->config.r600.max_simds -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	rdev->config.r600.active_simds = tmp;

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = 0;
	for (i = 0; i < rdev->config.r600.max_backends; i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < rdev->config.r600.max_backends; i++)
			disabled_rb_mask &= ~(1 << i);
	}
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);
	if ((rdev->family == CHIP_R600) ||
	    (rdev->family == CHIP_RV630) ||
	    (rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if ((rdev->family == CHIP_R600) ||
		   (rdev->family == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if ((rdev->family == CHIP_RV610) ||
		   (rdev->family == CHIP_RV620) ||
		   (rdev->family == CHIP_RS780) ||
		   (rdev->family == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family == CHIP_RV630) ||
		   (rdev->family == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (rdev->family == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}
	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));
	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}
/*
 * Indirect registers accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
int r600_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	const char *smc_chip_name = "RV770";
	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		smc_chip_name = "RV770";
		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV730:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV730";
		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		smc_chip_name = "RV710";
		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV740";
		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		smc_chip_name = "CEDAR";
		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		smc_chip_name = "REDWOOD";
		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		smc_chip_name = "JUNIPER";
		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		smc_chip_name = "CYPRESS";
		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default:
		BUG();
	}
	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
		me_req_size = R600_PM4_UCODE_SIZE * 12;
		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);
	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->datasize != pfp_req_size) {
		DRM_ERROR("r600_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->pfp_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->datasize != me_req_size) {
		DRM_ERROR("r600_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->datasize != rlc_req_size) {
		DRM_ERROR("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
			  rdev->rlc_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", smc_chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			DRM_ERROR("smc: error loading firmware \"%s\"\n",
				  fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->datasize != smc_req_size) {
			DRM_ERROR("smc: Bogus length %zu in firmware \"%s\"\n",
				  rdev->smc_fw->datasize, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		DRM_ERROR("r600_cp: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}
u32 r600_gfx_get_rptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(R600_CP_RB_RPTR);

	return rptr;
}

u32 r600_gfx_get_wptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(R600_CP_RB_WPTR);

	return wptr;
}

void r600_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(R600_CP_RB_WPTR, ring->wptr);
	(void)RREG32(R600_CP_RB_WPTR);
}
/**
 * r600_fini_microcode - drop the firmware image references
 *
 * @rdev: radeon_device pointer
 *
 * Drop the pfp, me and rlc firmware image references.
 * Called at driver shutdown.
 */
void r600_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->pfp_fw);
	rdev->pfp_fw = NULL;
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
	release_firmware(rdev->rlc_fw);
	rdev->rlc_fw = NULL;
	release_firmware(rdev->smc_fw);
	rdev->smc_fw = NULL;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}
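/*
 * Editor's note (illustrative only, not called by the driver): the sizing
 * above rounds the request up to the next suitable power of two.  A
 * standalone restatement of the same arithmetic:
 */
static inline unsigned r600_example_ring_bytes(unsigned requested)
{
	u32 rb_bufsz = order_base_2(requested / 8);

	/* e.g. requested = 1000000 -> rb_bufsz = 17 -> (1 << 18) * 4 = 1 MiB */
	return (1 << (rb_bufsz + 1)) * 4;
}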
void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
/*
 * CP fences/semaphores
 */

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	if (rdev->family >= CHIP_RV770)
		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, lower_32_bits(addr));
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}
/**
 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring buffer object
 * @semaphore: radeon semaphore object
 * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
 */
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);

	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
		/* Prevent the PFP from running ahead of the semaphore wait */
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		radeon_ring_write(ring, 0x0);
	}

	return true;
}
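/*
 * Editor's sketch (illustrative only): pairing a signal with a wait across
 * two rings.  ring_a, ring_b and sem are placeholders; real users go
 * through radeon_semaphore_sync_rings() with proper locking and error
 * handling.
 */
#if 0
	r600_semaphore_ring_emit(rdev, ring_a, sem, false);	/* signal */
	r600_semaphore_ring_emit(rdev, ring_b, sem, true);	/* wait */
#endif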
/**
 * r600_copy_cpdma - copy pages using the CP DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the CP DMA engine (r6xx+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_cpdma(struct radeon_device *rdev,
		    uint64_t src_offset, uint64_t dst_offset,
		    unsigned num_gpu_pages,
		    struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, tmp;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		tmp = upper_32_bits(src_offset) & 0xff;
		if (size_in_bytes == 0)
			tmp |= PACKET3_CP_DMA_CP_SYNC;
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, tmp);
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, cur_size_in_bytes);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
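/*
 * Editor's note (illustrative only, not called by the driver): each CP DMA
 * packet moves at most 0x1fffff bytes, so the ring space reserved above is
 * a round-up division times the six dwords emitted per loop, plus fixed
 * overhead.  The loop-count arithmetic standalone:
 */
static inline int r600_example_cpdma_loops(unsigned num_gpu_pages)
{
	u32 size_in_bytes = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;

	/* e.g. 1024 pages * 4 KiB = 4 MiB -> 3 CP DMA packets */
	return DIV_ROUND_UP(size_in_bytes, 0x1fffff);
}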
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
static void r600_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v1_0_resume() fail early, so nothing happens there.
		 * It is pointless to try to go through that code, hence
		 * why we disable uvd here.
		 */
		rdev->has_uvd = false;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}
static void r600_uvd_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = uvd_v1_0_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}
static void r600_uvd_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
		return;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
		return;
	}
	r = uvd_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
		return;
	}
}
static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);

	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	r600_gpu_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r600_uvd_start(rdev);

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r600_uvd_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (!state) {
		temp &= ~(1 << 0);
		temp |= (1 << 1);
	} else {
		temp &= ~(1 << 1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform the necessary tasks to bring back the GPU into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	r600_cp_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic specific functions.  This
 * should also allow removing a bunch of callback functions
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	r600_uvd_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r600_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * CS blocks
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
		goto free_ib;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}
/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */
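/*
 * Editor's sketch (illustrative only): consuming IH vectors.  Each vector
 * is 16 bytes and the ring size is a power of two, so wrapping is a simple
 * mask.  process_one_vector() is a hypothetical stand-in for the
 * src_id/src_data dispatch done in r600_irq_process().
 */
#if 0
	while (rptr != wptr) {
		process_one_vector(&rdev->ih.ring[rptr / 4]);
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);
#endif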
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;
	void *ring_ptr;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  (u64 *)&rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		ring_ptr = &rdev->ih.ring;
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   ring_ptr);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	/* posting read */
	RREG32(R_000E50_SRBM_STATUS);

	return 0;
}
3950 static void r600_irq_ack(struct radeon_device *rdev)
3954 if (ASIC_IS_DCE3(rdev)) {
3955 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3956 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3957 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3958 if (ASIC_IS_DCE32(rdev)) {
3959 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3960 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3962 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3963 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3966 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3967 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3968 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3969 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3970 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3972 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3973 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3975 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3976 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3977 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3978 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3979 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3980 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3981 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3982 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3983 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3984 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3985 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3986 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			/* was a copy-paste bug: read DC_HPD5_INT_CONTROL but
			 * wrote DC_HPD6_INT_CONTROL; read HPD6 as well */
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}
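
/**
 * r600_irq_disable - disable interrupt generation
 *
 * @rdev: radeon_device pointer
 *
 * Disables interrupt generation, waits for any in-flight interrupt to
 * land and acks it, then returns the interrupt state registers to their
 * quiescent configuration.
 */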
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
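
/**
 * r600_get_ih_wptr - fetch the current IH ring write pointer
 *
 * @rdev: radeon_device pointer
 *
 * Reads wptr from the write-back page if enabled, else from the
 * IH_RB_WPTR register.  On ring overflow, resyncs the read pointer past
 * the overwritten entries and clears the overflow flag.
 */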
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*        r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 vblank
 *      1         1  D1 vline
 *      5         0  D2 vblank
 *      5         1  D2 vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
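
/**
 * r600_irq_process - walk and dispatch the IH ring
 *
 * @rdev: radeon_device pointer
 *
 * Walks the IH ring from rptr to wptr, decoding each 16 byte vector and
 * dispatching vblank, pageflip, hotplug, HDMI audio, and fence events.
 * Deferred work (hotplug, audio, thermal) is queued once after the
 * loop, and wptr is re-read before returning in case new vectors
 * arrived while processing.
 */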
irqreturn_t r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");
				break;
			case 1:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");
				break;
			case 4:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");
				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");
				break;
			case 10:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");
				break;
			case 12:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI0\n");
				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI1\n");
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}
		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_hotplug)
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (queue_hdmi)
		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		taskqueue_enqueue(rdev->tq, &rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
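
/*
 * Debugfs info
 */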
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 *
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		volatile uint32_t *ptr = rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = *ptr;	/* fb read forces the flush */
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
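
/**
 * r600_set_pcie_lanes - request a new PCIE lane width
 *
 * @rdev: radeon_device pointer
 * @lanes: requested lane count (0-16)
 *
 * Translates the lane count into a LC_LINK_WIDTH encoding and kicks off
 * a link reconfiguration.  No-op on IGP, non-PCIE and dual-GPU (X2)
 * boards, which have their own bring-up sequences.
 */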
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;
	if (!(rdev->flags & RADEON_IS_PCIE))
		return;
	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
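
/**
 * r600_get_pcie_lanes - report the current PCIE lane width
 *
 * @rdev: radeon_device pointer
 *
 * Decodes the LC_LINK_WIDTH_RD field of the link width control
 * register.  Returns 0 on IGP, non-PCIE and X2 boards where the query
 * does not apply.
 */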
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;
	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;
	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);
	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
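
/**
 * r600_pcie_gen2_enable - try to switch the PCIE link to gen 2 speeds
 *
 * @rdev: radeon_device pointer
 *
 * Switches the link to 5.0 GT/s when both the chip (RV6xx+) and the
 * upstream bridge advertise gen 2 support.  The 55 nm r6xx parts
 * (RV620/RV635/RV670) need extra link width renegotiation and training
 * tweaks around the speed change.
 */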
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;
	if (rdev->flags & RADEON_IS_IGP)
		return;
	if (!(rdev->flags & RADEON_IS_PCIE))
		return;
	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;
	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}