/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $
 */
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon_asic.h"
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

#define FIRMWARE_R100	"radeonkmsfw_R100_cp"
#define FIRMWARE_R200	"radeonkmsfw_R200_cp"
#define FIRMWARE_R300	"radeonkmsfw_R300_cp"
#define FIRMWARE_R420	"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690	"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600	"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520	"radeonkmsfw_R520_cp"

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 * and others in some cases.
 */
static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
	if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
	if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
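	/*
	 * Sample the current scanline counter twice in a row: if the two
	 * reads differ, the vertical counter is advancing and the CRTC is
	 * really scanning out. r100_wait_for_vblank() relies on this to
	 * avoid spinning forever on a stalled or disabled CRTC.
	 */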
	vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
	if (crtc >= rdev->num_crtc)
	if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
	if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);

/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to clean up pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to clean up pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
					rdev->pm.requested_power_state_index = i;
			rdev->pm.requested_power_state_index =
				rdev->pm.current_power_state_index - 1;
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
					rdev->pm.requested_power_state_index = i;
			rdev->pm.requested_power_state_index =
				rdev->pm.current_power_state_index + 1;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");

	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			DRM_UDELAY(voltage->delay);
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			DRM_UDELAY(voltage->delay);

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
	bool connected = false;

	if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
	if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
	bool connected = r100_hpd_sense(rdev, hpd);

	tmp = RREG32(RADEON_FP_GEN_CNTL);
	tmp &= ~RADEON_FP_DETECT_INT_POL;
	tmp |= RADEON_FP_DETECT_INT_POL;
	WREG32(RADEON_FP_GEN_CNTL, tmp);
	tmp = RREG32(RADEON_FP2_GEN_CNTL);
	tmp &= ~RADEON_FP2_DETECT_INT_POL;
	tmp |= RADEON_FP2_DETECT_INT_POL;
	WREG32(RADEON_FP2_GEN_CNTL, tmp);
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	radeon_irq_kms_enable_hpd(rdev, enable);
/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	radeon_irq_kms_disable_hpd(rdev, disable);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
	/* TODO: can we do something here? */
	/* The hw seems to cache only one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits this entry it
	 * could end up at the wrong address. */
int r100_pci_gart_init(struct radeon_device *rdev)
	if (rdev->gart.ptr) {
		DRM_ERROR("R100 PCI GART already initialized\n");
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
int r100_pci_gart_enable(struct radeon_device *rdev)
	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
void r100_pci_gart_disable(struct radeon_device *rdev)
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
	u32 *gtt = rdev->gart.ptr;
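	/*
	 * The R100 PCI GART is a flat, single-level table in system RAM:
	 * one little-endian 32-bit entry per GPU page, each holding the
	 * bus address of that page (hence table_size = num_gpu_pages * 4
	 * in r100_pci_gart_init() above).
	 */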
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
	gtt[i] = cpu_to_le32(lower_32_bits(addr));
void r100_pci_gart_fini(struct radeon_device *rdev)
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
int r100_irq_set(struct radeon_device *rdev)
	if (!rdev->irq.installed) {
		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	WREG32(RADEON_GEN_INT_CNTL, tmp);
void r100_irq_disable(struct radeon_device *rdev)
	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
static uint32_t r100_irq_ack(struct radeon_device *rdev)
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
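	/* The status bits are write-one-to-clear: writing the latched
	 * value back acknowledges exactly the interrupts we observed. */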
	WREG32(RADEON_GEN_INT_STATUS, irqs);
	return irqs & irq_mask;
irqreturn_t r100_irq_process(struct radeon_device *rdev)
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (rdev->shutdown) {
	if (status & RADEON_SW_INT_TEST) {
		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
	/* Vertical blank interrupts */
	if (status & RADEON_CRTC_VBLANK_STAT) {
		if (rdev->irq.crtc_vblank_int[0]) {
			drm_handle_vblank(rdev->ddev, 0);
			rdev->pm.vblank_sync = true;
			DRM_WAKEUP(&rdev->irq.vblank_queue);
		if (atomic_read(&rdev->irq.pflip[0]))
			radeon_crtc_handle_flip(rdev, 0);
	if (status & RADEON_CRTC2_VBLANK_STAT) {
		if (rdev->irq.crtc_vblank_int[1]) {
			drm_handle_vblank(rdev->ddev, 1);
			rdev->pm.vblank_sync = true;
			DRM_WAKEUP(&rdev->irq.vblank_queue);
		if (atomic_read(&rdev->irq.pflip[1]))
			radeon_crtc_handle_flip(rdev, 1);
	if (status & RADEON_FP_DETECT_STAT) {
		queue_hotplug = true;
	if (status & RADEON_FP2_DETECT_STAT) {
		queue_hotplug = true;
	status = r100_irq_ack(rdev);
	taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	panic("%s: Unused on older asics", __func__);
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t stride_pixels;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
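	/*
	 * Budget check: each loop iteration below emits exactly 10 ring
	 * dwords (1 BITBLT_MULTI packet header + 9 payload dwords), and
	 * the trailing cache flush, wait-until and fence emission fit
	 * comfortably inside the fixed 64-dword allowance.
	 */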
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
		num_gpu_pages -= cur_pages;
		/* pages are in the Y direction (height);
		 * page width is in the X direction (width) */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	radeon_ring_unlock_commit(rdev, ring);
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
	r = radeon_ring_lock(rdev, ring, 2);
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring);
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
	const char *fw_name = NULL;
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;

	rdev->me_fw = firmware_get(fw_name);
	if (rdev->me_fw == NULL) {
		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
			  fw_name);
	} else if (rdev->me_fw->datasize % 8) {
		DRM_ERROR(
			  "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
			  rdev->me_fw->datasize, fw_name);
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
	if (rdev->me_fw != NULL) {
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
static void r100_cp_load_microcode(struct radeon_device *rdev)
	const __be32 *fw_data;

	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");

	size = rdev->me_fw->datasize / 4;
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
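	/*
	 * Each microcode record is 64 bits, stored big-endian in the
	 * firmware blob; it is written to CP ME RAM as a high/low dword
	 * pair, which is why the loop below advances two __be32 words
	 * per iteration.
	 */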
	for (i = 0; i < size; i += 2) {
		WREG32(RADEON_CP_ME_RAM_DATAH,
		       be32_to_cpup(&fw_data[i]));
		WREG32(RADEON_CP_ME_RAM_DATAL,
		       be32_to_cpup(&fw_data[i + 1]));
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	r = r100_cp_init_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load firmware!\n");

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
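	/*
	 * Example: a requested ring_size of 1 MB gives
	 * rb_bufsz = drm_order(1048576 / 8) = 17 and
	 * ring_size = (1 << 18) * 4 = 1 MB again; the size is rounded to
	 * a power of two, with RB_BUFSZ being the log2 of the ring size
	 * in 8-byte (quadword) units.
	 */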
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);
	/* Each time the cp reads 1024 bytes (16 dword/quadword) update
	 * the rptr copy in system ram */
	/* cp will read 128 bytes at a time (4 dwords) */
	ring->align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords):
	 *   RING      0 to 15
	 *   INDIRECT1 16 to 79
	 *   INDIRECT2 80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
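	/* 0x718 is CP_RB_WPTR_DELAY: the pre-write timer sits in the low
	 * bits and the pre-write limit in bits 28+, implementing the
	 * write-delay policy described in the comments above. */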
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
	tmp |= RADEON_BUF_SWAP_32BIT;
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);

	WREG32(RADEON_CP_RB_CNTL, tmp);
	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
void r100_cp_fini(struct radeon_device *rdev)
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");

void r100_cp_disable(struct radeon_device *rdev)
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx, unsigned reg)
	struct radeon_cs_reloc *reloc;

	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		radeon_cs_dump_packet(p, pkt);

	value = radeon_get_ib_value(p, idx);
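	/*
	 * The low 22 bits of a *_PITCH_OFFSET register hold the surface
	 * offset in 1 KB units, so the relocated GPU offset (already
	 * shifted right by 10) is simply added into that field; the
	 * upper bits carry the pitch and tile flags.
	 */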
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				radeon_cs_dump_packet(p, pkt);
			tile_flags |= RADEON_DST_TILE_MICRO;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;

	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for packet3 %d\n",
			  pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
	idx_value = radeon_get_ib_value(p, idx);
	ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
	track->arrays[i + 0].robj = reloc->robj;
	track->arrays[i + 0].esize = idx_value >> 8;
	track->arrays[i + 0].esize &= 0x7F;
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
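	/*
	 * Each u32 of the bitmap covers 128 bytes of register space
	 * (32 registers of 4 bytes each), so reg >> 7 selects the bitmap
	 * word and (reg >> 2) & 31 the bit within it.
	 */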
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
	if (((reg + (pkt->count << 2)) >> 7) > n) {
	for (i = 0; i <= pkt->count; i++, idx++) {
		m = 1 << ((reg >> 2) & 31);
		r = check(p, pkt, idx, reg);
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
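/*
 * Judging from the offsets used below, the IB layout relative to h_idx
 * is: [h_idx] PACKET0 header, [h_idx+1] VLINE_START_END value,
 * [h_idx+2] WAIT_UNTIL header, [h_idx+3] wait value, and [h_idx+5] the
 * crtc_id carried by the relocation NOP packet.
 */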
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	/* parse the wait until */
	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);

	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = R100_CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
		default:
			DRM_ERROR("unknown crtc reloc\n");
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
static int r100_get_vtx_size(uint32_t vtx_fmt)
	/* ordered according to bits in spec */
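	/* vtx_size accumulates the number of dwords one vertex occupies:
	 * each SE_VTX_FMT bit tested below enables a component that adds
	 * its dword count to the total. */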
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
			tmp = idx_value & ~(0x7 << 2);
			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
			tmp = idx_value & ~(0x7 << 16);
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
			track->cb[0].cpp = 1;
			track->cb[0].cpp = 2;
			track->cb[0].cpp = 4;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		track->zb_dirty = true;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_PP_CNTL:
		uint32_t temp = idx_value >> 4;
		for (i = 0; i < track->num_texture; i++)
			track->textures[i].enabled = !!(temp & (1 << i));
		track->tex_dirty = true;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		track->tex_dirty = true;
	default:
		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
			  reg, idx);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = radeon_get_ib_value(p, idx+1);

		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_IMMD:
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r100_cs_parse(struct radeon_cs_parser *p)
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;

	track = kmalloc(sizeof(*track), M_DRM, M_ZERO | M_WAITOK);
	r100_cs_track_clear(p->rdev, track);
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			drm_free(p->track, M_DRM);
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
		case RADEON_PACKET_TYPE2:
		case RADEON_PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			drm_free(p->track, M_DRM);
		if (r) {
			drm_free(p->track, M_DRM);
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	drm_free(p->track, M_DRM);
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
	DRM_ERROR("compress format %d\n", t->compress_format);
static int r100_track_compress_size(int compress_format, int w, int h)
	int block_width, block_height, block_bytes;
	int wblocks, hblocks;

	switch (compress_format) {
	case R100_TRACK_COMP_DXT1:
	case R100_TRACK_COMP_DXT35:
	hblocks = (h + block_height - 1) / block_height;
	wblocks = (w + block_width - 1) / block_width;
	if (wblocks < min_wblocks)
		wblocks = min_wblocks;
	sz = wblocks * hblocks * block_bytes;
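	/*
	 * Worked example, assuming the usual DXT block parameters
	 * (4x4 texel blocks, 8 bytes per block for DXT1, 16 for DXT3/5):
	 * a 64x64 DXT1 texture is 16x16 blocks of 8 bytes = 2048 bytes,
	 * while the same surface in DXT3/5 needs 4096 bytes.
	 */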
static int r100_cs_track_cube(struct radeon_device *rdev,
			      struct r100_cs_track *track, unsigned idx)
	unsigned face, w, h;
	struct radeon_bo *cube_robj;
	unsigned compress_format = track->textures[idx].compress_format;

	for (face = 0; face < 5; face++) {
		cube_robj = track->textures[idx].cube_info[face].robj;
		w = track->textures[idx].cube_info[face].width;
		h = track->textures[idx].cube_info[face].height;

		if (compress_format) {
			size = r100_track_compress_size(compress_format, w, h);
		size *= track->textures[idx].cpp;

		size += track->textures[idx].cube_info[face].offset;

		if (size > radeon_bo_size(cube_robj)) {
			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
				  size, radeon_bo_size(cube_robj));
			r100_cs_track_texture_print(&track->textures[idx]);
2144 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2145 struct r100_cs_track *track)
2147 struct radeon_bo *robj;
2149 unsigned u, i, w, h, d;
2152 for (u = 0; u < track->num_texture; u++) {
2153 if (!track->textures[u].enabled)
2155 if (track->textures[u].lookup_disable)
2157 robj = track->textures[u].robj;
2159 DRM_ERROR("No texture bound to unit %u\n", u);
2163 for (i = 0; i <= track->textures[u].num_levels; i++) {
2164 if (track->textures[u].use_pitch) {
2165 if (rdev->family < CHIP_R300)
2166 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2168 w = track->textures[u].pitch / (1 << i);
2170 w = track->textures[u].width;
2171 if (rdev->family >= CHIP_RV515)
2172 w |= track->textures[u].width_11;
2174 if (track->textures[u].roundup_w)
2175 w = roundup_pow_of_two(w);
2177 h = track->textures[u].height;
2178 if (rdev->family >= CHIP_RV515)
2179 h |= track->textures[u].height_11;
2181 if (track->textures[u].roundup_h)
2182 h = roundup_pow_of_two(h);
2183 if (track->textures[u].tex_coord_type == 1) {
2184 d = (1 << track->textures[u].txdepth) / (1 << i);
2190 if (track->textures[u].compress_format) {
2192 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2193 /* compressed textures are block based */
2197 size *= track->textures[u].cpp;
2199 switch (track->textures[u].tex_coord_type) {
2204 if (track->separate_cube) {
2205 ret = r100_cs_track_cube(rdev, track, u);
2212 DRM_ERROR("Invalid texture coordinate type %u for unit "
2213 "%u\n", track->textures[u].tex_coord_type, u);
2216 if (size > radeon_bo_size(robj)) {
2217 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2218 "%lu\n", u, size, radeon_bo_size(robj));
2219 r100_cs_track_texture_print(&track->textures[u]);
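/*
 * Top-level state check before a draw.  Every dirty buffer class is
 * validated against its BO size: color buffers (pitch * cpp * maxy), the
 * z buffer, the AA resolve target, then the vertex arrays.  The vertex
 * bound depends on how VAP_VF_CNTL says vertices are fetched: roughly,
 * indexed draws are limited by the largest index seen (max_indx), list
 * draws by the vertex count, and immediate-mode draws must carry
 * vtx_size * nverts dwords inline in the command stream.
 */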
2226 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2232 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2234 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2235 !track->blend_read_enable)
2238 for (i = 0; i < num_cb; i++) {
2239 if (track->cb[i].robj == NULL) {
2240 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2243 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2244 size += track->cb[i].offset;
2245 if (size > radeon_bo_size(track->cb[i].robj)) {
2246 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2247 "(need %lu have %lu) !\n", i, size,
2248 radeon_bo_size(track->cb[i].robj));
2249 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2250 i, track->cb[i].pitch, track->cb[i].cpp,
2251 track->cb[i].offset, track->maxy);
2255 track->cb_dirty = false;
2257 if (track->zb_dirty && track->z_enabled) {
2258 if (track->zb.robj == NULL) {
2259 DRM_ERROR("[drm] No buffer for z buffer !\n");
2262 size = track->zb.pitch * track->zb.cpp * track->maxy;
2263 size += track->zb.offset;
2264 if (size > radeon_bo_size(track->zb.robj)) {
2265 DRM_ERROR("[drm] Buffer too small for z buffer "
2266 "(need %lu have %lu) !\n", size,
2267 radeon_bo_size(track->zb.robj));
2268 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2269 track->zb.pitch, track->zb.cpp,
2270 track->zb.offset, track->maxy);
2274 track->zb_dirty = false;
2276 if (track->aa_dirty && track->aaresolve) {
2277 if (track->aa.robj == NULL) {
2278 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2281 /* I believe the format comes from colorbuffer0. */
2282 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2283 size += track->aa.offset;
2284 if (size > radeon_bo_size(track->aa.robj)) {
2285 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2286 "(need %lu have %lu) !\n", i, size,
2287 radeon_bo_size(track->aa.robj));
2288 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2289 i, track->aa.pitch, track->cb[0].cpp,
2290 track->aa.offset, track->maxy);
2294 track->aa_dirty = false;
2296 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2297 if (track->vap_vf_cntl & (1 << 14)) {
2298 nverts = track->vap_alt_nverts;
2300 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2302 switch (prim_walk) {
2304 for (i = 0; i < track->num_arrays; i++) {
2305 size = track->arrays[i].esize * track->max_indx * 4;
2306 if (track->arrays[i].robj == NULL) {
2307 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2308 "bound\n", prim_walk, i);
2311 if (size > radeon_bo_size(track->arrays[i].robj)) {
2312 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2313 "need %lu dwords have %lu dwords\n",
2314 prim_walk, i, size >> 2,
2315 radeon_bo_size(track->arrays[i].robj)
2317 DRM_ERROR("Max indices %u\n", track->max_indx);
2323 for (i = 0; i < track->num_arrays; i++) {
2324 size = track->arrays[i].esize * (nverts - 1) * 4;
2325 if (track->arrays[i].robj == NULL) {
2326 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2327 "bound\n", prim_walk, i);
2330 if (size > radeon_bo_size(track->arrays[i].robj)) {
2331 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2332 "need %lu dwords have %lu dwords\n",
2333 prim_walk, i, size >> 2,
2334 radeon_bo_size(track->arrays[i].robj)
2341 size = track->vtx_size * nverts;
2342 if (size != track->immd_dwords) {
2343 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
2344 track->immd_dwords, size);
2345 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2346 nverts, track->vtx_size);
2351 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2356 if (track->tex_dirty) {
2357 track->tex_dirty = false;
2358 return r100_cs_track_texture_check(rdev, track);
2363 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2367 track->cb_dirty = true;
2368 track->zb_dirty = true;
2369 track->tex_dirty = true;
2370 track->aa_dirty = true;
2372 if (rdev->family < CHIP_R300) {
2374 if (rdev->family <= CHIP_RS200)
2375 track->num_texture = 3;
2377 track->num_texture = 6;
2379 track->separate_cube = 1;
2382 track->num_texture = 16;
2384 track->separate_cube = 0;
2385 track->aaresolve = false;
2386 track->aa.robj = NULL;
2389 for (i = 0; i < track->num_cb; i++) {
2390 track->cb[i].robj = NULL;
2391 track->cb[i].pitch = 8192;
2392 track->cb[i].cpp = 16;
2393 track->cb[i].offset = 0;
2395 track->z_enabled = true;
2396 track->zb.robj = NULL;
2397 track->zb.pitch = 8192;
2399 track->zb.offset = 0;
2400 track->vtx_size = 0x7F;
2401 track->immd_dwords = 0xFFFFFFFFUL;
2402 track->num_arrays = 11;
2403 track->max_indx = 0x00FFFFFFUL;
2404 for (i = 0; i < track->num_arrays; i++) {
2405 track->arrays[i].robj = NULL;
2406 track->arrays[i].esize = 0x7F;
2408 for (i = 0; i < track->num_texture; i++) {
2409 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2410 track->textures[i].pitch = 16536;
2411 track->textures[i].width = 16536;
2412 track->textures[i].height = 16536;
2413 track->textures[i].width_11 = 1 << 11;
2414 track->textures[i].height_11 = 1 << 11;
2415 track->textures[i].num_levels = 12;
2416 if (rdev->family <= CHIP_RS200) {
2417 track->textures[i].tex_coord_type = 0;
2418 track->textures[i].txdepth = 0;
2420 track->textures[i].txdepth = 16;
2421 track->textures[i].tex_coord_type = 1;
2423 track->textures[i].cpp = 64;
2424 track->textures[i].robj = NULL;
2425 /* CS IB emission code makes sure texture units are disabled */
2426 track->textures[i].enabled = false;
2427 track->textures[i].lookup_disable = false;
2428 track->textures[i].roundup_w = true;
2429 track->textures[i].roundup_h = true;
2430 if (track->separate_cube)
2431 for (face = 0; face < 5; face++) {
2432 track->textures[i].cube_info[face].robj = NULL;
2433 track->textures[i].cube_info[face].width = 16536;
2434 track->textures[i].cube_info[face].height = 16536;
2435 track->textures[i].cube_info[face].offset = 0;
2441 * Global GPU functions
2443 static void r100_errata(struct radeon_device *rdev)
2445 rdev->pll_errata = 0;
2447 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2448 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2451 if (rdev->family == CHIP_RV100 ||
2452 rdev->family == CHIP_RS100 ||
2453 rdev->family == CHIP_RS200) {
2454 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
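/*
 * Busy-wait until the RBBM command fifo has room for at least n entries.
 * This is the standard r1xx polling pattern: sample RBBM_STATUS roughly
 * once per microsecond and give up with an error once rdev->usec_timeout
 * polls have elapsed.
 */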
2458 static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2463 for (i = 0; i < rdev->usec_timeout; i++) {
2464 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2473 int r100_gui_wait_for_idle(struct radeon_device *rdev)
2478 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2479 DRM_ERROR("radeon: wait for empty RBBM fifo failed !"
2480 " Bad things might happen.\n");
2482 for (i = 0; i < rdev->usec_timeout; i++) {
2483 tmp = RREG32(RADEON_RBBM_STATUS);
2484 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2492 int r100_mc_wait_for_idle(struct radeon_device *rdev)
2497 for (i = 0; i < rdev->usec_timeout; i++) {
2498 /* read MC_STATUS */
2499 tmp = RREG32(RADEON_MC_STATUS);
2500 if (tmp & RADEON_MC_IDLE) {
2508 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2512 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2513 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2514 radeon_ring_lockup_update(ring);
2517 /* force CP activities */
2518 radeon_ring_force_activity(rdev, ring);
2519 return radeon_ring_test_lockup(rdev, ring);
2522 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2523 void r100_enable_bm(struct radeon_device *rdev)
2526 /* Enable bus mastering */
2527 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2528 WREG32(RADEON_BUS_CNTL, tmp);
2531 void r100_bm_disable(struct radeon_device *rdev)
2535 /* disable bus mastering */
2536 tmp = RREG32(R_000030_BUS_CNTL);
2537 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2539 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2541 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2542 tmp = RREG32(RADEON_BUS_CNTL);
2544 pci_disable_busmaster(rdev->dev);
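/*
 * Full soft reset of the graphics block.  The sequence below is roughly:
 * stop the MC clients, quiesce the CP (clear CSQ_CNTL and rewind the ring
 * pointers), save PCI state and disable bus mastering, pulse
 * RBBM_SOFT_RESET first for the engine units (SE/RE/PP/RB) and then for
 * the CP, restore PCI state and bus mastering, and finally read back
 * RBBM_STATUS to verify the engine actually went idle.
 */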
2548 int r100_asic_reset(struct radeon_device *rdev)
2550 struct r100_mc_save save;
2554 status = RREG32(R_000E40_RBBM_STATUS);
2555 if (!G_000E40_GUI_ACTIVE(status)) {
2558 r100_mc_stop(rdev, &save);
2559 status = RREG32(R_000E40_RBBM_STATUS);
2560 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2562 WREG32(RADEON_CP_CSQ_CNTL, 0);
2563 tmp = RREG32(RADEON_CP_RB_CNTL);
2564 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2565 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2566 WREG32(RADEON_CP_RB_WPTR, 0);
2567 WREG32(RADEON_CP_RB_CNTL, tmp);
2568 /* save PCI state */
2569 pci_save_state(device_get_parent(rdev->dev));
2570 /* disable bus mastering */
2571 r100_bm_disable(rdev);
2572 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2573 S_0000F0_SOFT_RESET_RE(1) |
2574 S_0000F0_SOFT_RESET_PP(1) |
2575 S_0000F0_SOFT_RESET_RB(1));
2576 RREG32(R_0000F0_RBBM_SOFT_RESET);
2578 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2580 status = RREG32(R_000E40_RBBM_STATUS);
2581 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2583 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2584 RREG32(R_0000F0_RBBM_SOFT_RESET);
2586 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2588 status = RREG32(R_000E40_RBBM_STATUS);
2589 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2590 /* restore PCI & busmastering */
2591 pci_restore_state(device_get_parent(rdev->dev));
2592 r100_enable_bm(rdev);
2593 /* Check if GPU is idle */
2594 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2595 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2596 dev_err(rdev->dev, "failed to reset GPU\n");
2599 dev_info(rdev->dev, "GPU reset succeeded\n");
2600 r100_mc_resume(rdev, &save);
2604 void r100_set_common_regs(struct radeon_device *rdev)
2606 struct drm_device *dev = rdev->ddev;
2607 bool force_dac2 = false;
2610 /* set these so they don't interfere with anything */
2611 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2612 WREG32(RADEON_SUBPIC_CNTL, 0);
2613 WREG32(RADEON_VIPH_CONTROL, 0);
2614 WREG32(RADEON_I2C_CNTL_1, 0);
2615 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2616 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2617 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2619 /* always set up dac2 on rn50 and some rv100 as lots
2620 * of servers seem to wire it up to a VGA port but
2621 * don't report it in the bios connector
2624 switch (dev->pci_device) {
2633 /* DELL triple head servers */
2634 if ((dev->pci_subvendor == 0x1028 /* DELL */) &&
2635 ((dev->pci_subdevice == 0x016c) ||
2636 (dev->pci_subdevice == 0x016d) ||
2637 (dev->pci_subdevice == 0x016e) ||
2638 (dev->pci_subdevice == 0x016f) ||
2639 (dev->pci_subdevice == 0x0170) ||
2640 (dev->pci_subdevice == 0x017d) ||
2641 (dev->pci_subdevice == 0x017e) ||
2642 (dev->pci_subdevice == 0x0183) ||
2643 (dev->pci_subdevice == 0x018a) ||
2644 (dev->pci_subdevice == 0x019a)))
2650 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2651 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2652 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2654 /* For CRT on DAC2, don't turn it on if the BIOS didn't
2655 enable it, even if it's detected.
2658 /* force it to crtc0 */
2659 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2660 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2661 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2663 /* set up the TV DAC */
2664 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2665 RADEON_TV_DAC_STD_MASK |
2666 RADEON_TV_DAC_RDACPD |
2667 RADEON_TV_DAC_GDACPD |
2668 RADEON_TV_DAC_BDACPD |
2669 RADEON_TV_DAC_BGADJ_MASK |
2670 RADEON_TV_DAC_DACADJ_MASK);
2671 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2672 RADEON_TV_DAC_NHOLD |
2673 RADEON_TV_DAC_STD_PS2 |
2676 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2677 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2678 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2681 /* switch PM block to ACPI mode */
2682 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2683 tmp &= ~RADEON_PM_MODE_SEL;
2684 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2691 static void r100_vram_get_type(struct radeon_device *rdev)
2695 rdev->mc.vram_is_ddr = false;
2696 if (rdev->flags & RADEON_IS_IGP)
2697 rdev->mc.vram_is_ddr = true;
2698 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2699 rdev->mc.vram_is_ddr = true;
2700 if ((rdev->family == CHIP_RV100) ||
2701 (rdev->family == CHIP_RS100) ||
2702 (rdev->family == CHIP_RS200)) {
2703 tmp = RREG32(RADEON_MEM_CNTL);
2704 if (tmp & RV100_HALF_MODE) {
2705 rdev->mc.vram_width = 32;
2707 rdev->mc.vram_width = 64;
2709 if (rdev->flags & RADEON_SINGLE_CRTC) {
2710 rdev->mc.vram_width /= 4;
2711 rdev->mc.vram_is_ddr = true;
2713 } else if (rdev->family <= CHIP_RV280) {
2714 tmp = RREG32(RADEON_MEM_CNTL);
2715 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2716 rdev->mc.vram_width = 128;
2718 rdev->mc.vram_width = 64;
2722 rdev->mc.vram_width = 128;
2726 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2731 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2733 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2734 * that is, cards with the 2nd generation multifunction PCI interface
2736 if (rdev->family == CHIP_RV280 ||
2737 rdev->family >= CHIP_RV350) {
2738 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2739 ~RADEON_HDP_APER_CNTL);
2740 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2741 return aper_size * 2;
2744 /* Older cards have all sorts of funny issues to deal with. First
2745 * check if it's a multifunction card by reading the PCI config
2746 * header type... Limit those to one aperture size
2748 byte = pci_read_config(rdev->dev, 0xe, 1);
2750 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2751 DRM_INFO("Limiting VRAM to one aperture\n");
2755 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2756 * has set it up. We don't write it, as that's broken on some ASICs, but
2757 * we expect the BIOS to have done the right thing (might be too optimistic...)
2759 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2760 return aper_size * 2;
2764 void r100_vram_init_sizes(struct radeon_device *rdev)
2766 u64 config_aper_size;
2768 /* work out accessible VRAM */
2769 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2770 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2771 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2772 /* FIXME we don't use the second aperture yet when we could use it */
2773 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2774 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2775 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2776 if (rdev->flags & RADEON_IS_IGP) {
2778 /* read NB_TOM to get the amount of ram stolen for the GPU */
2779 tom = RREG32(RADEON_NB_TOM);
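/* NB_TOM encodes the stolen-memory window in 64KB units: bits 31:16 are
 * the top page and bits 15:0 the bottom page, hence the
 * ((top - bottom + 1) << 16) computation below.  e.g. bottom 0x0000 and
 * top 0x07ff give 0x800 * 64KB = 128MB of stolen system RAM.
 */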
2780 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2781 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2782 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2784 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2785 /* Some production boards of m6 will report 0
2788 if (rdev->mc.real_vram_size == 0) {
2789 rdev->mc.real_vram_size = 8192 * 1024;
2790 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2792 /* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
2793 * Novell bug 204882, along with lots of Ubuntu ones
2795 if (rdev->mc.aper_size > config_aper_size)
2796 config_aper_size = rdev->mc.aper_size;
2798 if (config_aper_size > rdev->mc.real_vram_size)
2799 rdev->mc.mc_vram_size = config_aper_size;
2801 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2805 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2809 temp = RREG32(RADEON_CONFIG_CNTL);
2810 if (state == false) {
2811 temp &= ~RADEON_CFG_VGA_RAM_EN;
2812 temp |= RADEON_CFG_VGA_IO_DIS;
2814 temp &= ~RADEON_CFG_VGA_IO_DIS;
2816 WREG32(RADEON_CONFIG_CNTL, temp);
2819 static void r100_mc_init(struct radeon_device *rdev)
2823 r100_vram_get_type(rdev);
2824 r100_vram_init_sizes(rdev);
2825 base = rdev->mc.aper_base;
2826 if (rdev->flags & RADEON_IS_IGP)
2827 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2828 radeon_vram_location(rdev, &rdev->mc, base);
2829 rdev->mc.gtt_base_align = 0;
2830 if (!(rdev->flags & RADEON_IS_AGP))
2831 radeon_gtt_location(rdev, &rdev->mc);
2832 radeon_update_bandwidth_info(rdev);
2837 * Indirect registers accessor
2839 void r100_pll_errata_after_index(struct radeon_device *rdev)
2841 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2842 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2843 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2847 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2849 /* This workaround is necessary on RV100, RS100 and RS200 chips,
2850 * or the chip could hang on a subsequent access
2852 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2856 /* This function is required to work around a hardware bug in some (all?)
2857 * revisions of the R300. This workaround should be called after every
2858 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2859 * may not be correct.
2861 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2864 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2865 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2866 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2867 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2868 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
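/*
 * PLL registers are not directly memory mapped; they sit behind an
 * index/data pair.  The accessors below write the register number to
 * CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes), move the value
 * through CLOCK_CNTL_DATA, and run the per-family errata hooks after
 * each step so buggy chips don't hang or return stale data.
 */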
2872 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2876 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2877 r100_pll_errata_after_index(rdev);
2878 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2879 r100_pll_errata_after_data(rdev);
2883 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2885 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2886 r100_pll_errata_after_index(rdev);
2887 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2888 r100_pll_errata_after_data(rdev);
2891 static void r100_set_safe_registers(struct radeon_device *rdev)
2893 if (ASIC_IS_RN50(rdev)) {
2894 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2895 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm);
2896 } else if (rdev->family < CHIP_R200) {
2897 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2898 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm);
2900 r200_set_safe_registers(rdev);
2907 #if defined(CONFIG_DEBUG_FS)
2908 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2910 struct drm_info_node *node = (struct drm_info_node *) m->private;
2911 struct drm_device *dev = node->minor->dev;
2912 struct radeon_device *rdev = dev->dev_private;
2913 uint32_t reg, value;
2916 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2917 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2918 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2919 for (i = 0; i < 64; i++) {
2920 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2921 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2922 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2923 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2924 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2929 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2931 struct drm_info_node *node = (struct drm_info_node *) m->private;
2932 struct drm_device *dev = node->minor->dev;
2933 struct radeon_device *rdev = dev->dev_private;
2934 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2936 unsigned count, i, j;
2938 radeon_ring_free_size(rdev, ring);
2939 rdp = RREG32(RADEON_CP_RB_RPTR);
2940 wdp = RREG32(RADEON_CP_RB_WPTR);
2941 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2942 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2943 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2944 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2945 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2946 seq_printf(m, "%u dwords in ring\n", count);
2947 for (j = 0; j <= count; j++) {
2948 i = (rdp + j) & ring->ptr_mask;
2949 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2955 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2957 struct drm_info_node *node = (struct drm_info_node *) m->private;
2958 struct drm_device *dev = node->minor->dev;
2959 struct radeon_device *rdev = dev->dev_private;
2960 uint32_t csq_stat, csq2_stat, tmp;
2961 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2964 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2965 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2966 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2967 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2968 r_rptr = (csq_stat >> 0) & 0x3ff;
2969 r_wptr = (csq_stat >> 10) & 0x3ff;
2970 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2971 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2972 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2973 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2974 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2975 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2976 seq_printf(m, "Ring rptr %u\n", r_rptr);
2977 seq_printf(m, "Ring wptr %u\n", r_wptr);
2978 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2979 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2980 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2981 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2982 /* FIXME: 0, 128, 640 depend on the fifo setup, see cp_init_kms:
2983 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2984 seq_printf(m, "Ring fifo:\n");
2985 for (i = 0; i < 256; i++) {
2986 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2987 tmp = RREG32(RADEON_CP_CSQ_DATA);
2988 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2990 seq_printf(m, "Indirect1 fifo:\n");
2991 for (i = 256; i <= 512; i++) {
2992 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2993 tmp = RREG32(RADEON_CP_CSQ_DATA);
2994 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2996 seq_printf(m, "Indirect2 fifo:\n");
2997 for (i = 640; i < ib1_wptr; i++) {
2998 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2999 tmp = RREG32(RADEON_CP_CSQ_DATA);
3000 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3005 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3007 struct drm_info_node *node = (struct drm_info_node *) m->private;
3008 struct drm_device *dev = node->minor->dev;
3009 struct radeon_device *rdev = dev->dev_private;
3012 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3013 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3014 tmp = RREG32(RADEON_MC_FB_LOCATION);
3015 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3016 tmp = RREG32(RADEON_BUS_CNTL);
3017 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3018 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3019 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3020 tmp = RREG32(RADEON_AGP_BASE);
3021 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3022 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3023 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3024 tmp = RREG32(0x01D0);
3025 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3026 tmp = RREG32(RADEON_AIC_LO_ADDR);
3027 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3028 tmp = RREG32(RADEON_AIC_HI_ADDR);
3029 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3030 tmp = RREG32(0x01E4);
3031 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3035 static struct drm_info_list r100_debugfs_rbbm_list[] = {
3036 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3039 static struct drm_info_list r100_debugfs_cp_list[] = {
3040 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3041 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3044 static struct drm_info_list r100_debugfs_mc_info_list[] = {
3045 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3049 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3051 #if defined(CONFIG_DEBUG_FS)
3052 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3058 int r100_debugfs_cp_init(struct radeon_device *rdev)
3060 #if defined(CONFIG_DEBUG_FS)
3061 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3067 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3069 #if defined(CONFIG_DEBUG_FS)
3070 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
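/*
 * Surface registers apply tiling and byte-swap attributes to a VRAM
 * address range.  Each surface slot is a 16-byte register group (INFO,
 * LOWER_BOUND, UPPER_BOUND), hence surf_index = reg * 16 below; the
 * tiling flag encoding differs per generation, which is why there are
 * separate pre-R200, R200-family and R300+ branches.
 */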
3076 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3077 uint32_t tiling_flags, uint32_t pitch,
3078 uint32_t offset, uint32_t obj_size)
3080 int surf_index = reg * 16;
3083 if (rdev->family <= CHIP_RS200) {
3084 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3085 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3086 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3087 if (tiling_flags & RADEON_TILING_MACRO)
3088 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3089 /* setting pitch to 0 disables tiling */
3090 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3093 } else if (rdev->family <= CHIP_RV280) {
3094 if (tiling_flags & (RADEON_TILING_MACRO))
3095 flags |= R200_SURF_TILE_COLOR_MACRO;
3096 if (tiling_flags & RADEON_TILING_MICRO)
3097 flags |= R200_SURF_TILE_COLOR_MICRO;
3099 if (tiling_flags & RADEON_TILING_MACRO)
3100 flags |= R300_SURF_TILE_MACRO;
3101 if (tiling_flags & RADEON_TILING_MICRO)
3102 flags |= R300_SURF_TILE_MICRO;
3105 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3106 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3107 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3108 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3110 /* r100/r200 divide by 16 */
3111 if (rdev->family < CHIP_R300)
3112 flags |= pitch / 16;
3117 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3118 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3119 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3120 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3124 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3126 int surf_index = reg * 16;
3127 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
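/*
 * Recompute display watermarks.  In rough terms the fixed-point math
 * below models:
 *
 *	mem_bw       = mclk * (vram_width / 8) * (DDR ? 2 : 1)
 *	peak_disp_bw = sum over enabled CRTCs of pix_clk * bytes_per_pixel
 *
 * then derives a worst-case memory latency from the DRAM timing fields
 * (tRCD/tRP/tRAS/CAS) to pick GRPH_STOP_REQ and the critical point at
 * which a starving display controller gets priority over other MC
 * clients.
 */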
3130 void r100_bandwidth_update(struct radeon_device *rdev)
3132 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3133 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3134 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3135 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3136 fixed20_12 memtcas_ff[8] = {
3141 dfixed_init_half(1),
3142 dfixed_init_half(2),
3145 fixed20_12 memtcas_rs480_ff[8] = {
3151 dfixed_init_half(1),
3152 dfixed_init_half(2),
3153 dfixed_init_half(3),
3155 fixed20_12 memtcas2_ff[8] = {
3165 fixed20_12 memtrbs[8] = {
3167 dfixed_init_half(1),
3169 dfixed_init_half(2),
3171 dfixed_init_half(3),
3175 fixed20_12 memtrbs_r4xx[8] = {
3185 fixed20_12 min_mem_eff;
3186 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3187 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3188 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3189 disp_drain_rate2, read_return_rate;
3190 fixed20_12 time_disp1_drop_priority;
3192 int cur_size = 16; /* in octawords */
3193 int critical_point = 0, critical_point2;
3194 /* uint32_t read_return_rate, time_disp1_drop_priority; */
3195 int stop_req, max_stop_req;
3196 struct drm_display_mode *mode1 = NULL;
3197 struct drm_display_mode *mode2 = NULL;
3198 uint32_t pixel_bytes1 = 0;
3199 uint32_t pixel_bytes2 = 0;
3201 radeon_update_display_priority(rdev);
3203 if (rdev->mode_info.crtcs[0]->base.enabled) {
3204 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3205 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
3207 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3208 if (rdev->mode_info.crtcs[1]->base.enabled) {
3209 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3210 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
3214 min_mem_eff.full = dfixed_const_8(0);
3216 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3217 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3218 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3219 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3220 /* check crtc enables */
3222 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3224 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3225 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3229 * determine if there is enough bandwidth for the current mode
3231 sclk_ff = rdev->pm.sclk;
3232 mclk_ff = rdev->pm.mclk;
3234 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3235 temp_ff.full = dfixed_const(temp);
3236 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3240 peak_disp_bw.full = 0;
3242 temp_ff.full = dfixed_const(1000);
3243 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3244 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3245 temp_ff.full = dfixed_const(pixel_bytes1);
3246 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3249 temp_ff.full = dfixed_const(1000);
3250 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3251 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3252 temp_ff.full = dfixed_const(pixel_bytes2);
3253 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3256 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3257 if (peak_disp_bw.full >= mem_bw.full) {
3258 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3259 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3262 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3263 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3264 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3265 mem_trcd = ((temp >> 2) & 0x3) + 1;
3266 mem_trp = ((temp & 0x3)) + 1;
3267 mem_tras = ((temp & 0x70) >> 4) + 1;
3268 } else if (rdev->family == CHIP_R300 ||
3269 rdev->family == CHIP_R350) { /* r300, r350 */
3270 mem_trcd = (temp & 0x7) + 1;
3271 mem_trp = ((temp >> 8) & 0x7) + 1;
3272 mem_tras = ((temp >> 11) & 0xf) + 4;
3273 } else if (rdev->family == CHIP_RV350 ||
3274 rdev->family <= CHIP_RV380) {
3276 mem_trcd = (temp & 0x7) + 3;
3277 mem_trp = ((temp >> 8) & 0x7) + 3;
3278 mem_tras = ((temp >> 11) & 0xf) + 6;
3279 } else if (rdev->family == CHIP_R420 ||
3280 rdev->family == CHIP_R423 ||
3281 rdev->family == CHIP_RV410) {
3283 mem_trcd = (temp & 0xf) + 3;
3286 mem_trp = ((temp >> 8) & 0xf) + 3;
3289 mem_tras = ((temp >> 12) & 0x1f) + 6;
3292 } else { /* RV200, R200 */
3293 mem_trcd = (temp & 0x7) + 1;
3294 mem_trp = ((temp >> 8) & 0x7) + 1;
3295 mem_tras = ((temp >> 12) & 0xf) + 4;
3298 trcd_ff.full = dfixed_const(mem_trcd);
3299 trp_ff.full = dfixed_const(mem_trp);
3300 tras_ff.full = dfixed_const(mem_tras);
3302 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
3303 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3304 data = (temp & (7 << 20)) >> 20;
3305 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3306 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3307 tcas_ff = memtcas_rs480_ff[data];
3309 tcas_ff = memtcas_ff[data];
3311 tcas_ff = memtcas2_ff[data];
3313 if (rdev->family == CHIP_RS400 ||
3314 rdev->family == CHIP_RS480) {
3315 /* extra cas latency stored in bits 23-25 0-4 clocks */
3316 data = (temp >> 23) & 0x7;
3318 tcas_ff.full += dfixed_const(data);
3321 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3322 /* on the R300, Tcas is included in Trbs.
3324 temp = RREG32(RADEON_MEM_CNTL);
3325 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3327 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3328 temp = RREG32(R300_MC_IND_INDEX);
3329 temp &= ~R300_MC_IND_ADDR_MASK;
3330 temp |= R300_MC_READ_CNTL_CD_mcind;
3331 WREG32(R300_MC_IND_INDEX, temp);
3332 temp = RREG32(R300_MC_IND_DATA);
3333 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3335 temp = RREG32(R300_MC_READ_CNTL_AB);
3336 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3339 temp = RREG32(R300_MC_READ_CNTL_AB);
3340 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3342 if (rdev->family == CHIP_RV410 ||
3343 rdev->family == CHIP_R420 ||
3344 rdev->family == CHIP_R423)
3345 trbs_ff = memtrbs_r4xx[data];
3347 trbs_ff = memtrbs[data];
3348 tcas_ff.full += trbs_ff.full;
3351 sclk_eff_ff.full = sclk_ff.full;
3353 if (rdev->flags & RADEON_IS_AGP) {
3354 fixed20_12 agpmode_ff;
3355 agpmode_ff.full = dfixed_const(radeon_agpmode);
3356 temp_ff.full = dfixed_const_666(16);
3357 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3359 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3361 if (ASIC_IS_R300(rdev)) {
3362 sclk_delay_ff.full = dfixed_const(250);
3364 if ((rdev->family == CHIP_RV100) ||
3365 rdev->flags & RADEON_IS_IGP) {
3366 if (rdev->mc.vram_is_ddr)
3367 sclk_delay_ff.full = dfixed_const(41);
3369 sclk_delay_ff.full = dfixed_const(33);
3371 if (rdev->mc.vram_width == 128)
3372 sclk_delay_ff.full = dfixed_const(57);
3374 sclk_delay_ff.full = dfixed_const(41);
3378 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3380 if (rdev->mc.vram_is_ddr) {
3381 if (rdev->mc.vram_width == 32) {
3382 k1.full = dfixed_const(40);
3385 k1.full = dfixed_const(20);
3389 k1.full = dfixed_const(40);
3393 temp_ff.full = dfixed_const(2);
3394 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3395 temp_ff.full = dfixed_const(c);
3396 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3397 temp_ff.full = dfixed_const(4);
3398 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3399 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3400 mc_latency_mclk.full += k1.full;
3402 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3403 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3406 HW cursor time assuming worst case of full size colour cursor.
3408 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3409 temp_ff.full += trcd_ff.full;
3410 if (temp_ff.full < tras_ff.full)
3411 temp_ff.full = tras_ff.full;
3412 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3414 temp_ff.full = dfixed_const(cur_size);
3415 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3417 Find the total latency for the display data.
3419 disp_latency_overhead.full = dfixed_const(8);
3420 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3421 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3422 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3424 if (mc_latency_mclk.full > mc_latency_sclk.full)
3425 disp_latency.full = mc_latency_mclk.full;
3427 disp_latency.full = mc_latency_sclk.full;
3429 /* setup Max GRPH_STOP_REQ default value */
3430 if (ASIC_IS_RV100(rdev))
3431 max_stop_req = 0x5c;
3433 max_stop_req = 0x7c;
3437 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3438 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3440 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3442 if (stop_req > max_stop_req)
3443 stop_req = max_stop_req;
3446 Find the drain rate of the display buffer.
3448 temp_ff.full = dfixed_const((16/pixel_bytes1));
3449 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3452 Find the critical point of the display buffer.
3454 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3455 crit_point_ff.full += dfixed_const_half(0);
3457 critical_point = dfixed_trunc(crit_point_ff);
3459 if (rdev->disp_priority == 2) {
3464 The critical point should never be above max_stop_req-4. Setting
3465 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3467 if (max_stop_req - critical_point < 4)
3470 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3471 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
3472 critical_point = 0x10;
3475 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3476 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3477 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3478 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3479 if ((rdev->family == CHIP_R350) &&
3480 (stop_req > 0x15)) {
3483 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3484 temp |= RADEON_GRPH_BUFFER_SIZE;
3485 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3486 RADEON_GRPH_CRITICAL_AT_SOF |
3487 RADEON_GRPH_STOP_CNTL);
3489 Write the result into the register.
3491 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3492 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3495 if ((rdev->family == CHIP_RS400) ||
3496 (rdev->family == CHIP_RS480)) {
3497 /* attempt to program RS400 disp regs correctly ??? */
3498 temp = RREG32(RS400_DISP1_REG_CNTL);
3499 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3500 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3501 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3502 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3503 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3504 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3505 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3506 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3507 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3508 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3509 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3513 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3514 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3515 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3520 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3522 if (stop_req > max_stop_req)
3523 stop_req = max_stop_req;
3526 Find the drain rate of the display buffer.
3528 temp_ff.full = dfixed_const((16/pixel_bytes2));
3529 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3531 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3532 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3533 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3534 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3535 if ((rdev->family == CHIP_R350) &&
3536 (stop_req > 0x15)) {
3539 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3540 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3541 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3542 RADEON_GRPH_CRITICAL_AT_SOF |
3543 RADEON_GRPH_STOP_CNTL);
3545 if ((rdev->family == CHIP_RS100) ||
3546 (rdev->family == CHIP_RS200))
3547 critical_point2 = 0;
3549 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3550 temp_ff.full = dfixed_const(temp);
3551 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3552 if (sclk_ff.full < temp_ff.full)
3553 temp_ff.full = sclk_ff.full;
3555 read_return_rate.full = temp_ff.full;
3558 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3559 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3561 time_disp1_drop_priority.full = 0;
3563 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3564 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3565 crit_point_ff.full += dfixed_const_half(0);
3567 critical_point2 = dfixed_trunc(crit_point_ff);
3569 if (rdev->disp_priority == 2) {
3570 critical_point2 = 0;
3573 if (max_stop_req - critical_point2 < 4)
3574 critical_point2 = 0;
3578 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3579 /* some R300 cards have problem with this set to 0 */
3580 critical_point2 = 0x10;
3583 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3584 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3586 if ((rdev->family == CHIP_RS400) ||
3587 (rdev->family == CHIP_RS480)) {
3589 /* attempt to program RS400 disp2 regs correctly ??? */
3590 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3591 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3592 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3593 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3594 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3595 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3596 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3597 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3598 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3599 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3600 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3601 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3603 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3604 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3605 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3606 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3609 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3610 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
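/*
 * CP smoke tests.  Both the ring test here and the IB test further down
 * use the same handshake: seed a scratch register with 0xCAFEDEAD, emit a
 * PACKET0 write of 0xDEADBEEF to it, and poll until the value changes.
 * If the scratch register never flips, the CP is not executing commands.
 */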
3614 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3621 r = radeon_scratch_get(rdev, &scratch);
3623 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3626 WREG32(scratch, 0xCAFEDEAD);
3627 r = radeon_ring_lock(rdev, ring, 2);
3629 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3630 radeon_scratch_free(rdev, scratch);
3633 radeon_ring_write(ring, PACKET0(scratch, 0));
3634 radeon_ring_write(ring, 0xDEADBEEF);
3635 radeon_ring_unlock_commit(rdev, ring);
3636 for (i = 0; i < rdev->usec_timeout; i++) {
3637 tmp = RREG32(scratch);
3638 if (tmp == 0xDEADBEEF) {
3643 if (i < rdev->usec_timeout) {
3644 DRM_INFO("ring test succeeded in %d usecs\n", i);
3646 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3650 radeon_scratch_free(rdev, scratch);
3654 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3656 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3658 if (ring->rptr_save_reg) {
3659 u32 next_rptr = ring->wptr + 2 + 3;
3660 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3661 radeon_ring_write(ring, next_rptr);
3664 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3665 radeon_ring_write(ring, ib->gpu_addr);
3666 radeon_ring_write(ring, ib->length_dw);
3669 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3671 struct radeon_ib ib;
3677 r = radeon_scratch_get(rdev, &scratch);
3679 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3682 WREG32(scratch, 0xCAFEDEAD);
3683 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3685 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3688 ib.ptr[0] = PACKET0(scratch, 0);
3689 ib.ptr[1] = 0xDEADBEEF;
3690 ib.ptr[2] = PACKET2(0);
3691 ib.ptr[3] = PACKET2(0);
3692 ib.ptr[4] = PACKET2(0);
3693 ib.ptr[5] = PACKET2(0);
3694 ib.ptr[6] = PACKET2(0);
3695 ib.ptr[7] = PACKET2(0);
3697 r = radeon_ib_schedule(rdev, &ib, NULL);
3699 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3702 r = radeon_fence_wait(ib.fence, false);
3704 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3707 for (i = 0; i < rdev->usec_timeout; i++) {
3708 tmp = RREG32(scratch);
3709 if (tmp == 0xDEADBEEF) {
3714 if (i < rdev->usec_timeout) {
3715 DRM_INFO("ib test succeeded in %u usecs\n", i);
3717 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3722 radeon_ib_free(rdev, &ib);
3724 radeon_scratch_free(rdev, scratch);
3728 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3730 /* Shut down the CP. We shouldn't need to do that, but better be safe than
3733 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3734 WREG32(R_000740_CP_CSQ_CNTL, 0);
3736 /* Save few CRTC registers */
3737 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3738 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3739 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3740 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3741 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3742 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3743 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3746 /* Disable VGA aperture access */
3747 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3748 /* Disable cursor, overlay, crtc */
3749 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3750 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3751 S_000054_CRTC_DISPLAY_DIS(1));
3752 WREG32(R_000050_CRTC_GEN_CNTL,
3753 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3754 S_000050_CRTC_DISP_REQ_EN_B(1));
3755 WREG32(R_000420_OV0_SCALE_CNTL,
3756 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3757 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3758 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3759 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3760 S_000360_CUR2_LOCK(1));
3761 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3762 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3763 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3764 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3765 WREG32(R_000360_CUR2_OFFSET,
3766 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3770 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3772 /* Update base address for crtc */
3773 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3774 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3775 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3777 /* Restore CRTC registers */
3778 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3779 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3780 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3781 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3782 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3786 void r100_vga_render_disable(struct radeon_device *rdev)
3790 tmp = RREG8(R_0003C2_GENMO_WT);
3791 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3794 static void r100_debugfs(struct radeon_device *rdev)
3798 r = r100_debugfs_mc_info_init(rdev);
3800 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3803 static void r100_mc_program(struct radeon_device *rdev)
3805 struct r100_mc_save save;
3807 /* Stop all MC clients */
3808 r100_mc_stop(rdev, &save);
3809 if (rdev->flags & RADEON_IS_AGP) {
3810 WREG32(R_00014C_MC_AGP_LOCATION,
3811 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3812 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3813 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3814 if (rdev->family > CHIP_RV200)
3815 WREG32(R_00015C_AGP_BASE_2,
3816 upper_32_bits(rdev->mc.agp_base) & 0xff);
3818 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3819 WREG32(R_000170_AGP_BASE, 0);
3820 if (rdev->family > CHIP_RV200)
3821 WREG32(R_00015C_AGP_BASE_2, 0);
3823 /* Wait for mc idle */
3824 if (r100_mc_wait_for_idle(rdev))
3825 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3826 /* Program the MC; it should be a 32-bit limited address space */
3827 WREG32(R_000148_MC_FB_LOCATION,
3828 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3829 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3830 r100_mc_resume(rdev, &save);
3833 static void r100_clock_startup(struct radeon_device *rdev)
3837 if (radeon_dynclks != -1 && radeon_dynclks)
3838 radeon_legacy_set_clock_gating(rdev, 1);
3839 /* We need to force on some of the block */
3840 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3841 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3842 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3843 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3844 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3847 static int r100_startup(struct radeon_device *rdev)
3851 /* set common regs */
3852 r100_set_common_regs(rdev);
3854 r100_mc_program(rdev);
3856 r100_clock_startup(rdev);
3857 /* Initialize GART (initialize after TTM so we can allocate
3858 * memory through TTM but finalize after TTM) */
3859 r100_enable_bm(rdev);
3860 if (rdev->flags & RADEON_IS_PCI) {
3861 r = r100_pci_gart_enable(rdev);
3866 /* allocate wb buffer */
3867 r = radeon_wb_init(rdev);
3871 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3873 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3878 if (!rdev->irq.installed) {
3879 r = radeon_irq_kms_init(rdev);
3885 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3886 /* 1M ring buffer */
3887 r = r100_cp_init(rdev, 1024 * 1024);
3889 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3893 r = radeon_ib_pool_init(rdev);
3895 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3902 int r100_resume(struct radeon_device *rdev)
3906 /* Make sure the GART is not active */
3907 if (rdev->flags & RADEON_IS_PCI)
3908 r100_pci_gart_disable(rdev);
3909 /* Resume clock before doing reset */
3910 r100_clock_startup(rdev);
3911 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3912 if (radeon_asic_reset(rdev)) {
3913 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3914 RREG32(R_000E40_RBBM_STATUS),
3915 RREG32(R_0007C0_CP_STAT));
3918 radeon_combios_asic_init(rdev->ddev);
3919 /* Resume clock after posting */
3920 r100_clock_startup(rdev);
3921 /* Initialize surface registers */
3922 radeon_surface_init(rdev);
3924 rdev->accel_working = true;
3925 r = r100_startup(rdev);
3927 rdev->accel_working = false;
3932 int r100_suspend(struct radeon_device *rdev)
3934 r100_cp_disable(rdev);
3935 radeon_wb_disable(rdev);
3936 r100_irq_disable(rdev);
3937 if (rdev->flags & RADEON_IS_PCI)
3938 r100_pci_gart_disable(rdev);
3942 void r100_fini(struct radeon_device *rdev)
3945 radeon_wb_fini(rdev);
3946 radeon_ib_pool_fini(rdev);
3947 radeon_gem_fini(rdev);
3948 if (rdev->flags & RADEON_IS_PCI)
3949 r100_pci_gart_fini(rdev);
3950 radeon_agp_fini(rdev);
3951 radeon_irq_kms_fini(rdev);
3952 radeon_fence_driver_fini(rdev);
3953 radeon_bo_fini(rdev);
3954 radeon_atombios_fini(rdev);
3955 r100_cp_fini_microcode(rdev);
3956 drm_free(rdev->bios, M_DRM);
3961 * Due to how kexec works, it can leave the hw fully initialised when it
3962 * boots the new kernel. However, doing our init sequence with the CP and
3963 * WB set up causes GPU hangs, on the RN50 at least. So at startup
3964 * do some quick sanity checks and restore sane values to avoid this
3967 void r100_restore_sanity(struct radeon_device *rdev)
3971 tmp = RREG32(RADEON_CP_CSQ_CNTL);
3973 WREG32(RADEON_CP_CSQ_CNTL, 0);
3975 tmp = RREG32(RADEON_CP_RB_CNTL);
3977 WREG32(RADEON_CP_RB_CNTL, 0);
3979 tmp = RREG32(RADEON_SCRATCH_UMSK);
3981 WREG32(RADEON_SCRATCH_UMSK, 0);
3985 int r100_init(struct radeon_device *rdev)
3989 /* Register debugfs file specific to this group of asics */
3992 r100_vga_render_disable(rdev);
3993 /* Initialize scratch registers */
3994 radeon_scratch_init(rdev);
3995 /* Initialize surface registers */
3996 radeon_surface_init(rdev);
3997 /* sanity check some registers to avoid hangs like those after kexec */
3998 r100_restore_sanity(rdev);
3999 /* TODO: disabling VGA needs to use the VGA request */
4001 if (!radeon_get_bios(rdev)) {
4002 if (ASIC_IS_AVIVO(rdev))
4005 if (rdev->is_atom_bios) {
4006 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
4009 r = radeon_combios_init(rdev);
4013 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
4014 if (radeon_asic_reset(rdev)) {
4016 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4017 RREG32(R_000E40_RBBM_STATUS),
4018 RREG32(R_0007C0_CP_STAT));
4020 /* check if cards are posted or not */
4021 if (radeon_boot_test_post_card(rdev) == false)
4023 /* Set asic errata */
4025 /* Initialize clocks */
4026 radeon_get_clock_info(rdev->ddev);
4027 /* initialize AGP */
4028 if (rdev->flags & RADEON_IS_AGP) {
4029 r = radeon_agp_init(rdev);
4031 radeon_agp_disable(rdev);
4034 /* initialize VRAM */
4037 r = radeon_fence_driver_init(rdev);
4040 /* Memory manager */
4041 r = radeon_bo_init(rdev);
4044 if (rdev->flags & RADEON_IS_PCI) {
4045 r = r100_pci_gart_init(rdev);
4049 r100_set_safe_registers(rdev);
4051 rdev->accel_working = true;
4052 r = r100_startup(rdev);
4054 /* Something went wrong with the accel init; stop acceleration */
4055 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4057 radeon_wb_fini(rdev);
4058 radeon_ib_pool_fini(rdev);
4059 radeon_irq_kms_fini(rdev);
4060 if (rdev->flags & RADEON_IS_PCI)
4061 r100_pci_gart_fini(rdev);
4062 rdev->accel_working = false;
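/*
 * MMIO accessors.  Registers that fall inside the mapped BAR are accessed
 * directly; anything beyond rmmio_size (or any access with always_indirect
 * set) is routed through the MM_INDEX/MM_DATA window, which is shared
 * state and therefore serialized with mmio_idx_lock.
 */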
4067 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4068 bool always_indirect)
4070 if (reg < rdev->rmmio_size && !always_indirect)
4071 return bus_read_4(rdev->rmmio, reg);
4075 spin_lock(&rdev->mmio_idx_lock);
4076 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4077 ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
4078 spin_unlock(&rdev->mmio_idx_lock);
4084 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4085 bool always_indirect)
4087 if (reg < rdev->rmmio_size && !always_indirect)
4088 bus_write_4(rdev->rmmio, reg, v);
4090 spin_lock(&rdev->mmio_idx_lock);
4091 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4092 bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
4093 spin_unlock(&rdev->mmio_idx_lock);
4097 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4099 if (reg < rdev->rio_mem_size)
4100 return bus_read_4(rdev->rio_mem, reg);
4102 /* XXX No locking? -- dumbbell@ */
4103 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4104 return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
4108 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4110 if (reg < rdev->rio_mem_size)
4111 bus_write_4(rdev->rio_mem, reg, v);
4113 /* XXX No locking? -- dumbbell@ */
4114 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4115 bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);