 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/r100.c 255573 2013-09-14 17:24:41Z dumbbell $
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon_asic.h"
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

#define FIRMWARE_R100 "radeonkmsfw_R100_cp"
#define FIRMWARE_R200 "radeonkmsfw_R200_cp"
#define FIRMWARE_R300 "radeonkmsfw_R300_cp"
#define FIRMWARE_R420 "radeonkmsfw_R420_cp"
#define FIRMWARE_RS690 "radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600 "radeonkmsfw_RS600_cp"
#define FIRMWARE_R520 "radeonkmsfw_R520_cp"

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
	if (crtc >= rdev->num_crtc)

	if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
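		/*
		 * Clarifying note on the two loops below: the first spins
		 * until any in-progress vblank period ends, the second until
		 * the next one begins, so the caller always observes a fresh
		 * vblank edge rather than the tail of the current one.
		 */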
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
	if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
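	/*
	 * Note on the lock bit: OFFSET_LOCK keeps the CRTC from latching the
	 * new base address while we program it; we wait for update_pending
	 * (GUI_TRIG_OFFSET) to go high and only then clear the lock below,
	 * letting the double-buffered update complete during vblank.
	 */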
	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						rdev->pm.requested_power_state_index = i;
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						rdev->pm.requested_power_state_index = i;
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
	case DYNPM_ACTION_NONE:
		DRM_ERROR("Requested mode for not defined action\n");

	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
				DRM_UDELAY(voltage->delay);
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
				DRM_UDELAY(voltage->delay);

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check whether the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
	bool connected = false;

		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
	bool connected = r100_hpd_sense(rdev, hpd);

		tmp = RREG32(RADEON_FP_GEN_CNTL);
			tmp &= ~RADEON_FP_DETECT_INT_POL;
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	radeon_irq_kms_enable_hpd(rdev, enable);
/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	radeon_irq_kms_disable_hpd(rdev, disable);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
	/* TODO: can we do something here? */
	/* The hardware seems to cache only one entry, so we should discard
	 * it; otherwise, if the first GPU GART read hits that entry, it
	 * could end up at the wrong address. */
int r100_pci_gart_init(struct radeon_device *rdev)
	if (rdev->gart.ptr) {
		DRM_ERROR("R100 PCI GART already initialized\n");
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
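
/*
 * Note: unlike the PCIE GART on later ASICs, the PCI GART page table
 * allocated above lives in system RAM (radeon_gart_table_ram_alloc) rather
 * than VRAM; the hardware fetches entries over the bus via the AIC_PT_BASE
 * pointer programmed in r100_pci_gart_enable() below.
 */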
int r100_pci_gart_enable(struct radeon_device *rdev)
	radeon_gart_restore(rdev);
	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translation */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
void r100_pci_gart_disable(struct radeon_device *rdev)
	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
	u32 *gtt = rdev->gart.ptr;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
	gtt[i] = cpu_to_le32(lower_32_bits(addr));
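	/* Each GART entry is a single little-endian dword holding the low
	 * 32 bits of the page's bus address, so this PCI GART can only map
	 * pages located below 4GB. */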
void r100_pci_gart_fini(struct radeon_device *rdev)
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
int r100_irq_set(struct radeon_device *rdev)
	if (!rdev->irq.installed) {
		DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	WREG32(RADEON_GEN_INT_CNTL, tmp);
void r100_irq_disable(struct radeon_device *rdev)
	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
static uint32_t r100_irq_ack(struct radeon_device *rdev)
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

		WREG32(RADEON_GEN_INT_STATUS, irqs);

	return irqs & irq_mask;
irqreturn_t r100_irq_process(struct radeon_device *rdev)
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	status = r100_irq_ack(rdev);
	if (rdev->shutdown) {
	if (status & RADEON_SW_INT_TEST) {
		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
	/* Vertical blank interrupts */
	if (status & RADEON_CRTC_VBLANK_STAT) {
		if (rdev->irq.crtc_vblank_int[0]) {
			drm_handle_vblank(rdev->ddev, 0);
			rdev->pm.vblank_sync = true;
			DRM_WAKEUP(&rdev->irq.vblank_queue);
		if (atomic_read(&rdev->irq.pflip[0]))
			radeon_crtc_handle_flip(rdev, 0);
	if (status & RADEON_CRTC2_VBLANK_STAT) {
		if (rdev->irq.crtc_vblank_int[1]) {
			drm_handle_vblank(rdev->ddev, 1);
			rdev->pm.vblank_sync = true;
			DRM_WAKEUP(&rdev->irq.vblank_queue);
		if (atomic_read(&rdev->irq.pflip[1]))
			radeon_crtc_handle_flip(rdev, 1);
	if (status & RADEON_FP_DETECT_STAT) {
		queue_hotplug = true;
	if (status & RADEON_FP2_DETECT_STAT) {
		queue_hotplug = true;
	status = r100_irq_ack(rdev);
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are IB scheduling and buffer moves) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	panic("%s: Unused on older asics", __func__);
int r100_copy_blit(struct radeon_device *rdev,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t stride_pixels;

	/* radeon is limited to a 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is in units of 64 bytes */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
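	/* The GMC pitch/offset dwords emitted below pack the pitch (in
	 * 64-byte units, hence the /64 above) into the high bits and the
	 * surface offset in 1KB units (offset >> 10) into the low bits. */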
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
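	/* Sizing sketch: each pass of the loop below emits 10 dwords (one
	 * BITBLT_MULTI header plus 9 payload dwords) and moves at most 8191
	 * pages, matching the cur_pages clamp; e.g. 20000 pages -> 3 loops
	 * -> ndw = 64 + 30 = 94. */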
	r = radeon_ring_lock(rdev, ring, ndw);
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
		num_gpu_pages -= cur_pages;

		/* pages run in the Y direction (height);
		 * page width runs in the X direction (width) */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	radeon_ring_unlock_commit(rdev, ring);

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
	r = radeon_ring_lock(rdev, ring, 2);
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring);
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
	const char *fw_name = NULL;

	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;

	rdev->me_fw = firmware_get(fw_name);
	if (rdev->me_fw == NULL) {
		DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
	} else if (rdev->me_fw->datasize % 8) {
		    "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		    rdev->me_fw->datasize, fw_name);
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
	if (rdev->me_fw != NULL) {
		firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
static void r100_cp_load_microcode(struct radeon_device *rdev)
	const __be32 *fw_data;

	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");

		size = rdev->me_fw->datasize / 4;
		fw_data = (const __be32 *)rdev->me_fw->data;
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
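		/* The ME RAM address appears to auto-increment as the
		 * DATAH/DATAL pair is written, so the loop below can stream
		 * the big-endian firmware image two dwords at a time without
		 * reprogramming the address. */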
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	r = r100_cp_init_microcode(rdev);
		DRM_ERROR("Failed to load firmware!\n");

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
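	/* rb_bufsz is log2 of the ring size in 8-byte units, so this
	 * round-trips for power-of-two sizes: a 1MB ring gives
	 * rb_bufsz = drm_order(131072) = 17, and (1 << 18) * 4 = 1MB again. */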
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);

	/* Each time the CP reads 1024 bytes (16 dwords/quadwords), update
	 * the rptr copy in system RAM */
	/* the CP will read 128 bytes at a time (4 dwords) */
	ring->align_mask = 16 - 1;
	/* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if written more than one time before the
	pre_write_limit = 0;
	/* Setup the CP cache like this (cache size is 96 dwords):
	 *	INDIRECT1 16 to 79
	 *	INDIRECT2 80 to 95
	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
	 * The idea is that most of the GPU commands will go through the
	 * indirect1 buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
		tmp |= RADEON_BUF_SWAP_32BIT;
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);

	WREG32(RADEON_CP_RB_CNTL, tmp);

	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
	/* Set CP mode to bus mastering & enable the CP */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be set up correctly to enable master */
	pci_enable_busmaster(rdev->dev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (!ring->rptr_save_reg /* not resuming from suspend */
	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
void r100_cp_fini(struct radeon_device *rdev)
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");

void r100_cp_disable(struct radeon_device *rdev)
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		DRM_ERROR("Failed to wait GUI idle while "
			  "programming pipes. Bad things might happen.\n");
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
	struct radeon_cs_reloc *reloc;

	r = r100_cs_packet_next_reloc(p, &reloc);
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
		r100_cs_dump_packet(p, pkt);

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
			tile_flags |= RADEON_DST_TILE_MICRO;

		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
		r100_cs_dump_packet(p, pkt);
	track->num_arrays = c;
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n",
			r100_cs_dump_packet(p, pkt);
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n",
			r100_cs_dump_packet(p, pkt);
		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;

		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n",
			r100_cs_dump_packet(p, pkt);
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)

	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
	for (i = 0; i <= pkt->count; i++, idx++) {
		m = 1 << ((reg >> 2) & 31);
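		/* Bitmap layout refresher: the safe-register bitmap carries
		 * one bit per register dword, so reg >> 2 is the dword
		 * index, its low five bits select the bit within a 32-bit
		 * word, and reg >> 7 (used in the range checks above)
		 * selects the word itself. */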
		r = check(p, pkt, idx, reg);
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
	volatile uint32_t *ib;

	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 */
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
	header = radeon_get_ib_value(p, idx);
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);

	/* check that it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);

	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			DRM_ERROR("unknown crtc reloc\n");
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
		r100_cs_dump_packet(p, &p3reloc);
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
static int r100_get_vtx_size(uint32_t vtx_fmt)
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
	/* FIXME: only allow PACKET3 blit? easier to check for out of
	 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
			track->cb[0].cpp = 1;
			track->cb[0].cpp = 2;
			track->cb[0].cpp = 4;
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		track->zb_dirty = true;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			r100_cs_dump_packet(p, pkt);
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
	case RADEON_PP_CNTL:
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		track->tex_dirty = true;
		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  radeon_bo_size(robj));
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;

	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = radeon_get_ib_value(p, idx+1);

		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
	case PACKET3_3D_DRAW_IMMD:
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
	/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r100_cs_parse(struct radeon_cs_parser *p)
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;

	track = kmalloc(sizeof(*track), M_DRM, M_ZERO | M_WAITOK);
	r100_cs_track_clear(p->rdev, track);
		r = r100_cs_packet_parse(p, &pkt, p->idx);
			drm_free(p->track, M_DRM);
		p->idx += pkt.count + 2;
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
			r = r100_packet3_check(p, &pkt);
			DRM_ERROR("Unknown packet type %d !\n",
			drm_free(p->track, M_DRM);
		drm_free(p->track, M_DRM);
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	drm_free(p->track, M_DRM);
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
	DRM_ERROR("compress format %d\n", t->compress_format);
2163 static int r100_track_compress_size(int compress_format, int w, int h)
2165 int block_width, block_height, block_bytes;
2166 int wblocks, hblocks;
2173 switch (compress_format) {
2174 case R100_TRACK_COMP_DXT1:
2179 case R100_TRACK_COMP_DXT35:
2185 hblocks = (h + block_height - 1) / block_height;
2186 wblocks = (w + block_width - 1) / block_width;
2187 if (wblocks < min_wblocks)
2188 wblocks = min_wblocks;
2189 sz = wblocks * hblocks * block_bytes;
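/* Editor's note -- worked example for the size computation above, assuming
 * the usual 4x4-texel DXT blocks: DXT1 packs a block into 8 bytes, so a
 * 64x64 texture needs (64/4) * (64/4) * 8 = 16 * 16 * 8 = 2048 bytes;
 * DXT3/DXT5 blocks are 16 bytes, doubling that. The round-up in hblocks
 * and wblocks keeps partial edge blocks accounted for when a dimension is
 * not a multiple of 4.
 */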
2193 static int r100_cs_track_cube(struct radeon_device *rdev,
2194 struct r100_cs_track *track, unsigned idx)
2196 unsigned face, w, h;
2197 struct radeon_bo *cube_robj;
2199 unsigned compress_format = track->textures[idx].compress_format;
2201 for (face = 0; face < 5; face++) {
2202 cube_robj = track->textures[idx].cube_info[face].robj;
2203 w = track->textures[idx].cube_info[face].width;
2204 h = track->textures[idx].cube_info[face].height;
2206 if (compress_format) {
2207 size = r100_track_compress_size(compress_format, w, h);
2210 size *= track->textures[idx].cpp;
2212 size += track->textures[idx].cube_info[face].offset;
2214 if (size > radeon_bo_size(cube_robj)) {
2215 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2216 size, radeon_bo_size(cube_robj));
2217 r100_cs_track_texture_print(&track->textures[idx]);
2224 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2225 struct r100_cs_track *track)
2227 struct radeon_bo *robj;
2229 unsigned u, i, w, h, d;
2232 for (u = 0; u < track->num_texture; u++) {
2233 if (!track->textures[u].enabled)
2235 if (track->textures[u].lookup_disable)
2237 robj = track->textures[u].robj;
2239 DRM_ERROR("No texture bound to unit %u\n", u);
2243 for (i = 0; i <= track->textures[u].num_levels; i++) {
2244 if (track->textures[u].use_pitch) {
2245 if (rdev->family < CHIP_R300)
2246 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2248 w = track->textures[u].pitch / (1 << i);
2250 w = track->textures[u].width;
2251 if (rdev->family >= CHIP_RV515)
2252 w |= track->textures[u].width_11;
2254 if (track->textures[u].roundup_w)
2255 w = roundup_pow_of_two(w);
2257 h = track->textures[u].height;
2258 if (rdev->family >= CHIP_RV515)
2259 h |= track->textures[u].height_11;
2261 if (track->textures[u].roundup_h)
2262 h = roundup_pow_of_two(h);
2263 if (track->textures[u].tex_coord_type == 1) {
2264 d = (1 << track->textures[u].txdepth) / (1 << i);
2270 if (track->textures[u].compress_format) {
2272 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2273 /* compressed textures are block based */
2277 size *= track->textures[u].cpp;
2279 switch (track->textures[u].tex_coord_type) {
2284 if (track->separate_cube) {
2285 ret = r100_cs_track_cube(rdev, track, u);
2292 DRM_ERROR("Invalid texture coordinate type %u for unit "
2293 "%u\n", track->textures[u].tex_coord_type, u);
2296 if (size > radeon_bo_size(robj)) {
2297 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2298 "%lu\n", u, size, radeon_bo_size(robj));
2299 r100_cs_track_texture_print(&track->textures[u]);
2306 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2312 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2314 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2315 !track->blend_read_enable)
2318 for (i = 0; i < num_cb; i++) {
2319 if (track->cb[i].robj == NULL) {
2320 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2323 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2324 size += track->cb[i].offset;
2325 if (size > radeon_bo_size(track->cb[i].robj)) {
2326 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2327 "(need %lu have %lu) !\n", i, size,
2328 radeon_bo_size(track->cb[i].robj));
2329 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2330 i, track->cb[i].pitch, track->cb[i].cpp,
2331 track->cb[i].offset, track->maxy);
2335 track->cb_dirty = false;
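	/* Editor's note -- worked example for the check above: a color buffer
	 * with a 1024-pixel pitch, 4 bytes per pixel and maxy = 768 needs
	 * 1024 * 4 * 768 = 3145728 bytes (3 MiB) plus its offset; a smaller
	 * backing BO is rejected.
	 */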
2337 if (track->zb_dirty && track->z_enabled) {
2338 if (track->zb.robj == NULL) {
2339 DRM_ERROR("[drm] No buffer for z buffer !\n");
2342 size = track->zb.pitch * track->zb.cpp * track->maxy;
2343 size += track->zb.offset;
2344 if (size > radeon_bo_size(track->zb.robj)) {
2345 DRM_ERROR("[drm] Buffer too small for z buffer "
2346 "(need %lu have %lu) !\n", size,
2347 radeon_bo_size(track->zb.robj));
2348 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2349 track->zb.pitch, track->zb.cpp,
2350 track->zb.offset, track->maxy);
2354 track->zb_dirty = false;
2356 if (track->aa_dirty && track->aaresolve) {
2357 if (track->aa.robj == NULL) {
2358 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2361 /* I believe the format comes from colorbuffer0. */
2362 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2363 size += track->aa.offset;
2364 if (size > radeon_bo_size(track->aa.robj)) {
2365 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2366 "(need %lu have %lu) !\n", i, size,
2367 radeon_bo_size(track->aa.robj));
2368 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2369 i, track->aa.pitch, track->cb[0].cpp,
2370 track->aa.offset, track->maxy);
2374 track->aa_dirty = false;
2376 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2377 if (track->vap_vf_cntl & (1 << 14)) {
2378 nverts = track->vap_alt_nverts;
2380 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2382 switch (prim_walk) {
2384 for (i = 0; i < track->num_arrays; i++) {
2385 size = track->arrays[i].esize * track->max_indx * 4;
2386 if (track->arrays[i].robj == NULL) {
2387 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2388 "bound\n", prim_walk, i);
2391 if (size > radeon_bo_size(track->arrays[i].robj)) {
2392 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2393 "need %lu dwords have %lu dwords\n",
2394 prim_walk, i, size >> 2,
2395 radeon_bo_size(track->arrays[i].robj)
2397 DRM_ERROR("Max indices %u\n", track->max_indx);
2403 for (i = 0; i < track->num_arrays; i++) {
2404 size = track->arrays[i].esize * (nverts - 1) * 4;
2405 if (track->arrays[i].robj == NULL) {
2406 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2407 "bound\n", prim_walk, i);
2410 if (size > radeon_bo_size(track->arrays[i].robj)) {
2411 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2412 "need %lu dwords have %lu dwords\n",
2413 prim_walk, i, size >> 2,
2414 radeon_bo_size(track->arrays[i].robj)
2421 size = track->vtx_size * nverts;
2422 if (size != track->immd_dwords) {
2423 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
2424 track->immd_dwords, size);
2425 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2426 nverts, track->vtx_size);
2431 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2436 if (track->tex_dirty) {
2437 track->tex_dirty = false;
2438 return r100_cs_track_texture_check(rdev, track);
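/* Editor's note -- the vertex-path sizing above, in summary (case labels
 * fall on elided lines; the mapping below is from the upstream source):
 * prim_walk == 1 (indexed): each array needs esize * max_indx * 4 bytes,
 * e.g. esize = 3 dwords and max_indx = 1000 -> 3 * 1000 * 4 = 12000 bytes;
 * prim_walk == 2 (vertex list): esize * (nverts - 1) * 4 bytes;
 * prim_walk == 3 (immediate): the packet itself must carry exactly
 * vtx_size * nverts dwords.
 */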
2443 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2447 track->cb_dirty = true;
2448 track->zb_dirty = true;
2449 track->tex_dirty = true;
2450 track->aa_dirty = true;
2452 if (rdev->family < CHIP_R300) {
2454 if (rdev->family <= CHIP_RS200)
2455 track->num_texture = 3;
2457 track->num_texture = 6;
2459 track->separate_cube = 1;
2462 track->num_texture = 16;
2464 track->separate_cube = 0;
2465 track->aaresolve = false;
2466 track->aa.robj = NULL;
2469 for (i = 0; i < track->num_cb; i++) {
2470 track->cb[i].robj = NULL;
2471 track->cb[i].pitch = 8192;
2472 track->cb[i].cpp = 16;
2473 track->cb[i].offset = 0;
2475 track->z_enabled = true;
2476 track->zb.robj = NULL;
2477 track->zb.pitch = 8192;
2479 track->zb.offset = 0;
2480 track->vtx_size = 0x7F;
2481 track->immd_dwords = 0xFFFFFFFFUL;
2482 track->num_arrays = 11;
2483 track->max_indx = 0x00FFFFFFUL;
2484 for (i = 0; i < track->num_arrays; i++) {
2485 track->arrays[i].robj = NULL;
2486 track->arrays[i].esize = 0x7F;
2488 for (i = 0; i < track->num_texture; i++) {
2489 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2490 track->textures[i].pitch = 16536;
2491 track->textures[i].width = 16536;
2492 track->textures[i].height = 16536;
2493 track->textures[i].width_11 = 1 << 11;
2494 track->textures[i].height_11 = 1 << 11;
2495 track->textures[i].num_levels = 12;
2496 if (rdev->family <= CHIP_RS200) {
2497 track->textures[i].tex_coord_type = 0;
2498 track->textures[i].txdepth = 0;
2500 track->textures[i].txdepth = 16;
2501 track->textures[i].tex_coord_type = 1;
2503 track->textures[i].cpp = 64;
2504 track->textures[i].robj = NULL;
2505 /* CS IB emission code makes sure texture units are disabled */
2506 track->textures[i].enabled = false;
2507 track->textures[i].lookup_disable = false;
2508 track->textures[i].roundup_w = true;
2509 track->textures[i].roundup_h = true;
2510 if (track->separate_cube)
2511 for (face = 0; face < 5; face++) {
2512 track->textures[i].cube_info[face].robj = NULL;
2513 track->textures[i].cube_info[face].width = 16536;
2514 track->textures[i].cube_info[face].height = 16536;
2515 track->textures[i].cube_info[face].offset = 0;
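/* Editor's note on the defaults above: the reset values are deliberately
 * worst-case (8192-pixel pitches, cpp 16, 16536-texel dimensions, esize
 * 0x7F, ...), so a command stream that draws without first emitting the
 * corresponding state makes the size checks in r100_cs_track_check()
 * compute a huge requirement -- e.g. 8192 * 16 * maxy bytes for a color
 * buffer -- and fail safely instead of permitting an unchecked access.
 */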
2521 * Global GPU functions
2523 static void r100_errata(struct radeon_device *rdev)
2525 rdev->pll_errata = 0;
2527 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2528 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2531 if (rdev->family == CHIP_RV100 ||
2532 rdev->family == CHIP_RS100 ||
2533 rdev->family == CHIP_RS200) {
2534 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2538 static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2543 for (i = 0; i < rdev->usec_timeout; i++) {
2544 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2553 int r100_gui_wait_for_idle(struct radeon_device *rdev)
2558 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2559 DRM_ERROR("radeon: wait for empty RBBM fifo failed !"
2560 " Bad things might happen.\n");
2562 for (i = 0; i < rdev->usec_timeout; i++) {
2563 tmp = RREG32(RADEON_RBBM_STATUS);
2564 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2572 int r100_mc_wait_for_idle(struct radeon_device *rdev)
2577 for (i = 0; i < rdev->usec_timeout; i++) {
2578 /* read MC_STATUS */
2579 tmp = RREG32(RADEON_MC_STATUS);
2580 if (tmp & RADEON_MC_IDLE) {
2588 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2592 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2593 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2594 radeon_ring_lockup_update(ring);
2597 /* force CP activity */
2598 radeon_ring_force_activity(rdev, ring);
2599 return radeon_ring_test_lockup(rdev, ring);
2602 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2603 void r100_enable_bm(struct radeon_device *rdev)
2606 /* Enable bus mastering */
2607 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2608 WREG32(RADEON_BUS_CNTL, tmp);
2611 void r100_bm_disable(struct radeon_device *rdev)
2615 /* disable bus mastering */
2616 tmp = RREG32(R_000030_BUS_CNTL);
2617 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2619 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2621 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2622 tmp = RREG32(RADEON_BUS_CNTL);
2624 pci_disable_busmaster(rdev->dev);
2628 int r100_asic_reset(struct radeon_device *rdev)
2630 struct r100_mc_save save;
2634 status = RREG32(R_000E40_RBBM_STATUS);
2635 if (!G_000E40_GUI_ACTIVE(status)) {
2638 r100_mc_stop(rdev, &save);
2639 status = RREG32(R_000E40_RBBM_STATUS);
2640 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2642 WREG32(RADEON_CP_CSQ_CNTL, 0);
2643 tmp = RREG32(RADEON_CP_RB_CNTL);
2644 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2645 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2646 WREG32(RADEON_CP_RB_WPTR, 0);
2647 WREG32(RADEON_CP_RB_CNTL, tmp);
2648 /* save PCI state */
2649 pci_save_state(device_get_parent(rdev->dev));
2650 /* disable bus mastering */
2651 r100_bm_disable(rdev);
2652 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2653 S_0000F0_SOFT_RESET_RE(1) |
2654 S_0000F0_SOFT_RESET_PP(1) |
2655 S_0000F0_SOFT_RESET_RB(1));
2656 RREG32(R_0000F0_RBBM_SOFT_RESET);
2658 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2660 status = RREG32(R_000E40_RBBM_STATUS);
2661 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2663 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2664 RREG32(R_0000F0_RBBM_SOFT_RESET);
2666 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2668 status = RREG32(R_000E40_RBBM_STATUS);
2669 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2670 /* restore PCI & busmastering */
2671 pci_restore_state(device_get_parent(rdev->dev));
2672 r100_enable_bm(rdev);
2673 /* Check if GPU is idle */
2674 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2675 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2676 dev_err(rdev->dev, "failed to reset GPU\n");
2679 dev_info(rdev->dev, "GPU reset succeeded\n");
2680 r100_mc_resume(rdev, &save);
2684 void r100_set_common_regs(struct radeon_device *rdev)
2686 struct drm_device *dev = rdev->ddev;
2687 bool force_dac2 = false;
2690 /* set these so they don't interfere with anything */
2691 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2692 WREG32(RADEON_SUBPIC_CNTL, 0);
2693 WREG32(RADEON_VIPH_CONTROL, 0);
2694 WREG32(RADEON_I2C_CNTL_1, 0);
2695 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2696 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2697 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2699 /* always set up dac2 on rn50 and some rv100 as lots
2700 * of servers seem to wire it up to a VGA port but
2701 * don't report it in the bios connector table.
2704 switch (dev->pci_device) {
2713 /* DELL triple head servers */
2714 if ((dev->pci_subvendor == 0x1028 /* DELL */) &&
2715 ((dev->pci_subdevice == 0x016c) ||
2716 (dev->pci_subdevice == 0x016d) ||
2717 (dev->pci_subdevice == 0x016e) ||
2718 (dev->pci_subdevice == 0x016f) ||
2719 (dev->pci_subdevice == 0x0170) ||
2720 (dev->pci_subdevice == 0x017d) ||
2721 (dev->pci_subdevice == 0x017e) ||
2722 (dev->pci_subdevice == 0x0183) ||
2723 (dev->pci_subdevice == 0x018a) ||
2724 (dev->pci_subdevice == 0x019a)))
2730 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2731 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2732 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2734 /* For CRT on DAC2, don't turn it on if BIOS didn't
2735 enable it, even if it's detected.
2738 /* force it to crtc0 */
2739 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2740 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2741 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2743 /* set up the TV DAC */
2744 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2745 RADEON_TV_DAC_STD_MASK |
2746 RADEON_TV_DAC_RDACPD |
2747 RADEON_TV_DAC_GDACPD |
2748 RADEON_TV_DAC_BDACPD |
2749 RADEON_TV_DAC_BGADJ_MASK |
2750 RADEON_TV_DAC_DACADJ_MASK);
2751 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2752 RADEON_TV_DAC_NHOLD |
2753 RADEON_TV_DAC_STD_PS2 |
2756 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2757 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2758 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2761 /* switch PM block to ACPI mode */
2762 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2763 tmp &= ~RADEON_PM_MODE_SEL;
2764 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2771 static void r100_vram_get_type(struct radeon_device *rdev)
2775 rdev->mc.vram_is_ddr = false;
2776 if (rdev->flags & RADEON_IS_IGP)
2777 rdev->mc.vram_is_ddr = true;
2778 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2779 rdev->mc.vram_is_ddr = true;
2780 if ((rdev->family == CHIP_RV100) ||
2781 (rdev->family == CHIP_RS100) ||
2782 (rdev->family == CHIP_RS200)) {
2783 tmp = RREG32(RADEON_MEM_CNTL);
2784 if (tmp & RV100_HALF_MODE) {
2785 rdev->mc.vram_width = 32;
2787 rdev->mc.vram_width = 64;
2789 if (rdev->flags & RADEON_SINGLE_CRTC) {
2790 rdev->mc.vram_width /= 4;
2791 rdev->mc.vram_is_ddr = true;
2793 } else if (rdev->family <= CHIP_RV280) {
2794 tmp = RREG32(RADEON_MEM_CNTL);
2795 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2796 rdev->mc.vram_width = 128;
2798 rdev->mc.vram_width = 64;
2802 rdev->mc.vram_width = 128;
2806 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2811 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2813 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2814 * that is, has the 2nd generation multifunction PCI interface
2816 if (rdev->family == CHIP_RV280 ||
2817 rdev->family >= CHIP_RV350) {
2818 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2819 ~RADEON_HDP_APER_CNTL);
2820 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2821 return aper_size * 2;
2824 /* Older cards have all sorts of funny issues to deal with. First
2825 * check if it's a multifunction card by reading the PCI config
2826 * header type... Limit those to one aperture size
2828 byte = pci_read_config(rdev->dev, 0xe, 1);
2830 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2831 DRM_INFO("Limiting VRAM to one aperture\n");
2835 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2836 * has set it up. We don't write this as it's broken on some ASICs but
2837 * we expect the BIOS to have done the right thing (might be too optimistic...)
2839 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2840 return aper_size * 2;
2844 void r100_vram_init_sizes(struct radeon_device *rdev)
2846 u64 config_aper_size;
2848 /* work out accessible VRAM */
2849 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2850 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2851 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2852 /* FIXME we don't use the second aperture yet when we could use it */
2853 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2854 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2855 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2856 if (rdev->flags & RADEON_IS_IGP) {
2858 /* read NB_TOM to get the amount of RAM stolen for the GPU */
2859 tom = RREG32(RADEON_NB_TOM);
2860 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
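	/* Editor's note -- worked example: NB_TOM packs the stolen range in
	 * 64K units, bottom in bits 15:0 and top in bits 31:16, so
	 * tom = 0x03ff0000 decodes to ((0x03ff - 0x0000 + 1) << 16) =
	 * 0x04000000 bytes = 64 MiB.
	 */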
2861 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2862 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2864 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2865 /* Some production boards of m6 will report 0 if it's 8 MB
2868 if (rdev->mc.real_vram_size == 0) {
2869 rdev->mc.real_vram_size = 8192 * 1024;
2870 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2872 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2873 * Novell bug 204882 + along with lots of ubuntu ones
2875 if (rdev->mc.aper_size > config_aper_size)
2876 config_aper_size = rdev->mc.aper_size;
2878 if (config_aper_size > rdev->mc.real_vram_size)
2879 rdev->mc.mc_vram_size = config_aper_size;
2881 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2885 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2889 temp = RREG32(RADEON_CONFIG_CNTL);
2890 if (state == false) {
2891 temp &= ~RADEON_CFG_VGA_RAM_EN;
2892 temp |= RADEON_CFG_VGA_IO_DIS;
2894 temp &= ~RADEON_CFG_VGA_IO_DIS;
2896 WREG32(RADEON_CONFIG_CNTL, temp);
2899 static void r100_mc_init(struct radeon_device *rdev)
2903 r100_vram_get_type(rdev);
2904 r100_vram_init_sizes(rdev);
2905 base = rdev->mc.aper_base;
2906 if (rdev->flags & RADEON_IS_IGP)
2907 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2908 radeon_vram_location(rdev, &rdev->mc, base);
2909 rdev->mc.gtt_base_align = 0;
2910 if (!(rdev->flags & RADEON_IS_AGP))
2911 radeon_gtt_location(rdev, &rdev->mc);
2912 radeon_update_bandwidth_info(rdev);
2917 * Indirect register accessors
2919 void r100_pll_errata_after_index(struct radeon_device *rdev)
2921 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2922 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2923 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2927 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2929 /* This workaround is necessary on RV100, RS100 and RS200 chips,
2930 * or the chip could hang on a subsequent access
2932 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2936 /* This function is required to work around a hardware bug in some (all?)
2937 * revisions of the R300. This workaround should be called after every
2938 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2939 * may not be correct.
2941 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2944 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2945 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2946 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2947 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2948 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2952 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2956 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2957 r100_pll_errata_after_index(rdev);
2958 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2959 r100_pll_errata_after_data(rdev);
2963 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2965 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2966 r100_pll_errata_after_index(rdev);
2967 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2968 r100_pll_errata_after_data(rdev);
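/* Editor's note -- illustrative usage sketch (not part of the driver): a
 * read-modify-write of a PLL register should go through the accessors
 * above so the errata dummy reads/delays fire on every index/data access:
 *
 *	tmp = r100_pll_rreg(rdev, RADEON_PLL_PWRMGT_CNTL);
 *	tmp &= ~RADEON_PM_MODE_SEL;
 *	r100_pll_wreg(rdev, RADEON_PLL_PWRMGT_CNTL, tmp);
 *
 * The RREG32_PLL/WREG32_PLL macros used earlier in this file dispatch to
 * these functions on r1xx-class parts.
 */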
2971 static void r100_set_safe_registers(struct radeon_device *rdev)
2973 if (ASIC_IS_RN50(rdev)) {
2974 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2975 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm);
2976 } else if (rdev->family < CHIP_R200) {
2977 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2978 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm);
2980 r200_set_safe_registers(rdev);
2987 #if defined(CONFIG_DEBUG_FS)
2988 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2990 struct drm_info_node *node = (struct drm_info_node *) m->private;
2991 struct drm_device *dev = node->minor->dev;
2992 struct radeon_device *rdev = dev->dev_private;
2993 uint32_t reg, value;
2996 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2997 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2998 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2999 for (i = 0; i < 64; i++) {
3000 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
3001 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
3002 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
3003 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
3004 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
3009 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
3011 struct drm_info_node *node = (struct drm_info_node *) m->private;
3012 struct drm_device *dev = node->minor->dev;
3013 struct radeon_device *rdev = dev->dev_private;
3014 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3016 unsigned count, i, j;
3018 radeon_ring_free_size(rdev, ring);
3019 rdp = RREG32(RADEON_CP_RB_RPTR);
3020 wdp = RREG32(RADEON_CP_RB_WPTR);
3021 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
3022 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3023 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
3024 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
3025 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
3026 seq_printf(m, "%u dwords in ring\n", count);
3027 for (j = 0; j <= count; j++) {
3028 i = (rdp + j) & ring->ptr_mask;
3029 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
3035 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
3037 struct drm_info_node *node = (struct drm_info_node *) m->private;
3038 struct drm_device *dev = node->minor->dev;
3039 struct radeon_device *rdev = dev->dev_private;
3040 uint32_t csq_stat, csq2_stat, tmp;
3041 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
3044 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3045 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
3046 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
3047 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
3048 r_rptr = (csq_stat >> 0) & 0x3ff;
3049 r_wptr = (csq_stat >> 10) & 0x3ff;
3050 ib1_rptr = (csq_stat >> 20) & 0x3ff;
3051 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
3052 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
3053 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
3054 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
3055 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
3056 seq_printf(m, "Ring rptr %u\n", r_rptr);
3057 seq_printf(m, "Ring wptr %u\n", r_wptr);
3058 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
3059 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
3060 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
3061 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
3062 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
3063 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
3064 seq_printf(m, "Ring fifo:\n");
3065 for (i = 0; i < 256; i++) {
3066 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3067 tmp = RREG32(RADEON_CP_CSQ_DATA);
3068 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3070 seq_printf(m, "Indirect1 fifo:\n");
3071 for (i = 256; i <= 512; i++) {
3072 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3073 tmp = RREG32(RADEON_CP_CSQ_DATA);
3074 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3076 seq_printf(m, "Indirect2 fifo:\n");
3077 for (i = 640; i < ib1_wptr; i++) {
3078 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3079 tmp = RREG32(RADEON_CP_CSQ_DATA);
3080 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3085 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3087 struct drm_info_node *node = (struct drm_info_node *) m->private;
3088 struct drm_device *dev = node->minor->dev;
3089 struct radeon_device *rdev = dev->dev_private;
3092 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3093 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3094 tmp = RREG32(RADEON_MC_FB_LOCATION);
3095 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3096 tmp = RREG32(RADEON_BUS_CNTL);
3097 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3098 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3099 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3100 tmp = RREG32(RADEON_AGP_BASE);
3101 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3102 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3103 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3104 tmp = RREG32(0x01D0);
3105 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3106 tmp = RREG32(RADEON_AIC_LO_ADDR);
3107 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3108 tmp = RREG32(RADEON_AIC_HI_ADDR);
3109 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3110 tmp = RREG32(0x01E4);
3111 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3115 static struct drm_info_list r100_debugfs_rbbm_list[] = {
3116 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3119 static struct drm_info_list r100_debugfs_cp_list[] = {
3120 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3121 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3124 static struct drm_info_list r100_debugfs_mc_info_list[] = {
3125 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3129 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3131 #if defined(CONFIG_DEBUG_FS)
3132 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3138 int r100_debugfs_cp_init(struct radeon_device *rdev)
3140 #if defined(CONFIG_DEBUG_FS)
3141 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3147 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3149 #if defined(CONFIG_DEBUG_FS)
3150 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
3156 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3157 uint32_t tiling_flags, uint32_t pitch,
3158 uint32_t offset, uint32_t obj_size)
3160 int surf_index = reg * 16;
3163 if (rdev->family <= CHIP_RS200) {
3164 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3165 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3166 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3167 if (tiling_flags & RADEON_TILING_MACRO)
3168 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3169 } else if (rdev->family <= CHIP_RV280) {
3170 if (tiling_flags & (RADEON_TILING_MACRO))
3171 flags |= R200_SURF_TILE_COLOR_MACRO;
3172 if (tiling_flags & RADEON_TILING_MICRO)
3173 flags |= R200_SURF_TILE_COLOR_MICRO;
3175 if (tiling_flags & RADEON_TILING_MACRO)
3176 flags |= R300_SURF_TILE_MACRO;
3177 if (tiling_flags & RADEON_TILING_MICRO)
3178 flags |= R300_SURF_TILE_MICRO;
3181 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3182 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3183 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3184 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3186 /* when we aren't tiling, the pitch seems to need to be further divided down. - tested on power5 + rn50 server */
3187 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
3188 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
3189 if (ASIC_IS_RN50(rdev))
3193 /* r100/r200 divide by 16 */
3194 if (rdev->family < CHIP_R300)
3195 flags |= pitch / 16;
3200 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3201 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3202 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3203 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3207 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3209 int surf_index = reg * 16;
3210 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
3213 void r100_bandwidth_update(struct radeon_device *rdev)
3215 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3216 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3217 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3218 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3219 fixed20_12 memtcas_ff[8] = {
3224 dfixed_init_half(1),
3225 dfixed_init_half(2),
3228 fixed20_12 memtcas_rs480_ff[8] = {
3234 dfixed_init_half(1),
3235 dfixed_init_half(2),
3236 dfixed_init_half(3),
3238 fixed20_12 memtcas2_ff[8] = {
3248 fixed20_12 memtrbs[8] = {
3250 dfixed_init_half(1),
3252 dfixed_init_half(2),
3254 dfixed_init_half(3),
3258 fixed20_12 memtrbs_r4xx[8] = {
3268 fixed20_12 min_mem_eff;
3269 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3270 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3271 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3272 disp_drain_rate2, read_return_rate;
3273 fixed20_12 time_disp1_drop_priority;
3275 int cur_size = 16; /* in octawords */
3276 int critical_point = 0, critical_point2;
3277 /* uint32_t read_return_rate, time_disp1_drop_priority; */
3278 int stop_req, max_stop_req;
3279 struct drm_display_mode *mode1 = NULL;
3280 struct drm_display_mode *mode2 = NULL;
3281 uint32_t pixel_bytes1 = 0;
3282 uint32_t pixel_bytes2 = 0;
3284 radeon_update_display_priority(rdev);
3286 if (rdev->mode_info.crtcs[0]->base.enabled) {
3287 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3288 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
3290 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3291 if (rdev->mode_info.crtcs[1]->base.enabled) {
3292 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3293 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
3297 min_mem_eff.full = dfixed_const_8(0);
3299 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3300 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3301 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3302 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3303 /* check crtc enables */
3305 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3307 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3308 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3312 * determine if there is enough bandwidth for the current mode
3314 sclk_ff = rdev->pm.sclk;
3315 mclk_ff = rdev->pm.mclk;
3317 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3318 temp_ff.full = dfixed_const(temp);
3319 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3323 peak_disp_bw.full = 0;
3325 temp_ff.full = dfixed_const(1000);
3326 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3327 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3328 temp_ff.full = dfixed_const(pixel_bytes1);
3329 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3332 temp_ff.full = dfixed_const(1000);
3333 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3334 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3335 temp_ff.full = dfixed_const(pixel_bytes2);
3336 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
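	/* Editor's note -- worked example: a 1024x768@60 mode has a pixel
	 * clock of about 65000 kHz, so pix_clk = 65000 / 1000 = 65 MHz; at
	 * 4 bytes per pixel each such CRTC contributes 65 * 4 = 260 MB/s to
	 * peak_disp_bw, compared below against mem_bw derated by
	 * min_mem_eff (about 0.8).
	 */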
3339 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3340 if (peak_disp_bw.full >= mem_bw.full) {
3341 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3342 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3345 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3346 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3347 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3348 mem_trcd = ((temp >> 2) & 0x3) + 1;
3349 mem_trp = ((temp & 0x3)) + 1;
3350 mem_tras = ((temp & 0x70) >> 4) + 1;
3351 } else if (rdev->family == CHIP_R300 ||
3352 rdev->family == CHIP_R350) { /* r300, r350 */
3353 mem_trcd = (temp & 0x7) + 1;
3354 mem_trp = ((temp >> 8) & 0x7) + 1;
3355 mem_tras = ((temp >> 11) & 0xf) + 4;
3356 } else if (rdev->family == CHIP_RV350 ||
3357 rdev->family <= CHIP_RV380) {
3359 mem_trcd = (temp & 0x7) + 3;
3360 mem_trp = ((temp >> 8) & 0x7) + 3;
3361 mem_tras = ((temp >> 11) & 0xf) + 6;
3362 } else if (rdev->family == CHIP_R420 ||
3363 rdev->family == CHIP_R423 ||
3364 rdev->family == CHIP_RV410) {
3366 mem_trcd = (temp & 0xf) + 3;
3369 mem_trp = ((temp >> 8) & 0xf) + 3;
3372 mem_tras = ((temp >> 12) & 0x1f) + 6;
3375 } else { /* RV200, R200 */
3376 mem_trcd = (temp & 0x7) + 1;
3377 mem_trp = ((temp >> 8) & 0x7) + 1;
3378 mem_tras = ((temp >> 12) & 0xf) + 4;
3381 trcd_ff.full = dfixed_const(mem_trcd);
3382 trp_ff.full = dfixed_const(mem_trp);
3383 tras_ff.full = dfixed_const(mem_tras);
3385 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3386 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3387 data = (temp & (7 << 20)) >> 20;
3388 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3389 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3390 tcas_ff = memtcas_rs480_ff[data];
3392 tcas_ff = memtcas_ff[data];
3394 tcas_ff = memtcas2_ff[data];
3396 if (rdev->family == CHIP_RS400 ||
3397 rdev->family == CHIP_RS480) {
3398 /* extra CAS latency stored in bits 23-25, 0-4 clocks */
3399 data = (temp >> 23) & 0x7;
3401 tcas_ff.full += dfixed_const(data);
3404 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3405 /* on the R300, Tcas is included in Trbs.
3407 temp = RREG32(RADEON_MEM_CNTL);
3408 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3410 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3411 temp = RREG32(R300_MC_IND_INDEX);
3412 temp &= ~R300_MC_IND_ADDR_MASK;
3413 temp |= R300_MC_READ_CNTL_CD_mcind;
3414 WREG32(R300_MC_IND_INDEX, temp);
3415 temp = RREG32(R300_MC_IND_DATA);
3416 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3418 temp = RREG32(R300_MC_READ_CNTL_AB);
3419 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3422 temp = RREG32(R300_MC_READ_CNTL_AB);
3423 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3425 if (rdev->family == CHIP_RV410 ||
3426 rdev->family == CHIP_R420 ||
3427 rdev->family == CHIP_R423)
3428 trbs_ff = memtrbs_r4xx[data];
3430 trbs_ff = memtrbs[data];
3431 tcas_ff.full += trbs_ff.full;
3434 sclk_eff_ff.full = sclk_ff.full;
3436 if (rdev->flags & RADEON_IS_AGP) {
3437 fixed20_12 agpmode_ff;
3438 agpmode_ff.full = dfixed_const(radeon_agpmode);
3439 temp_ff.full = dfixed_const_666(16);
3440 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3442 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3444 if (ASIC_IS_R300(rdev)) {
3445 sclk_delay_ff.full = dfixed_const(250);
3447 if ((rdev->family == CHIP_RV100) ||
3448 rdev->flags & RADEON_IS_IGP) {
3449 if (rdev->mc.vram_is_ddr)
3450 sclk_delay_ff.full = dfixed_const(41);
3452 sclk_delay_ff.full = dfixed_const(33);
3454 if (rdev->mc.vram_width == 128)
3455 sclk_delay_ff.full = dfixed_const(57);
3457 sclk_delay_ff.full = dfixed_const(41);
3461 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3463 if (rdev->mc.vram_is_ddr) {
3464 if (rdev->mc.vram_width == 32) {
3465 k1.full = dfixed_const(40);
3468 k1.full = dfixed_const(20);
3472 k1.full = dfixed_const(40);
3476 temp_ff.full = dfixed_const(2);
3477 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3478 temp_ff.full = dfixed_const(c);
3479 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3480 temp_ff.full = dfixed_const(4);
3481 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3482 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3483 mc_latency_mclk.full += k1.full;
3485 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3486 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3489 HW cursor time assuming worst case of full size colour cursor.
3491 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3492 temp_ff.full += trcd_ff.full;
3493 if (temp_ff.full < tras_ff.full)
3494 temp_ff.full = tras_ff.full;
3495 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3497 temp_ff.full = dfixed_const(cur_size);
3498 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3500 Find the total latency for the display data.
3502 disp_latency_overhead.full = dfixed_const(8);
3503 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3504 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3505 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3507 if (mc_latency_mclk.full > mc_latency_sclk.full)
3508 disp_latency.full = mc_latency_mclk.full;
3510 disp_latency.full = mc_latency_sclk.full;
3512 /* setup Max GRPH_STOP_REQ default value */
3513 if (ASIC_IS_RV100(rdev))
3514 max_stop_req = 0x5c;
3516 max_stop_req = 0x7c;
3520 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3521 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3523 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3525 if (stop_req > max_stop_req)
3526 stop_req = max_stop_req;
3529 Find the drain rate of the display buffer.
3531 temp_ff.full = dfixed_const((16/pixel_bytes1));
3532 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3535 Find the critical point of the display buffer.
3537 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3538 crit_point_ff.full += dfixed_const_half(0);
3540 critical_point = dfixed_trunc(crit_point_ff);
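	/* Editor's note -- worked equation: the lines above compute
	 * critical_point = trunc(disp_drain_rate * disp_latency + 0.5);
	 * e.g. a drain rate of 16.25 and a latency of 3.2 give
	 * trunc(52.0 + 0.5) = 52, the display-FIFO fill level below which
	 * display requests get priority.
	 */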
3542 if (rdev->disp_priority == 2) {
3547 The critical point should never be above max_stop_req-4. Setting
3548 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3550 if (max_stop_req - critical_point < 4)
3553 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3554 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
3555 critical_point = 0x10;
3558 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3559 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3560 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3561 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3562 if ((rdev->family == CHIP_R350) &&
3563 (stop_req > 0x15)) {
3566 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3567 temp |= RADEON_GRPH_BUFFER_SIZE;
3568 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3569 RADEON_GRPH_CRITICAL_AT_SOF |
3570 RADEON_GRPH_STOP_CNTL);
3572 Write the result into the register.
3574 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3575 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3578 if ((rdev->family == CHIP_RS400) ||
3579 (rdev->family == CHIP_RS480)) {
3580 /* attempt to program RS400 disp regs correctly ??? */
3581 temp = RREG32(RS400_DISP1_REG_CNTL);
3582 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3583 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3584 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3585 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3586 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3587 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3588 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3589 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3590 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3591 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3592 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3596 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3597 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3598 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3603 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3605 if (stop_req > max_stop_req)
3606 stop_req = max_stop_req;
3609 Find the drain rate of the display buffer.
3611 temp_ff.full = dfixed_const((16/pixel_bytes2));
3612 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3614 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3615 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3616 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3617 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3618 if ((rdev->family == CHIP_R350) &&
3619 (stop_req > 0x15)) {
3622 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3623 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3624 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3625 RADEON_GRPH_CRITICAL_AT_SOF |
3626 RADEON_GRPH_STOP_CNTL);
3628 if ((rdev->family == CHIP_RS100) ||
3629 (rdev->family == CHIP_RS200))
3630 critical_point2 = 0;
3632 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3633 temp_ff.full = dfixed_const(temp);
3634 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3635 if (sclk_ff.full < temp_ff.full)
3636 temp_ff.full = sclk_ff.full;
3638 read_return_rate.full = temp_ff.full;
3641 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3642 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3644 time_disp1_drop_priority.full = 0;
3646 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3647 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3648 crit_point_ff.full += dfixed_const_half(0);
3650 critical_point2 = dfixed_trunc(crit_point_ff);
3652 if (rdev->disp_priority == 2) {
3653 critical_point2 = 0;
3656 if (max_stop_req - critical_point2 < 4)
3657 critical_point2 = 0;
3661 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3662 /* some R300 cards have a problem with this set to 0 */
3663 critical_point2 = 0x10;
3666 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3667 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3669 if ((rdev->family == CHIP_RS400) ||
3670 (rdev->family == CHIP_RS480)) {
3672 /* attempt to program RS400 disp2 regs correctly ??? */
3673 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3674 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3675 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3676 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3677 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3678 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3679 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3680 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3681 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3682 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3683 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3684 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3686 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3687 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3688 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3689 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3692 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3693 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3697 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3704 r = radeon_scratch_get(rdev, &scratch);
3706 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3709 WREG32(scratch, 0xCAFEDEAD);
3710 r = radeon_ring_lock(rdev, ring, 2);
3712 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3713 radeon_scratch_free(rdev, scratch);
3716 radeon_ring_write(ring, PACKET0(scratch, 0));
3717 radeon_ring_write(ring, 0xDEADBEEF);
3718 radeon_ring_unlock_commit(rdev, ring);
3719 for (i = 0; i < rdev->usec_timeout; i++) {
3720 tmp = RREG32(scratch);
3721 if (tmp == 0xDEADBEEF) {
3726 if (i < rdev->usec_timeout) {
3727 DRM_INFO("ring test succeeded in %d usecs\n", i);
3729 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3733 radeon_scratch_free(rdev, scratch);
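/* Editor's note: PACKET0(reg, n) builds a type-0 CP packet header asking
 * the CP to write n+1 consecutive dwords starting at reg; in the radeon
 * headers it expands to roughly ((n) << 16) | ((reg) >> 2). The ring test
 * above therefore emits two dwords: a header for a single write to the
 * scratch register, then the 0xDEADBEEF payload it later polls for.
 */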
3737 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3739 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3741 if (ring->rptr_save_reg) {
3742 u32 next_rptr = ring->wptr + 2 + 3;
3743 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3744 radeon_ring_write(ring, next_rptr);
3747 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3748 radeon_ring_write(ring, ib->gpu_addr);
3749 radeon_ring_write(ring, ib->length_dw);
3752 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3754 struct radeon_ib ib;
3760 r = radeon_scratch_get(rdev, &scratch);
3762 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3765 WREG32(scratch, 0xCAFEDEAD);
3766 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3768 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3771 ib.ptr[0] = PACKET0(scratch, 0);
3772 ib.ptr[1] = 0xDEADBEEF;
3773 ib.ptr[2] = PACKET2(0);
3774 ib.ptr[3] = PACKET2(0);
3775 ib.ptr[4] = PACKET2(0);
3776 ib.ptr[5] = PACKET2(0);
3777 ib.ptr[6] = PACKET2(0);
3778 ib.ptr[7] = PACKET2(0);
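	/* Editor's note: PACKET2(0) is a type-2 CP filler (no-op) dword;
	 * dwords 2-7 simply pad the IB after the scratch write in dwords
	 * 0-1 (the IB is submitted with an 8-dword length).
	 */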
3780 r = radeon_ib_schedule(rdev, &ib, NULL);
3782 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3785 r = radeon_fence_wait(ib.fence, false);
3787 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3790 for (i = 0; i < rdev->usec_timeout; i++) {
3791 tmp = RREG32(scratch);
3792 if (tmp == 0xDEADBEEF) {
3797 if (i < rdev->usec_timeout) {
3798 DRM_INFO("ib test succeeded in %u usecs\n", i);
3800 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3805 radeon_ib_free(rdev, &ib);
3807 radeon_scratch_free(rdev, scratch);
3811 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3813 /* Shutdown CP; we shouldn't need to do that, but better safe than sorry. */
3816 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3817 WREG32(R_000740_CP_CSQ_CNTL, 0);
3819 /* Save a few CRTC registers */
3820 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3821 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3822 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3823 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3824 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3825 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3826 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3829 /* Disable VGA aperture access */
3830 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3831 /* Disable cursor, overlay, crtc */
3832 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3833 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3834 S_000054_CRTC_DISPLAY_DIS(1));
3835 WREG32(R_000050_CRTC_GEN_CNTL,
3836 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3837 S_000050_CRTC_DISP_REQ_EN_B(1));
3838 WREG32(R_000420_OV0_SCALE_CNTL,
3839 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3840 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3841 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3842 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3843 S_000360_CUR2_LOCK(1));
3844 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3845 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3846 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3847 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3848 WREG32(R_000360_CUR2_OFFSET,
3849 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3853 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3855 /* Update base address for crtc */
3856 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3857 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3858 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3860 /* Restore CRTC registers */
3861 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3862 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3863 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3864 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3865 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3869 void r100_vga_render_disable(struct radeon_device *rdev)
3873 tmp = RREG8(R_0003C2_GENMO_WT);
3874 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3877 static void r100_debugfs(struct radeon_device *rdev)
3881 r = r100_debugfs_mc_info_init(rdev);
3883 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3886 static void r100_mc_program(struct radeon_device *rdev)
3888 struct r100_mc_save save;
3890 /* Stop all MC clients */
3891 r100_mc_stop(rdev, &save);
3892 if (rdev->flags & RADEON_IS_AGP) {
3893 WREG32(R_00014C_MC_AGP_LOCATION,
3894 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3895 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3896 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3897 if (rdev->family > CHIP_RV200)
3898 WREG32(R_00015C_AGP_BASE_2,
3899 upper_32_bits(rdev->mc.agp_base) & 0xff);
3901 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3902 WREG32(R_000170_AGP_BASE, 0);
3903 if (rdev->family > CHIP_RV200)
3904 WREG32(R_00015C_AGP_BASE_2, 0);
3906 /* Wait for mc idle */
3907 if (r100_mc_wait_for_idle(rdev))
3908 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3909 /* Program MC; it should be a 32-bit limited address space */
3910 WREG32(R_000148_MC_FB_LOCATION,
3911 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3912 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3913 r100_mc_resume(rdev, &save);
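/* Editor's note: MC_FB_LOCATION and MC_AGP_LOCATION hold their start/top
 * addresses in 64K granularity, hence the >> 16 shifts above; e.g. 128 MiB
 * of VRAM based at 0 packs as start 0x0000, top 0x07ff
 * (vram_end = 0x07ffffff >> 16).
 */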
3916 static void r100_clock_startup(struct radeon_device *rdev)
3920 if (radeon_dynclks != -1 && radeon_dynclks)
3921 radeon_legacy_set_clock_gating(rdev, 1);
3922 /* We need to force on some of the blocks */
3923 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3924 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3925 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3926 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3927 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3930 static int r100_startup(struct radeon_device *rdev)
3934 /* set common regs */
3935 r100_set_common_regs(rdev);
3937 r100_mc_program(rdev);
3939 r100_clock_startup(rdev);
3940 /* Initialize GART (initialize after TTM so we can allocate
3941 * memory through TTM but finalize after TTM) */
3942 r100_enable_bm(rdev);
3943 if (rdev->flags & RADEON_IS_PCI) {
3944 r = r100_pci_gart_enable(rdev);
3949 /* allocate wb buffer */
3950 r = radeon_wb_init(rdev);
3954 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3956 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3962 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3963 /* 1M ring buffer */
3964 r = r100_cp_init(rdev, 1024 * 1024);
3966 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3970 r = radeon_ib_pool_init(rdev);
3972 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3979 int r100_resume(struct radeon_device *rdev)
3983 /* Make sure GART is not working */
3984 if (rdev->flags & RADEON_IS_PCI)
3985 r100_pci_gart_disable(rdev);
3986 /* Resume clock before doing reset */
3987 r100_clock_startup(rdev);
3988 /* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
3989 if (radeon_asic_reset(rdev)) {
3990 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3991 RREG32(R_000E40_RBBM_STATUS),
3992 RREG32(R_0007C0_CP_STAT));
3995 radeon_combios_asic_init(rdev->ddev);
3996 /* Resume clock after posting */
3997 r100_clock_startup(rdev);
3998 /* Initialize surface registers */
3999 radeon_surface_init(rdev);
4001 rdev->accel_working = true;
4002 r = r100_startup(rdev);
4004 rdev->accel_working = false;
4009 int r100_suspend(struct radeon_device *rdev)
4011 r100_cp_disable(rdev);
4012 radeon_wb_disable(rdev);
4013 r100_irq_disable(rdev);
4014 if (rdev->flags & RADEON_IS_PCI)
4015 r100_pci_gart_disable(rdev);
4019 void r100_fini(struct radeon_device *rdev)
4022 radeon_wb_fini(rdev);
4023 radeon_ib_pool_fini(rdev);
4024 radeon_gem_fini(rdev);
4025 if (rdev->flags & RADEON_IS_PCI)
4026 r100_pci_gart_fini(rdev);
4027 radeon_agp_fini(rdev);
4028 radeon_irq_kms_fini(rdev);
4029 radeon_fence_driver_fini(rdev);
4030 radeon_bo_fini(rdev);
4031 radeon_atombios_fini(rdev);
4032 r100_cp_fini_microcode(rdev);
4033 drm_free(rdev->bios, M_DRM);
4038 * Due to how kexec works, it can leave the hw fully initialised when it
4039 * boots the new kernel. However doing our init sequence with the CP and
4040 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
4041 * do some quick sanity checks and restore sane values to avoid this problem.
4044 void r100_restore_sanity(struct radeon_device *rdev)
4048 tmp = RREG32(RADEON_CP_CSQ_CNTL);
4050 WREG32(RADEON_CP_CSQ_CNTL, 0);
4052 tmp = RREG32(RADEON_CP_RB_CNTL);
4054 WREG32(RADEON_CP_RB_CNTL, 0);
4056 tmp = RREG32(RADEON_SCRATCH_UMSK);
4058 WREG32(RADEON_SCRATCH_UMSK, 0);
4062 int r100_init(struct radeon_device *rdev)
4066 /* Register debugfs file specific to this group of asics */
4069 r100_vga_render_disable(rdev);
4070 /* Initialize scratch registers */
4071 radeon_scratch_init(rdev);
4072 /* Initialize surface registers */
4073 radeon_surface_init(rdev);
4074 /* sanity-check some registers to avoid hangs like after kexec */
4075 r100_restore_sanity(rdev);
4076 /* TODO: disable VGA need to use VGA request */
4078 if (!radeon_get_bios(rdev)) {
4079 if (ASIC_IS_AVIVO(rdev))
4082 if (rdev->is_atom_bios) {
4083 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
4086 r = radeon_combios_init(rdev);
4090 /* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
4091 if (radeon_asic_reset(rdev)) {
4093 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4094 RREG32(R_000E40_RBBM_STATUS),
4095 RREG32(R_0007C0_CP_STAT));
4097 /* check if cards are posted or not */
4098 if (radeon_boot_test_post_card(rdev) == false)
4100 /* Set asic errata */
4102 /* Initialize clocks */
4103 radeon_get_clock_info(rdev->ddev);
4104 /* initialize AGP */
4105 if (rdev->flags & RADEON_IS_AGP) {
4106 r = radeon_agp_init(rdev);
4108 radeon_agp_disable(rdev);
4111 /* initialize VRAM */
4114 r = radeon_fence_driver_init(rdev);
4117 r = radeon_irq_kms_init(rdev);
4120 /* Memory manager */
4121 r = radeon_bo_init(rdev);
4124 if (rdev->flags & RADEON_IS_PCI) {
4125 r = r100_pci_gart_init(rdev);
4129 r100_set_safe_registers(rdev);
4131 rdev->accel_working = true;
4132 r = r100_startup(rdev);
4134 /* Something went wrong with the accel init; stop accel */
4135 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4137 radeon_wb_fini(rdev);
4138 radeon_ib_pool_fini(rdev);
4139 radeon_irq_kms_fini(rdev);
4140 if (rdev->flags & RADEON_IS_PCI)
4141 r100_pci_gart_fini(rdev);
4142 rdev->accel_working = false;
4147 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4148 bool always_indirect)
4150 if (reg < rdev->rmmio_size && !always_indirect)
4151 return bus_read_4(rdev->rmmio, reg);
4155 spin_lock(&rdev->mmio_idx_lock);
4156 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4157 ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
4158 spin_unlock(&rdev->mmio_idx_lock);
4164 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4165 bool always_indirect)
4167 if (reg < rdev->rmmio_size && !always_indirect)
4168 bus_write_4(rdev->rmmio, reg, v);
4170 spin_lock(&rdev->mmio_idx_lock);
4171 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4172 bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
4173 spin_unlock(&rdev->mmio_idx_lock);
4177 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4179 if (reg < rdev->rio_mem_size)
4180 return bus_read_4(rdev->rio_mem, reg);
4182 /* XXX No locking? -- dumbbell@ */
4183 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4184 return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
4188 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4190 if (reg < rdev->rio_mem_size)
4191 bus_write_4(rdev->rio_mem, reg, v);
4193 /* XXX No locking? -- dumbbell@ */
4194 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4195 bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);