/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <uapi_drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon_asic.h"
#include <uapi_drm/radeon_drm.h>
#include "r100_track.h"
#include "r300_reg_safe.h"
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   over MMIO to flush the host path read cache; that leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe with the MC, hard to
 *   tell. (Jerome Glisse)
 */
/*
 * Indirect registers accessor
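 *
 * The PCIE register block on these ASICs is reached through an index/data
 * pair rather than plain MMIO: the register number is written to
 * RADEON_PCIE_INDEX, then the value is read from or written to
 * RADEON_PCIE_DATA. The pcie_idx_lock serializes that two-step sequence so
 * concurrent callers cannot interleave their index and data accesses.
 */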
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
        WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
        r = RREG32(RADEON_PCIE_DATA);
        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
        WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
        WREG32(RADEON_PCIE_DATA, (v));
        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
        /* Work around a HW bug: do the flush twice */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)
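/*
 * GART page table entry layout, as built below: entry bits [23:0] hold
 * bits [31:8] of the page address, bits [31:24] hold address bits
 * [39:32], and the low attribute bits select readable, writeable and
 * unsnooped access. Pages are 4K-aligned, so the low attribute bits
 * never collide with address bits.
 */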
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
        addr = (lower_32_bits(addr) >> 8) |
                ((upper_32_bits(addr) & 0xff) << 24);
        if (flags & RADEON_GART_PAGE_READ)
                addr |= R300_PTE_READABLE;
        if (flags & RADEON_GART_PAGE_WRITE)
                addr |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                addr |= R300_PTE_UNSNOOPED;
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
        volatile uint32_t *ptr = rdev->gart.ptr;

        /* On x86 we want this to be CPU endian; on powerpc without HW
         * swappers it will get swapped on the way into VRAM, so there is
         * no need for cpu_to_le32 on VRAM tables. */
        *ptr = (uint32_t)entry;
int rv370_pcie_gart_init(struct radeon_device *rdev)
        if (rdev->gart.robj) {
                WARN(1, "RV370 PCIE GART already initialized\n");
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        r = rv370_debugfs_pcie_gart_info_init(rdev);
                DRM_ERROR("Failed to register debugfs file for PCIE gart!\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
        rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
        rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
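/*
 * Bring the PCIE GART online: program the aperture window
 * (TX_GART_START/END), point TX_GART_BASE at the page table that was
 * allocated in VRAM, route discarded (unmapped) reads to the start of
 * VRAM, then turn on TX_GART_EN and invalidate the TLB so the new
 * mappings take effect.
 */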
int rv370_pcie_gart_enable(struct radeon_device *rdev)
        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
        r = radeon_gart_table_vram_pin(rdev);
        /* discard memory requests outside of the configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
        tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)table_addr);
        rdev->gart.ready = true;
void rv370_pcie_gart_disable(struct radeon_device *rdev)
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        radeon_gart_table_vram_unpin(rdev);

void rv370_pcie_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
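/*
 * Fence emission. This flushes the destination and Z caches, waits for
 * the 2D/3D engines to go idle and clean, invalidates the HDP read cache
 * with a ring write to HOST_PATH_CNTL (see the note at the top of this
 * file on why this must go through the ring rather than MMIO), and
 * finally writes the fence sequence number to the scratch register and
 * fires the SW interrupt to wake any waiters.
 */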
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
        struct radeon_ring *ring = &rdev->ring[fence->ring];

        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are IB scheduling and buffer moves) */
        /* Write SC register so SC & US assert idle */
        radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
        radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
                          RADEON_HDP_READ_BUFFER_INVALIDATE);
        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
        radeon_ring_write(ring, fence->seq);
        radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(ring, RADEON_SW_INT_FIRE);
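/*
 * Initial CP ring programming: set up 2D/3D engine synchronization, load
 * the GB tile configuration chosen from the pipe count, flush and idle
 * the destination and Z caches, program the multisample positions, and
 * put the GA polygon and rounding modes into known defaults.
 */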
void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
        unsigned gb_tile_config;

        /* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
                gb_tile_config |= R300_PIPE_COUNT_R300;
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                gb_tile_config |= R300_PIPE_COUNT_R420;
                gb_tile_config |= R300_PIPE_COUNT_RV350;
        r = radeon_ring_lock(rdev, ring, 64);
        radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(ring,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(ring, gb_tile_config);
        radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
        radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
        radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(ring,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(ring,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(ring,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(ring,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev, ring, false);
static void r300_errata(struct radeon_device *rdev)
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;

int r300_mc_wait_for_idle(struct radeon_device *rdev)
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & R300_MC_IDLE) {
static void r300_gpu_init(struct radeon_device *rdev)
        uint32_t gb_tile_config, tmp;

        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
            (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                rdev->num_gb_pipes = 2;
                /* rv350,rv370,rv380,r300 AD, r350 AH */
                rdev->num_gb_pipes = 1;
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
                gb_tile_config |= R300_PIPE_COUNT_R300;
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                gb_tile_config |= R300_PIPE_COUNT_R420;
                gb_tile_config |= R300_PIPE_COUNT_RV350;
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
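/*
 * Soft reset sequence: stop the MC clients and the CP, save PCI config
 * space and disable bus mastering, pulse RBBM_SOFT_RESET for the VAP and
 * GA blocks, then reset the CP separately (which is known to be fragile,
 * see the comment below), restore PCI state and bus mastering, and check
 * RBBM_STATUS to decide whether the reset actually succeeded.
 */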
int r300_asic_reset(struct radeon_device *rdev, bool hard)
        struct r100_mc_save save;

        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
        r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);
        pci_save_state(device_get_parent(rdev->dev->bsddev));
        /* disable bus mastering */
        r100_bm_disable(rdev);
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
               S_0000F0_SOFT_RESET_GA(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* Resetting the CP seems to be problematic: sometimes it ends up
         * hard-locking the computer, but it's necessary for a successful
         * reset. More testing and experimenting is needed on R3XX/R4XX to
         * find a reliable solution (if there is any).
         */
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* restore PCI & busmastering */
        pci_restore_state(device_get_parent(rdev->dev->bsddev));
        r100_enable_bm(rdev);
        /* Check if GPU is idle */
        if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
        dev_info(rdev->dev, "GPU reset succeeded\n");
        r100_mc_resume(rdev, &save);
/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        tmp &= R300_MEM_NUM_CHANNELS_MASK;
        case 0: rdev->mc.vram_width = 64; break;
        case 1: rdev->mc.vram_width = 128; break;
        case 2: rdev->mc.vram_width = 256; break;
        default: rdev->mc.vram_width = 128; break;
        r100_vram_init_sizes(rdev);
        base = rdev->mc.aper_base;
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
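/*
 * Dynamic PCIE lane reconfiguration: map the requested lane count to the
 * matching LC_LINK_WIDTH_Xn encoding, bail out early if the link already
 * runs at that width, then latch the new width with RECONFIG_NOW and poll
 * LC_LINK_WIDTH_CNTL until it reads back sane (0xffffffff means the link
 * is still retraining).
 */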
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
        if (!(rdev->flags & RADEON_IS_PCIE))
        /* FIXME wait for idle */
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));
        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
int rv370_get_pcie_lanes(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_IGP)
        if (!(rdev->flags & RADEON_IS_PCIE))
        /* FIXME wait for idle */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
        case RADEON_PCIE_LC_LINK_WIDTH_X0:
        case RADEON_PCIE_LC_LINK_WIDTH_X1:
        case RADEON_PCIE_LC_LINK_WIDTH_X2:
        case RADEON_PCIE_LC_LINK_WIDTH_X4:
        case RADEON_PCIE_LC_LINK_WIDTH_X8:
        case RADEON_PCIE_LC_LINK_WIDTH_X16:
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
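/*
 * Command-stream (CS) checking. Each register write in a submitted IB is
 * validated here: relocations patch GPU addresses into the IB, and the
 * r100_cs_track state accumulated from these writes is later checked
 * against the draw packets, so userspace cannot point color/depth buffers
 * or textures outside the buffers it owns.
 */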
static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
        struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;

        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
                track->cb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->gpu_offset);
        case R300_ZB_DEPTHOFFSET:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                track->zb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->gpu_offset);
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
                if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
                        ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
                                  ((idx_value & ~31) + (u32)reloc->gpu_offset);
                        if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_TXO_MACRO_TILE;
                        if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_TXO_MICRO_TILE;
                        else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
                        tmp = idx_value + ((u32)reloc->gpu_offset);
                track->textures[i].robj = reloc->robj;
                track->tex_dirty = true;
        /* Tracked registers */
                track->vap_vf_cntl = idx_value;
                track->vtx_size = idx_value & 0x7F;
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = idx_value & 0x00FFFFFFUL;
                /* VAP_ALT_NUM_VERTICES - only valid on r500 */
                if (p->rdev->family < CHIP_RV515)
                track->vap_alt_nverts = idx_value & 0xFFFFFF;
                track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                track->cb_dirty = true;
                track->zb_dirty = true;
                if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
                    p->rdev->cmask_filp != p->filp) {
                        DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
                track->num_cb = ((idx_value >> 5) & 0x3) + 1;
                track->cb_dirty = true;
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                radeon_cs_dump_packet(p, pkt);
                        if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_COLOR_TILE_ENABLE;
                        if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_COLOR_MICROTILE_ENABLE;
                        else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
                        tmp = idx_value & ~(0x7 << 16);
                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = idx_value & 0x3FFE;
                switch (((idx_value >> 21) & 0xF)) {
                        track->cb[i].cpp = 1;
                        track->cb[i].cpp = 2;
                        if (p->rdev->family < CHIP_RV515) {
                                DRM_ERROR("Invalid color buffer format (%d)!\n",
                                          ((idx_value >> 21) & 0xF));
                        track->cb[i].cpp = 4;
                        track->cb[i].cpp = 8;
                        track->cb[i].cpp = 16;
                        DRM_ERROR("Invalid color buffer format (%d)!\n",
                                  ((idx_value >> 21) & 0xF));
                track->cb_dirty = true;
                        track->z_enabled = true;
                        track->z_enabled = false;
                track->zb_dirty = true;
                switch ((idx_value & 0xF)) {
                        DRM_ERROR("Invalid z buffer format (%d)!\n",
                track->zb_dirty = true;
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                radeon_cs_dump_packet(p, pkt);
                        if (reloc->tiling_flags & RADEON_TILING_MACRO)
                                tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                        if (reloc->tiling_flags & RADEON_TILING_MICRO)
                                tile_flags |= R300_DEPTHMICROTILE_TILED;
                        else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
                                tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
                        tmp = idx_value & ~(0x7 << 16);
                track->zb.pitch = idx_value & 0x3FFC;
                track->zb_dirty = true;
                for (i = 0; i < 16; i++) {
                        enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                track->tex_dirty = true;
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (idx_value >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((idx_value & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                case R300_TX_FORMAT_X16:
                case R300_TX_FORMAT_FL_I16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
                case R300_TX_FORMAT_W4Z4Y4X4:
                case R300_TX_FORMAT_W1Z5Y5X5:
                case R300_TX_FORMAT_D3DMFT_CxV8U8:
                case R300_TX_FORMAT_B8G8_B8G8:
                case R300_TX_FORMAT_G8R8_G8B8:
                        track->textures[i].cpp = 2;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                case R300_TX_FORMAT_Y16X16:
                case R300_TX_FORMAT_FL_I16A16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
                case R300_TX_FORMAT_W2Z10Y10X10:
                case R300_TX_FORMAT_FL_I32:
                        track->textures[i].cpp = 4;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                case R300_TX_FORMAT_W16Z16Y16X16:
                case R300_TX_FORMAT_FL_R16G16B16A16:
                case R300_TX_FORMAT_FL_I32A32:
                        track->textures[i].cpp = 8;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                case R300_TX_FORMAT_FL_R32G32B32A32:
                        track->textures[i].cpp = 16;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                case R300_TX_FORMAT_DXT1:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
                case R300_TX_FORMAT_ATI2N:
                        if (p->rdev->family < CHIP_R420) {
                                DRM_ERROR("Invalid texture format %u\n",
                        /* The same rules apply as for DXT3/5. */
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
                        DRM_ERROR("Invalid texture format %u\n",
                track->tex_dirty = true;
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = idx_value & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                tmp = (idx_value >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                track->tex_dirty = true;
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = idx_value & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((idx_value >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((idx_value >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;
                        if (idx_value & (1 << 14)) {
                                /* The same rules apply as for DXT1. */
                                track->textures[i].compress_format =
                                        R100_TRACK_COMP_DXT1;
                } else if (idx_value & (1 << 14)) {
                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
                track->tex_dirty = true;
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = idx_value & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (idx_value >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (idx_value >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = idx_value & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                track->tex_dirty = true;
        case R300_ZB_ZPASS_ADDR:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
                ib[idx] = idx_value + ((u32)reloc->gpu_offset);
                /* RB3D_COLOR_CHANNEL_MASK */
                track->color_channel_mask = idx_value;
                track->cb_dirty = true;
                /* r300c emits this register - we need to disable hyperz for it
                 * without complaining */
                if (p->rdev->hyperz_filp != p->filp) {
                        if (idx_value & 0x1)
                                ib[idx] = idx_value & ~1;
                track->zb_cb_clear = !!(idx_value & (1 << 5));
                track->cb_dirty = true;
                track->zb_dirty = true;
                if (p->rdev->hyperz_filp != p->filp) {
                        if (idx_value & (R300_HIZ_ENABLE |
                                         R300_RD_COMP_ENABLE |
                                         R300_WR_COMP_ENABLE |
                                         R300_FAST_FILL_ENABLE))
                /* RB3D_BLENDCNTL */
                track->blend_read_enable = !!(idx_value & (1 << 2));
                track->cb_dirty = true;
        case R300_RB3D_AARESOLVE_OFFSET:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                        radeon_cs_dump_packet(p, pkt);
                track->aa.robj = reloc->robj;
                track->aa.offset = idx_value;
                track->aa_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->gpu_offset);
        case R300_RB3D_AARESOLVE_PITCH:
                track->aa.pitch = idx_value & 0x3FFE;
                track->aa_dirty = true;
        case R300_RB3D_AARESOLVE_CTL:
                track->aaresolve = idx_value & 0x1;
                track->aa_dirty = true;
        case 0x4f30: /* ZB_MASK_OFFSET */
        case 0x4f34: /* ZB_ZMASK_PITCH */
        case 0x4f44: /* ZB_HIZ_OFFSET */
        case 0x4f54: /* ZB_HIZ_PITCH */
                if (idx_value && (p->rdev->hyperz_filp != p->filp))
                if (idx_value && (p->rdev->hyperz_filp != p->filp))
                /* GB_Z_PEQ_CONFIG */
                if (p->rdev->family >= CHIP_RV350)
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                /* fall through - do not move */
        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
               reg, idx, idx_value);
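/*
 * Packet3 checking: validate the draw and buffer packets. For the DRAW_*
 * variants the tracked VAP_VF_CNTL state is handed to
 * r100_cs_track_check(), which verifies that the buffers set up through
 * r300_packet0_check() are large enough for the requested draw.
 */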
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
        struct radeon_bo_list *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;

        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);
        case PACKET3_INDX_BUFFER:
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        radeon_cs_dump_packet(p, pkt);
                ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
        case PACKET3_3D_CLEAR_HIZ:
        case PACKET3_3D_CLEAR_ZMASK:
                if (p->rdev->hyperz_filp != p->filp)
        case PACKET3_3D_CLEAR_CMASK:
                if (p->rdev->cmask_filp != p->filp)
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r300_cs_parse(struct radeon_cs_parser *p)
        struct radeon_cs_packet pkt;
        struct r100_cs_track *track;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        r100_cs_track_clear(p->rdev, track);
                r = radeon_cs_packet_parse(p, &pkt, p->idx);
                p->idx += pkt.count + 2;
                case RADEON_PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                case RADEON_PACKET_TYPE2:
                case RADEON_PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        DRM_ERROR("Unknown packet type %d!\n", pkt.type);
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

void r300_set_reg_safe(struct radeon_device *rdev)
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
void r300_mc_program(struct radeon_device *rdev)
        struct r100_mc_save save;

        r = r100_debugfs_mc_info_init(rdev);
                dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
        /* Stop all MC clients */
        r100_mc_stop(rdev, &save);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(R_00014C_MC_AGP_LOCATION,
                       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
                       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
                WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
                WREG32(R_00015C_AGP_BASE_2,
                       upper_32_bits(rdev->mc.agp_base) & 0xff);
                WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32(R_000170_AGP_BASE, 0);
                WREG32(R_00015C_AGP_BASE_2, 0);
        /* Wait for MC idle */
        if (r300_mc_wait_for_idle(rdev))
                DRM_INFO("Failed to wait for MC idle before programming MC.\n");
        /* Program the MC; this should be a 32-bit limited address space */
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
void r300_clock_startup(struct radeon_device *rdev)
        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_legacy_set_clock_gating(rdev, 1);
        /* We need to force some of the blocks on */
        tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
        tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
                tmp |= S_00000D_FORCE_VAP(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
static int r300_startup(struct radeon_device *rdev)
        /* set common regs */
        r100_set_common_regs(rdev);
        r300_mc_program(rdev);
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        r300_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
        if (rdev->family == CHIP_R300 ||
            rdev->family == CHIP_R350 ||
            rdev->family == CHIP_RV350)
                r100_enable_bm(rdev);
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
        r = radeon_ib_pool_init(rdev);
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
int r300_resume(struct radeon_device *rdev)
        /* Make sure the GARTs are not active */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clocks before doing reset */
        r300_clock_startup(rdev);
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clocks after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        rdev->accel_working = true;
        r = r300_startup(rdev);
                rdev->accel_working = false;
int r300_suspend(struct radeon_device *rdev)
        radeon_pm_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);

void r300_fini(struct radeon_device *rdev)
        radeon_pm_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
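/*
 * Full ASIC bring-up. The ordering below matters: BIOS detection, the
 * pre-POST reset and clock setup come first, then AGP, the memory
 * controller, the fence driver, the buffer manager and GART
 * initialization, and only then r300_startup(). On startup failure the
 * acceleration pieces are torn down again and accel_working is left
 * false.
 */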
int r300_init(struct radeon_device *rdev)
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA, need to use VGA request */
        /* restore some registers to sane defaults */
        r100_restore_sanity(rdev);
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
        r = radeon_combios_init(rdev);
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        /* check if the card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
        /* Set asic errata */
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                        radeon_agp_disable(rdev);
        /* initialize memory controller */
        r = radeon_fence_driver_init(rdev);
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
        r300_set_reg_safe(rdev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        rdev->accel_working = true;
        r = r300_startup(rdev);
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;