 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
#include <linux/seq_file.h>

#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "r100_track.h"
#include "r300_reg_safe.h"
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */
/*
 * Indirect register accessors
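 *
 * The PCIE register file is reached through an index/data pair: the
 * register offset is written to RADEON_PCIE_INDEX (masked by
 * rdev->pcie_reg_mask) and the value is then read from or written to
 * RADEON_PCIE_DATA. rdev->pcie_idx_lock keeps each index/data sequence
 * atomic against concurrent accessors; the RREG32_PCIE()/WREG32_PCIE()
 * helpers used throughout this file funnel into these two functions.
 */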
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	spin_lock(&rdev->pcie_idx_lock);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	spin_unlock(&rdev->pcie_idx_lock);
	return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->pcie_idx_lock);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
	spin_unlock(&rdev->pcie_idx_lock);
}
/*
 * rv370, rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}
#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)
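/*
 * A GART page-table entry packs a 40-bit bus address into 32 bits at
 * 256-byte granularity: address bits 8-31 land in entry bits 0-23 and
 * address bits 32-39 in entry bits 24-31.  Since pages are at least
 * 4 KiB aligned, entry bits 0-3 are always zero and are reused for the
 * PTE flags above.  Illustrative example: addr = 0x0123456000 with
 * READ | WRITE | SNOOP flags encodes to 0x0123456C.
 */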
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	addr = (lower_32_bits(addr) >> 8) |
		((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	return addr;
}
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	void __iomem *ptr = rdev->gart.ptr;

	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers, it'll get swapped on the way into VRAM - so there is
	 * no need for cpu_to_le32 on VRAM tables */
	writel(entry, ((uint8_t __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart!\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}
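/*
 * The enable sequence below is order-sensitive: unmapped accesses are
 * first set to be discarded, then the aperture range and the table
 * base in VRAM are programmed, and only then is RADEON_PCIE_TX_GART_EN
 * switched on, followed by a TLB flush so no stale translations
 * survive.
 */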
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
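/*
 * Note that the HDP read-buffer invalidate below is emitted through
 * the ring (a type-0 packet writing HOST_PATH_CNTL) rather than via
 * MMIO, per the hardlockup erratum described at the top of this file.
 */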
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
			  RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
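/*
 * Emitted once when the CP ring is brought up: programs the tiling and
 * pipe configuration computed below and flushes/idles the 2D and 3D
 * blocks so rendering starts from a clean state.
 */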
void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
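	/* Multisample sample positions: a value of 6 in each 1/12th-unit
	 * field presumably centers the sample in the pixel, matching the
	 * 1/12 sub-pixel precision configured above. */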
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}
static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}
static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;
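	/* The single-pipe R300 "AD" (0x4144) and R350 "AH" (0x4148)
	 * variants are picked out by PCI device ID; every other r300/r350
	 * is a two-pipe part, while the rv350 family is always
	 * single-pipe. */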
	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}
int r300_asic_reset(struct radeon_device *rdev, bool hard)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
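	/* stop the CP and reset the ring read/write pointers so the CP is
	 * quiescent before the soft reset is asserted */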
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(device_get_parent(rdev->dev->bsddev));
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* resetting the CP seems to be problematic; sometimes it ends up
	 * hard-locking the computer, but it's necessary for a successful
	 * reset. More testing & experimenting is needed on R3XX/R4XX to
	 * find a reliable solution (if there is any). */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(device_get_parent(rdev->dev->bsddev));
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
	return ret;
}
/*
 * r300, r350, rv350, rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
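	/* an IGP has no dedicated VRAM; its framebuffer base comes from the
	 * northbridge top-of-memory (NB_TOM) register instead */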
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}
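/*
 * CS checker for type-0 (register write) packets: rejects registers
 * userspace may not touch, patches buffer offsets with the relocated
 * GPU addresses, and mirrors draw-related state into r100_cs_track so
 * buffer bounds can be validated at draw time.
 */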
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20b4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2140:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			/* fall through */
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d)!\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d)!\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
	case 0x4104:
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		track->tex_dirty = true;
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_FL_I16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_FL_I16A16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case R300_TX_FORMAT_FL_I32:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* fall through */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		track->tex_dirty = true;
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		track->tex_dirty = true;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->cb_dirty = true;
		break;
	case 0x43a4:
		/* SC_HYPERZ_EN */
		/* r300c emits this register - we need to disable hyperz for it
		 * without complaining */
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & 0x1)
				ib[idx] = idx_value & ~1;
		}
		break;
	case 0x4f1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		track->cb_dirty = true;
		track->zb_dirty = true;
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & (R300_HIZ_ENABLE |
					 R300_RD_COMP_ENABLE |
					 R300_WR_COMP_ENABLE |
					 R300_FAST_FILL_ENABLE))
				goto fail;
		}
		break;
	case 0x4e48:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		track->cb_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_OFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->aa.robj = reloc->robj;
		track->aa.offset = idx_value;
		track->aa_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_RB3D_AARESOLVE_PITCH:
		track->aa.pitch = idx_value & 0x3FFE;
		track->aa_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_CTL:
		track->aaresolve = idx_value & 0x1;
		track->aa_dirty = true;
		break;
	case 0x4f30: /* ZB_MASK_OFFSET */
	case 0x4f34: /* ZB_ZMASK_PITCH */
	case 0x4f44: /* ZB_HIZ_OFFSET */
	case 0x4f54: /* ZB_HIZ_PITCH */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		break;
	case 0x4028:
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		/* GB_Z_PEQ_CONFIG */
		if (p->rdev->family >= CHIP_RV350)
			break;
		goto fail;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
	       reg, idx, idx_value);
	return -EINVAL;
}
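/*
 * CS checker for type-3 packets: every draw command must pass
 * r100_cs_track_check() against the state accumulated by the register
 * checker above, and the hyperz/cmask clear packets are only allowed
 * for the file that currently owns hyperz/cmask.
 */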
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3, vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_3D_CLEAR_CMASK:
		if (p->rdev->cmask_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
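/*
 * Top-level CS parse loop: walks the IB packet by packet, handing
 * type-0 packets to r100_cs_parse_packet0() with the r300 safe-register
 * bitmap and type-3 packets to the checker above; type-2 packets carry
 * no payload and are skipped.
 */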
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunk_ib->length_dw);
	return 0;
}
void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for MC idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait for MC idle before programming MC.\n");
	/* Program the MC; should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}
static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
int r300_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
int r300_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}
void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disabling VGA needs to use a VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}