/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_ring.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
/* radeon.h and radeon_asic.h (assumed here, as in the Linux original)
 * provide struct radeon_device and the asic dispatch wrappers used below */
#include "radeon.h"
#include "radeon_asic.h"
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
#ifdef DUMBBELL_WIP
static int radeon_debugfs_sa_init(struct radeon_device *rdev);
#endif /* DUMBBELL_WIP */
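
/*
 * Illustrative sketch of the IB lifecycle implemented below (added for
 * context, not part of the original file; the ring index and size are
 * assumptions):
 *
 *	struct radeon_ib ib;
 *	int r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
 *	if (r == 0) {
 *		ib.ptr[0] = ...;		command packets
 *		ib.length_dw = ...;		number of valid dwords
 *		r = radeon_ib_schedule(rdev, &ib, NULL);
 *		radeon_ib_free(rdev, &ib);	the fence keeps the memory busy
 *	}
 */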
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requesting VM, or NULL if none
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}
	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r)
		return r;

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}
/**
 * radeon_ib_sync_to - sync to fence before executing the IB
 *
 * @ib: IB object to add fence to
 * @fence: fence to sync to
 *
 * Sync to the fence before executing the IB
 */
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = ib->sync_to[fence->ring];
	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB; we should report this. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush for every IB */
	if (ib->vm /*&& !ib->vm->last_flush*/) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
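
/*
 * Note (added for context, not from the original file): on SI the
 * userspace driver passes the CE IB to the kernel as a separate CS
 * chunk (RADEON_CHUNK_ID_CONST_IB); the CS ioctl code turns it into
 * the const_ib argument that radeon_ib_schedule() puts on the ring
 * ahead of the DE IB.
 */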
/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GPU_PAGE_SIZE,
				      RADEON_GEM_DOMAIN_GTT);
	if (r)
		return r;
	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r)
		return r;

	rdev->ib_pool_ready = true;
#ifdef DUMBBELL_WIP
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
#endif /* DUMBBELL_WIP */
	return 0;
}
/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}
/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;
		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;
			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
#ifdef DUMBBELL_WIP
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
#endif /* DUMBBELL_WIP */
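
/*
 * Illustrative sketch of how the helpers below fit together (added for
 * context, not part of the original file; the register write is a
 * made-up example):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r == 0) {
 *		radeon_ring_write(ring, PACKET0(reg, 0));
 *		radeon_ring_write(ring, value);
 *		radeon_ring_unlock_commit(rdev, ring);
 *	}
 */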
#if defined(DRM_DEBUG_CODE) && DRM_DEBUG_CODE != 0
/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#endif
/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}
u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;

	return rptr;
}

u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(ring->wptr_reg);
	wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;

	return wptr;
}

void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
				  struct radeon_ring *ring)
{
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);	/* posting read to flush the MMIO write */
}
/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->rptr = radeon_ring_get_rptr(rdev, ring);
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
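
/*
 * Worked example of the computation above (values assumed for
 * illustration, not from the original file): for a 64 byte ring,
 * ring_size / 4 = 16 dwords and ptr_mask = 15.  With rptr = 4 and
 * wptr = 12 the GPU still has to fetch dwords 4..11, so
 * free = (4 + 16 - 12) & 15 = 8.  A result of 0 only happens when
 * rptr == wptr, which means the GPU has caught up and the whole
 * ring is free.
 */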
/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	radeon_ring_free_size(rdev, ring);
	if (ring->ring_free_dw == (ring->ring_size / 4)) {
		/* This is an empty ring; update the lockup info to avoid
		 * false positives */
		radeon_ring_lockup_update(ring);
	}
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, ring->idx);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}
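
/*
 * Example of the alignment round-up above (values assumed for
 * illustration): with align_mask = 15 (a 16 dword fetch granularity),
 * a request for ndw = 70 becomes (70 + 15) & ~15 = 80 dwords, so the
 * nop padding done by radeon_ring_commit() can never overrun the
 * allocation.
 */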
/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return r;
	}
	return 0;
}
/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();	/* make ring writes visible before the wptr update */
	radeon_ring_set_wptr(rdev, ring);
}
/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add some nop packets to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}
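
/*
 * Note (added for context, not from the original file): the per-asic
 * lockup checks pair this helper with radeon_ring_test_lockup() below;
 * they first force some nop activity, then test whether the rptr is
 * still advancing.
 */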
/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}
/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 *
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * There is no need to initialize the lockup tracking information here:
 * either the CP rptr will have moved or jiffies will have wrapped
 * around, and both cases force a (re)initialization of the tracking
 * information.
 *
 * A false positive is possible if we are called again after a long
 * while and last_rptr still equals the current CP rptr; unlikely, but
 * it can happen.  To avoid this, if the time elapsed since the last
 * call exceeds the lockup timeout, we return false and update the
 * tracking information.  Consequently, the caller must call
 * radeon_ring_test_lockup() several times within the timeout window
 * for a lockup to be reported; the fencing code should be cautious
 * about that.
 *
 * The caller should write to the ring to force the CP to do something,
 * so we don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	ring->rptr = radeon_ring_get_rptr(rdev, ring);
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: pointer the saved dwords are returned through
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc(size * sizeof(uint32_t), M_DRM, M_WAITOK);
	if (!*data) {
		lockmgr(&rdev->ring_lock, LK_RELEASE);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	lockmgr(&rdev->ring_lock, LK_RELEASE);
	return size;
}
/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	unsigned i;
	int r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	drm_free(data, M_DRM);
	return 0;
}
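
/*
 * Note (added for context, not from the original file): these two
 * helpers are used together by the GPU reset path, roughly:
 *
 *	unsigned size;
 *	uint32_t *data;
 *	size = radeon_ring_backup(rdev, ring, &data);
 *	... reset the asic ...
 *	radeon_ring_restore(rdev, ring, size, data);
 *
 * radeon_ring_restore() consumes @data (it is freed with drm_free()).
 */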
/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;
	void *ring_ptr;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0)) {
			radeon_bo_unref(&ring->ring_obj);
			return r;
		}
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			radeon_bo_unref(&ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		ring_ptr = &ring->ring;
		r = radeon_bo_kmap(ring->ring_obj,
				   ring_ptr);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			radeon_bo_unref(&ring->ring_obj);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
#ifdef DUMBBELL_WIP
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
#endif /* DUMBBELL_WIP */
	radeon_ring_lockup_update(ring);
	return 0;
}
/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	lockmgr(&rdev->ring_lock, LK_RELEASE);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}
/*
 * Debugfs info
 */
#ifdef DUMBBELL_WIP
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;
	u32 tmp;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	tmp = radeon_ring_get_wptr(rdev, ring);
	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
	tmp = radeon_ring_get_rptr(rdev, ring);
	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* print 32 dw before the current rptr, as the last executed
	 * packet is often the root of the problem
	 */
	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		int r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
#endif /* DUMBBELL_WIP */