/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0
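/*
 * Two copy back-ends can be exercised: the async DMA engine
 * (RADEON_TEST_COPY_DMA) and the blitter (RADEON_TEST_COPY_BLIT).
 * radeon_test_moves() below runs the same move test once for each
 * engine the ASIC actually provides.
 */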
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;
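	/*
	 * Each iteration below pins a fresh test-sized GTT BO and leaves it
	 * pinned, so successive BOs land at increasing GTT offsets and the
	 * copies end up covering the whole aperture; everything is torn down
	 * in the common out_cleanup path at the end of the function.
	 */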
	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}
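		/*
		 * Seed the GTT BO with a recognizable pattern: every
		 * pointer-sized slot is written with its own CPU address, so a
		 * misplaced value after the copy points back at where the data
		 * came from.
		 */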
		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size);
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);
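		/* Copy the whole BO to VRAM on the engine under test, then wait
		 * for the fence before checking the result with the CPU. */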
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}
		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map),
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)gtt_start - (uintptr_t)gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
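			/* While VRAM is still mapped, overwrite it with its own
			 * pointer values so the VRAM->GTT leg below can be
			 * verified the same way. */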
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);
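		/* Now copy back from VRAM to GTT and check the data against the
		 * pattern written above. */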
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}
		for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
		     vram_start = vram_map, vram_end = (void *)((uintptr_t)vram_map + size);
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map),
					  (unsigned long long)
					  ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
					   (uintptr_t)vram_start - (uintptr_t)vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%jx\n",
			 (uintmax_t)gtt_addr - rdev->mc.gtt_start);
	}
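	/*
	 * Common cleanup: every error path above jumps here; release whatever
	 * subset of the VRAM object, the GTT objects and the fence was set up.
	 */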
out_cleanup:
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence)
		radeon_fence_unref(&fence);

	if (r)
		printk(KERN_WARNING "Error while testing BO move.\n");
}
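/*
 * Entry point for the BO move test. It simply runs radeon_do_test_moves()
 * once per copy engine the ASIC exposes. This is presumably reached via the
 * radeon.test module parameter, but that wiring lives outside this file.
 */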
void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
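/*
 * Emit something on @ring that ends with a fence. UVD and VCE rings are
 * handled via dummy create/destroy messages (the destroy message carries the
 * fence); all other rings lock the ring and emit a fence directly.
 */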
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
			return r;
		}
		radeon_fence_emit(rdev, fence, ring->idx);
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}
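/*
 * Semaphore test for a ring pair: ringA is made to block on a semaphore
 * twice, with a fence emitted after each wait. Neither fence may signal
 * until ringB signals the semaphore the corresponding number of times.
 */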
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
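/*
 * Three-ring variant: ringA and ringB both wait on the same semaphore and
 * ringC signals it twice. Exactly one of the two fences should signal per
 * signal emitted by ringC, which is what the polling loop below checks.
 */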
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
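/*
 * Used by radeon_test_syncing() to skip ring pairs that cannot be tested
 * against each other; in practice this excludes the VCE1/VCE2 pair.
 */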
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}
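/*
 * Walk every pair and every triple of rings that are ready and test them in
 * all argument orders, skipping combinations radeon_test_sync_possible()
 * rules out.
 */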
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}