/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */
/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM,
			 M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	return 0;
}
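/*
 * Illustrative call pattern (a sketch, not code from this file): fence
 * emission happens at the tail of a command submission while the ring
 * emission mutex is held. Helper names outside this file follow the
 * upstream radeon driver and are assumptions here.
 *
 *	r = radeon_ring_lock(rdev, ring, num_dwords);
 *	...write command packets...
 *	r = radeon_fence_emit(rdev, &fence, ring_index);
 *	radeon_ring_unlock_commit(rdev, ring);
 */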
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set the last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * anyway.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
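/*
 * Worked example of the 32->64 bit splice above (values invented):
 * with last_seq == 0x00000001fffffff0 the hardware counter may wrap and
 * radeon_fence_read() return 0x00000010. Splicing in the upper bits of
 * last_seq gives 0x0000000100000010, which is < last_seq, so the wrap
 * is detected and the upper bits of last_emitted are used instead
 * (say last_emitted == 0x0000000200000005, upper bits 0x0000000200000000),
 * yielding the correct 64 bit value 0x0000000200000010.
 */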
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	kfree(fence, M_DRM);	/* reconstructed: frees the M_DRM allocation from radeon_fence_emit() */
}
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}
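/*
 * Illustrative non-blocking poll (a sketch of an assumed caller, not
 * code from this file): once this returns true, buffers protected by
 * the fence are idle and can be reused.
 *
 *	if (radeon_fence_signaled(fence)) {
 *		radeon_fence_unref(&fence);
 *	}
 */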
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
					 (uintmax_t)target_seq, (uintmax_t)seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_RELEASE);
			}
		}
	}
	return 0;
}
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}
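/*
 * Illustrative blocking wait (a sketch of an assumed caller, not code
 * from this file):
 *
 *	r = radeon_fence_wait(fence, false);
 *	if (r) {
 *		DRM_ERROR("failed to wait on fence (%d)\n", r);
 *	}
 *	radeon_fence_unref(&fence);
 */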
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
					 (uintmax_t)target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				return -EDEADLK;
			}

			lockmgr(&rdev->ring_lock, LK_RELEASE);
		}
	}
	return 0;
}
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). Fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
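/*
 * Illustrative usage (a sketch of an assumed caller, not code from this
 * file): the suballocator builds a per-ring candidate array, leaving
 * NULL for rings it does not care about, and blocks until any of them
 * signals.
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS];
 *	...fill fences[] with at most one fence per ring, or NULL...
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */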
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	if (r) {
		if (r == -EDEADLK) {
			return -EDEADLK;
		}
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}
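/*
 * Illustrative reference pattern (a sketch, not code from this file):
 * take an extra reference when a fence must outlive its emitter, and
 * note that radeon_fence_unref() also NULLs the caller's pointer.
 *
 *	struct radeon_fence *mine = radeon_fence_ref(fence);
 *	...use mine after the original reference may be dropped...
 *	radeon_fence_unref(&mine);
 */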
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
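/*
 * Illustrative consumer (assumed, not code from this file): power
 * management code can treat a non-empty backlog of emitted fences as
 * GPU activity when choosing a power state.
 *
 *	if (radeon_fence_count_emitted(rdev, ring) > 0) {
 *		...consider the ring busy...
 *	}
 */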
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
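/*
 * Illustrative pairing (modeled on the upstream radeon semaphore code;
 * an assumption here): before dst_ring consumes buffers fenced on
 * another ring, the caller checks whether a semaphore is needed and
 * then records the sync point.
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		...emit a semaphore wait on dst_ring...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */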
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
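/*
 * Worked example of the writeback layout above (illustrative): with
 * ring == 1 and writeback events in use, index = R600_WB_EVENT_OFFSET + 4
 * bytes, so the fence dword is wb.wb[index/4] on the CPU side and
 * wb.gpu_addr + index from the GPU side; both name the same dword of
 * the writeback page.
 */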
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}
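/*
 * Why this works (descriptive note): sync_seq[ring] is the last *emitted*
 * sequence number, so after the write above radeon_fence_read() reports
 * every outstanding fence as signaled and any sleeping waiter's next
 * radeon_fence_seq_signaled() poll succeeds.
 */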
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
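/*
 * Example debugfs output (illustrative, values invented):
 *
 *	--- ring 0 ---
 *	Last signaled fence 0x0000000000001234
 *	Last emitted 0x0000000000001236
 *	Last sync to ring 3 0x0000000000001200
 */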
static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}