/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <dev/drm2/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */
/*
 * Fences:
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
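
/*
 * Typical fence lifecycle, as an illustrative sketch only (error
 * handling trimmed; "ring" stands for whichever ring index the caller
 * submitted work on):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, ring);	// fence the queued work
 *	if (r == 0) {
 *		r = radeon_fence_wait(fence, true);	// interruptible CPU wait
 *		radeon_fence_unref(&fence);		// drop our reference
 *	}
 */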
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), DRM_MEM_DRIVER,
			 M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%d)", ring, (*fence)->seq);
	return 0;
}
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchgs last_seq,
	 * and the value the other process sets as last_seq must be higher
	 * than the seq value we just read. Which means that the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer, we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}
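
		/* Worked example of the wrap handling above (made-up
		 * values): if last_seq is 0x00000001fffffffe and the
		 * 32-bit hardware read returns 0x00000003, the OR above
		 * yields seq = 0x0000000100000003 < last_seq, so the
		 * 32-bit counter must have wrapped; rebuilding the upper
		 * half from last_emitted (say 0x0000000200000005) gives
		 * the true value 0x0000000200000003.
		 */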
		if (seq <= last_seq || seq > last_emitted) {
			break;
		}

		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq as signaled than the current real last seq.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		cv_broadcast(&rdev->fence_queue);
	}
}
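
/*
 * For reference: radeon_fence_process() runs both from the fence
 * interrupt path and from the polling helpers below, e.g.
 * radeon_fence_seq_signaled().
 */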
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	drm_free(fence, DRM_MEM_DRIVER);
}
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled, fence_queue_locked;
	int r;
	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save the current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
		    ring, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		fence_queue_locked = false;
		r = 0;
		while (!(signaled = radeon_fence_seq_signaled(rdev,
		    target_seq, ring))) {
			if (!fence_queue_locked) {
				lockmgr(&rdev->fence_queue_mtx, LK_EXCLUSIVE);
				fence_queue_locked = true;
			}
			if (intr) {
				r = cv_timedwait_sig(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			} else {
				r = cv_timedwait(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			}
			if (r == EWOULDBLOCK) {
				signaled = radeon_fence_seq_signaled(
				    rdev, target_seq, ring);
				break;
			}
		}
		if (fence_queue_locked) {
			lockmgr(&rdev->fence_queue_mtx, LK_RELEASE);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r == EINTR || r == ERESTART)) {
			return -r;
		}
		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
		    ring, seq);
		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}
			/* check if the sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}
			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				continue;
			}
			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
					 (uintmax_t)target_seq, (uintmax_t)seq);
				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}
				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					lockmgr(&rdev->ring_lock, LK_RELEASE);
				}
				return -EDEADLK;
			}
			if (lock_ring) {
				lockmgr(&rdev->ring_lock, LK_RELEASE);
			}
		}
	}
	return 0;
}
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		DRM_ERROR("Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled, fence_queue_locked;
	int r;
	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}
	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
		    ring, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}

		fence_queue_locked = false;
		r = 0;
		while (!(signaled = radeon_fence_any_seq_signaled(rdev,
		    target_seq))) {
			if (!fence_queue_locked) {
				lockmgr(&rdev->fence_queue_mtx, LK_EXCLUSIVE);
				fence_queue_locked = true;
			}
			if (intr) {
				r = cv_timedwait_sig(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			} else {
				r = cv_timedwait(&rdev->fence_queue,
				    &rdev->fence_queue_mtx,
				    timeout);
			}
			if (r == EWOULDBLOCK) {
				signaled = radeon_fence_any_seq_signaled(
				    rdev, target_seq);
				break;
			}
		}
		if (fence_queue_locked) {
			lockmgr(&rdev->fence_queue_mtx, LK_RELEASE);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r == EINTR || r == ERESTART)) {
			return -r;
		}
		CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
		    ring, target_seq[ring]);
		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				continue;
			}
			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
					 (uintmax_t)target_seq[ring]);
				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}
				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				lockmgr(&rdev->ring_lock, LK_RELEASE);
				return -EDEADLK;
			}
			lockmgr(&rdev->ring_lock, LK_RELEASE);
		}
	}
	return 0;
}
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;
		if (!fences[i]) {
			continue;
		}
		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}
		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
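
/*
 * Illustrative caller sketch (the real user is the suballocator
 * mentioned above; "gfx_fence" is just a placeholder name): wait until
 * any one fence from a per-ring candidate set signals.
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *	int r;
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */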
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	if (r) {
		if (r == -EDEADLK) {
			return -EDEADLK;
		}
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
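
/*
 * Illustrative pairing with radeon_fence_note_sync() below (a sketch,
 * not lifted from a particular caller): only emit a semaphore when the
 * destination ring has not already synced past this fence, then record
 * the new sync point.
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// ... emit a semaphore wait on dst_ring here ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */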
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	lockinit(&rdev->fence_queue_mtx,
		 "drm__radeon_device__fence_queue_mtx", 0, LK_CANRECURSE);
	cv_init(&rdev->fence_queue, "drm__radeon_device__fence_queue");
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		cv_broadcast(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	/* destroy the queue once, after all rings are torn down */
	cv_destroy(&rdev->fence_queue);
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}