/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */

#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
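
/*
 * Fence sequence numbers are 64-bit in software, while the hardware
 * only ever writes the low 32 bits; radeon_fence_activity() below
 * reconstructs the upper bits from the last known 64-bit value.
 */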

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
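			/* no CPU address to read from here (the fence
			 * location is presumably unmapped, e.g. around
			 * suspend); fall back to the last sequence number
			 * the CPU has seen */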
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM, M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq;
	(*fence)->ring = ring;
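	/* all fences share the fence_queue spinlock as their fence lock */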
	fence_init(&(*fence)->base, &radeon_fence_ops,
		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = fence_signal_locked(&fence->base);

		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there needs to
	 * be a continuous stream of newly signaled fences, i.e.
	 * radeon_fence_read needs to return a different value each time
	 * for both the currently polling process and the other process
	 * that updates last_seq in between. And the value the other
	 * process sets as last_seq must be higher than the seq value we
	 * just read, which means the current process must be interrupted
	 * after radeon_fence_read and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq
	 * but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
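		/* the hardware reports only the low 32 bits of the
		 * sequence; splice in the upper 32 bits from the last
		 * known 64-bit value */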
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffffLL;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave, accepting that
			 * we might have set an older fence seq than the
			 * real last signaled seq.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
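	/* recover the ring index from this driver's offset in the
	 * fence_drv array */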
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= the requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

static bool radeon_fence_is_signaled(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}
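
	/* install a custom wait-queue callback; the reference taken by
	 * fence_get() below is dropped in radeon_fence_check_signaled() */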
	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	fence_get(f);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		int ret;

		ret = fence_signal(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence numbers have passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	u64 seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		fence_put(&tmp->base);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
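	/* seed the new fence location with the last signaled sequence
	 * number, presumably so readers stay coherent across the switch */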
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}

static const char *radeon_fence_get_driver_name(struct fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	/* XXX: This flag is probably not set as it should be */
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static signed long radeon_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	bool signaled;

	fence_enable_sw_signaling(&fence->base);

	/*
	 * This function has to return -EDEADLK, but cannot hold
	 * exclusive_lock during the wait because some callers
	 * may already hold it.  This means checking needs_reset without
	 * the lock, and not fiddling with any gpu internals.
	 *
	 * The callback installed with fence_enable_sw_signaling will
	 * run before our wait_event_*timeout call, so we will see
	 * both the signaled fence and the changes to needs_reset.
	 */
	if (intr)
		t = wait_event_interruptible_timeout(rdev->fence_queue,
			/* XXX: there is something very wrong here */
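			/* under __DragonFly__ the "|| 1" below forces the
			 * condition true, degrading this wait to a plain
			 * timeout; see the XXX above */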
#ifdef __DragonFly__
			((signaled = radeon_test_signaled(fence)) || 1 ||
#else
			((signaled = radeon_test_signaled(fence)) ||
#endif
			 rdev->needs_reset), t);
	else
		t = wait_event_timeout(rdev->fence_queue,
#ifdef __DragonFly__
			((signaled = radeon_test_signaled(fence)) || 1 ||
#else
			((signaled = radeon_test_signaled(fence)) ||
#endif
			 rdev->needs_reset), t);

	if (t > 0 && !signaled)
		return -EDEADLK;
	return t;
}

const struct fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};