/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */
/*
 * Fences:
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
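
/*
 * Typical driver-internal usage, sketched here only as an aid to the
 * reader (error handling omitted; the exact call sites live elsewhere
 * in the driver):
 *
 *	struct radeon_fence *fence;
 *
 *	radeon_fence_emit(rdev, &fence, ring);   (after emitting commands)
 *	radeon_fence_wait(fence, false);         (block until it signals)
 *	radeon_fence_unref(&fence);              (drop the reference)
 */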
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
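		/* Writeback path: the GPU DMAs the fence value into a
		 * CPU-visible buffer, so no MMIO register read is needed;
		 * if that buffer is not mapped we fall back to the last
		 * sequence number recorded in software.
		 */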
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM,
			 M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
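	/* sync_seq[ring] (the own-ring entry) tracks the last emitted
	 * sequence number, so pre-incrementing it here allocates the
	 * new fence's number under the ring emission mutex.
	 */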
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
#ifdef DUMBBELL_WIP
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
#endif /* DUMBBELL_WIP */
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;
	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchgs last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process set as last_seq must be higher
	 * than the seq value we just read, which means that the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
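		/* The hardware reports only 32 bits; extend to 64 bits
		 * with the upper word of last_seq. If the result went
		 * backwards the counter wrapped, so the fixup below
		 * borrows the upper word of last_emitted instead.
		 */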
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffffLL;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop again we don't want to return without
		 * checking if a fence is signaled, since the seq we
		 * just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}
/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
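	/* One radeon_fence_driver is kept per ring in a flat array,
	 * so the ring index falls out of pointer subtraction.
	 */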
	ring = fence_drv - &rdev->fence_drv[0];

	if (lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (radeon_fence_activity(rdev, ring)) {
		wake_up_all(&rdev->fence_queue);
	} else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016lx last fence id 0x%016lx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
}
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	kfree(fence, M_DRM);
}
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring))
		return true;
	return false;
}
/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}
/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					 u64 *target_seq, bool intr,
					 long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

#ifdef DUMBBELL_WIP
		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
#endif /* DUMBBELL_WIP */
		radeon_irq_kms_sw_irq_get(rdev, i);
	}
	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}
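	/* wait_event*_timeout returns the time left (at least 1) when
	 * the condition became true, 0 on timeout, and the interruptible
	 * variant can return -ERESTARTSYS if a signal arrived first.
	 */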
	if (rdev->needs_reset)
		r = -EDEADLK;

	/* disable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
#ifdef DUMBBELL_WIP
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
#endif /* DUMBBELL_WIP */
	}

	return r;
}
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, INT_MAX);
	if (r < 0)
		return r;
	return 0;
}
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;
		if (!fences[i])
			continue;
		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, INT_MAX);
	if (r < 0)
		return r;
	return 0;
}
/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

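	/* last_seq is the newest fence known to have signaled, so
	 * last_seq + 1 is the next fence to wait for, provided
	 * something beyond it has actually been emitted.
	 */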
	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
	if (r < 0)
		return r;
	return 0;
}
/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, INT_MAX);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
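	/* fdrv->sync_seq[fence->ring] is the newest fence on the
	 * fence's ring that dst_ring already synchronizes against;
	 * anything at or below it needs no new semaphore.
	 */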
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
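	/* Merge the sync_seq arrays; afterwards dst_ring is known to
	 * wait for at least everything the fence's ring waits for.
	 */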
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
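			/* Each ring owns a 4-byte slot in the writeback
			 * page; the CPU and GPU views of the slot are
			 * derived from the same byte offset.
			 */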
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
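	/* Seed the new fence location with the last signaled sequence
	 * number so reads do not go backwards across a re-start (e.g.
	 * after a GPU reset).
	 */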
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);
		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}
static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}