/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
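
/*
 * Typical fence lifecycle, as an illustrative sketch only (not driver
 * code; assumes the caller already holds the ring lock around command
 * submission, and RADEON_RING_TYPE_GFX_INDEX stands in for any ring):
 *
 *   struct radeon_fence *fence;
 *   int r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *   if (r == 0) {
 *           r = radeon_fence_wait(fence, false); // block until signaled
 *           radeon_fence_unref(&fence);          // drop our reference
 *   }
 */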

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), M_DRM,
			 M_WAITOK);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	refcount_init(&((*fence)->kref), 1);
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	return 0;
}
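
/*
 * Illustrative emission context (a sketch, not the actual call sites; the
 * real callers are e.g. the CS ioctl and the blit paths). The ring lock
 * taken by radeon_ring_lock() is the "ring emission mutex" the comment
 * above relies on:
 *
 *   struct radeon_ring *cp = &rdev->ring[idx];
 *   r = radeon_ring_lock(rdev, cp, 64);        // reserve ring space
 *   // ... write command packets to the ring ...
 *   r = radeon_fence_emit(rdev, &fence, idx);  // fence the submission
 *   radeon_ring_unlock_commit(rdev, cp);       // kick the work to the GPU
 */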

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to loop forever there would need to be a
	 * continuous stream of newly signaled fences, i.e. radeon_fence_read
	 * needs to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * must be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer, we count the number of times we loop and
	 * bail after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake)
		wake_up_all(&rdev->fence_queue);
}
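
/*
 * Worked example of the 32->64-bit extension above (illustrative numbers):
 * with last_seq = 0x1fffffffe and a hw read of seq = 0x00000003, OR-ing in
 * the upper bits of last_seq gives 0x100000003, which is < last_seq, so a
 * wrap is assumed and the upper bits are taken from last_emitted instead;
 * with last_emitted = 0x200000005 the result is seq = 0x200000003.
 */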

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{
	kfree(fence, M_DRM);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any ring has signaled (current value is >= requested
 * value) or false if none has. Helper function for radeon_fence_wait_seq().
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;
			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;
			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			/* check if any sequence value has changed since we last looked */
			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;
				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}
			if (i != RADEON_NUM_RINGS)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;
				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016jx last fence id 0x%016jx on"
					 " ring %d)\n",
					 (uintmax_t)target_seq[i],
					 (uintmax_t)last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
		return 0;

	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
	if (r)
		return r;

	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;
		if (!fences[i]) {
			continue;
		}
		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
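
/*
 * Illustrative caller sketch (modeled on the suballocator, the main user
 * of this helper; fences[] is indexed by ring id and NULL entries are
 * skipped):
 *
 *   struct radeon_fence *fences[RADEON_NUM_RINGS];
 *   // fill fences[] with one candidate fence per ring, or NULL
 *   r = radeon_fence_wait_any(rdev, fences, false);
 *   if (r == 0)
 *           ; // at least one of the fences has signaled
 */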

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, false);
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq(rdev, seq, false);
	if (r) {
		if (r == -EDEADLK)
			return -EDEADLK;
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	refcount_acquire(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		if (refcount_release(&tmp->kref)) {
			radeon_fence_destroy(tmp);
		}
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
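
/*
 * Illustrative dynpm-style use (a sketch under the assumption that the
 * caller only wants a busyness estimate, as the doc comment describes):
 *
 *   unsigned i, in_flight = 0;
 *   for (i = 0; i < RADEON_NUM_RINGS; ++i)
 *           in_flight += radeon_fence_count_emitted(rdev, i);
 *   // in_flight == 0 means every ring has caught up with its fences
 */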

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}
	if (fence->ring == dst_ring) {
		return false;
	}
	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}
	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}
	if (fence->ring == dst_ring) {
		return;
	}
	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
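
/*
 * Illustrative inter-ring sync pattern (sketch; in the driver this pairing
 * is handled by the semaphore code, e.g. radeon_semaphore_sync_rings()):
 *
 *   if (radeon_fence_need_sync(fence, dst_ring)) {
 *           // emit a semaphore making dst_ring wait for fence->ring,
 *           // then record that the sync happened:
 *           radeon_fence_note_sync(fence, dst_ring);
 *   }
 */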

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;
		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->datasize, 8);
			rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016jx and cpu addr 0x%p\n",
		 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
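
/*
 * Worked example (illustrative): with writeback enabled on ring 1, the
 * fence lands at index R600_WB_EVENT_OFFSET + 1 * 4 within the writeback
 * page, i.e. each ring owns a private 4-byte slot, read back through
 * wb.wb[index / 4] on the CPU and written via gpu_addr + index by the GPU.
 */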

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized)
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   (unsigned long long)rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, (unsigned long long)rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}