/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fence.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
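
/*
 * Illustrative usage only (not a complete, compilable sequence): a caller
 * typically emits a fence after queuing its ring commands and later waits
 * on it from the CPU, e.g.
 *
 *      struct radeon_fence *fence;
 *      r = radeon_fence_emit(rdev, &fence, ring);  -- inside ring emission
 *      ...                                         -- commit ring commands
 *      r = radeon_fence_wait(fence, false);        -- CPU waits for the GPU
 *      radeon_fence_unref(&fence);
 *
 * Error handling and ring locking are omitted in this sketch.
 */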

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr) {
                        *drv->cpu_addr = cpu_to_le32(seq);
                }
        } else {
                WREG32(drv->scratch_reg, seq);
        }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        u32 seq = 0;

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr) {
                        seq = le32_to_cpu(*drv->cpu_addr);
                } else {
                        seq = lower_32_bits(atomic64_read(&drv->last_seq));
                }
        } else {
                seq = RREG32(drv->scratch_reg);
        }
        return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), M_DRM,
                         M_WAITOK);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
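        /*
         * sync_seq[ring] of a ring's own fence driver holds the sequence
         * number of the last fence emitted on that ring; pre-incrementing
         * it below allocates the number for this new fence.
         */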
        refcount_init(&((*fence)->kref), 1);
        (*fence)->rdev = rdev;
        (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        radeon_fence_ring_emit(rdev, ring, *fence);
        return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop, new fences
         * need to be signaled continuously, i.e. radeon_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And
         * the value the other process sets as last_seq must be higher
         * than the seq value we just read, which means the current
         * process needs to be interrupted after radeon_fence_read and
         * before the atomic xchg.
         *
         * To be even safer, we count the number of times we loop and
         * bail out after 10 loops, accepting the fact that we might
         * have temporarily set last_seq not to the true last
         * seq but to an older one.
         */
        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
                last_emitted = rdev->fence_drv[ring].sync_seq[ring];
                seq = radeon_fence_read(rdev, ring);
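                /*
                 * The hardware only reports the low 32 bits of the sequence
                 * number; extend it to 64 bits using the upper bits of the
                 * last known value, or of the last emitted fence if the low
                 * word appears to have wrapped around.
                 */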
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

        if (wake) {
                rdev->fence_drv[ring].last_activity = jiffies;
                wake_up_all(&rdev->fence_queue);
        }
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @fence: radeon fence object
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct radeon_fence *fence)
{

        drm_free(fence, M_DRM);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        /* poll new last sequence at least once */
        radeon_fence_process(rdev, ring);
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
        if (!fence) {
                return true;
        }
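        /*
         * RADEON_FENCE_SIGNALED_SEQ is a sentinel stored in fence->seq once
         * the fence is known to have signaled, so later queries can return
         * early without touching the hardware again.
         */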
        if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
                return true;
        }
        if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
                fence->seq = RADEON_FENCE_SIGNALED_SEQ;
                return true;
        }
        return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
        unsigned long timeout, last_activity;
        uint64_t seq;
        unsigned i;
        bool signaled;
        int r;

        while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                if (!rdev->ring[ring].ready) {
                        return -EBUSY;
                }

                timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = rdev->fence_drv[ring].last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
                         * anyway we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }
                seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
                /* Save current last activity value, used to check for GPU lockups */
                last_activity = rdev->fence_drv[ring].last_activity;

                radeon_irq_kms_sw_irq_get(rdev, ring);
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                }
                radeon_irq_kms_sw_irq_put(rdev, ring);
                if (unlikely(r < 0)) {
                        return r;
                }

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and the fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        /* check if sequence value has changed since last_activity */
                        if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                                continue;
                        }

                        if (lock_ring) {
                                lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
                        }

                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != rdev->fence_drv[ring].last_activity) {
                                if (lock_ring) {
                                        lockmgr(&rdev->ring_lock, LK_RELEASE);
                                }
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news, we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
                                         target_seq, seq);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = jiffies;
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                if (lock_ring) {
                                        lockmgr(&rdev->ring_lock, LK_RELEASE);
                                }
                                return -EDEADLK;
                        }

                        if (lock_ring) {
                                lockmgr(&rdev->ring_lock, LK_RELEASE);
                        }
                }
        }
        return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
        int r;

        if (fence == NULL) {
                DRM_ERROR("Querying an invalid fence : %p !\n", fence);
                return -EINVAL;
        }

        r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                                  fence->ring, intr, true);
        if (r) {
                return r;
        }
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
        unsigned i;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
                        return true;
                }
        }
        return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
        unsigned long timeout, last_activity, tmp;
        unsigned i, ring = RADEON_NUM_RINGS;
        bool signaled;
        int r;

        for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i]) {
                        continue;
                }

                /* use the most recent one as indicator */
                if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
                        last_activity = rdev->fence_drv[i].last_activity;
                }

                /* For lockup detection just pick the lowest ring we are
                 * actively waiting for
                 */
                if (i < ring) {
                        ring = i;
                }
        }

        /* nothing to wait for? */
        if (ring == RADEON_NUM_RINGS) {
                return -ENOENT;
        }

        while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
                timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
                         * anyway we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }

                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_get(rdev, i);
                        }
                }
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                }
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_put(rdev, i);
                        }
                }
                if (unlikely(r < 0)) {
                        return r;
                }

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and the fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
                        for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                                        tmp = rdev->fence_drv[i].last_activity;
                                }
                        }
                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != tmp) {
                                last_activity = tmp;
                                lockmgr(&rdev->ring_lock, LK_RELEASE);
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news, we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
                                         target_seq[ring]);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = jiffies;
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                lockmgr(&rdev->ring_lock, LK_RELEASE);
                                return -EDEADLK;
                        }
                        lockmgr(&rdev->ring_lock, LK_RELEASE);
                }
        }
        return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr)
{
        uint64_t seq[RADEON_NUM_RINGS];
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                seq[i] = 0;

                if (!fences[i]) {
                        continue;
                }

                if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
                        /* something was already signaled */
                        return 0;
                }

                seq[i] = fences[i]->seq;
        }

        r = radeon_fence_wait_any_seq(rdev, seq, intr);
        if (r) {
                return r;
        }
        return 0;
}

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq;

        seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
                /* nothing to wait for, last_seq is
                   already the last emitted fence */
                return -ENOENT;
        }
        return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
        int r;

        r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
        if (r) {
                if (r == -EDEADLK) {
                        return -EDEADLK;
                }
                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
                        ring, r);
        }
        return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
        refcount_acquire(&fence->kref);
        return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
        struct radeon_fence *tmp = *fence;

        *fence = NULL;
        if (tmp) {
                if (refcount_release(&tmp->kref)) {
                        radeon_fence_destroy(tmp);
                }
        }
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        radeon_fence_process(rdev, ring);
        emitted = rdev->fence_drv[ring].sync_seq[ring]
                - atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* to avoid a 32-bit wrap-around */
        if (emitted > 0x10000000) {
                emitted = 0x10000000;
        }
        return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *fdrv;

        if (!fence) {
                return false;
        }

        if (fence->ring == dst_ring) {
                return false;
        }

        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
        if (fence->seq <= fdrv->sync_seq[fence->ring]) {
                return false;
        }

        return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *dst, *src;
        unsigned i;

        if (!fence) {
                return;
        }

        if (fence->ring == dst_ring) {
                return;
        }

        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
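        /*
         * After dst_ring waits on this fence (via a semaphore) it is also
         * synchronized with everything fence->ring was already synced to,
         * so propagate the source ring's sync points, keeping the higher
         * value for each ring.
         */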
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (i == dst_ring) {
                        continue;
                }
                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
        uint64_t index;
        int r;

        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
                if (ring != R600_RING_TYPE_UVD_INDEX) {
                        index = R600_WB_EVENT_OFFSET + ring * 4;
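                        /*
                         * index is a byte offset into the writeback page;
                         * wb.wb[] is an array of 32-bit words, hence the
                         * divide by 4, while gpu_addr is the GPU-visible
                         * address of the same slot.
                         */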
                        rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
                        rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
                                                         index;

                } else {
                        /* put fence directly behind firmware */
                        index = ALIGN(rdev->uvd_fw->datasize, 8);
                        rdev->fence_drv[ring].cpu_addr = (void*)((uint8_t*)rdev->uvd.cpu_addr + index);
                        rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
                }

        } else {
                r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
                if (r) {
                        dev_err(rdev->dev, "fence failed to get scratch register\n");
                        return r;
                }
                index = RADEON_WB_SCRATCH_OFFSET +
                        rdev->fence_drv[ring].scratch_reg -
                        rdev->scratch.reg_base;
                rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
                rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        }
        radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
        rdev->fence_drv[ring].initialized = true;
        dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
                 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
        return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
        int i;

        rdev->fence_drv[ring].scratch_reg = -1;
        rdev->fence_drv[ring].cpu_addr = NULL;
        rdev->fence_drv[ring].gpu_addr = 0;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                rdev->fence_drv[ring].sync_seq[i] = 0;
        atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
        rdev->fence_drv[ring].last_activity = jiffies;
        rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
        int ring;

        init_waitqueue_head(&rdev->fence_queue);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                radeon_fence_driver_init_ring(rdev, ring);
        }
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
        return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
        int ring, r;

        lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
                r = radeon_fence_wait_empty_locked(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        radeon_fence_driver_force_completion(rdev);
                }
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
                rdev->fence_drv[ring].initialized = false;
        }
        lockmgr(&rdev->ring_lock, LK_RELEASE);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
        int ring;

        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
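                /* writing the last emitted sequence number as the fence value
                 * makes every outstanding fence on this ring appear signaled */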
                radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
        }
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int i, j;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!rdev->fence_drv[i].initialized)
                        continue;

                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic_load_acq_64(&rdev->fence_drv[i].last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           rdev->fence_drv[i].sync_seq[i]);

                for (j = 0; j < RADEON_NUM_RINGS; ++j) {
                        if (i != j && rdev->fence_drv[j].initialized)
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, rdev->fence_drv[i].sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
        {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
        return 0;
#endif
}