/* $NetBSD: linux_fence.c,v 1.14 2019/01/05 22:24:24 tnn Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/compiler.h>

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/fence.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/*
 * True if we print FENCE_TRACE messages, false if not. These are
 * extremely noisy, too much even for AB_VERBOSE and AB_DEBUG in
 * boot(8).
 */
int linux_fence_trace = 0;

/*
 * fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence. Caller should call fence_destroy when done,
 * after all references have been released.
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops, struct lock *lock,
    unsigned context, unsigned seqno)
{
	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "fence");
}

/*
 * fence_destroy(fence)
 *
 * Clean up memory initialized with fence_init. This is meant to
 * be used after a fence release callback.
 */
void
fence_destroy(struct fence *fence)
{
	KASSERT(!fence_referenced_p(fence));
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
fence_free_cb(struct rcu_head *rcu)
{
	struct fence *fence = container_of(rcu, struct fence, f_rcu);

	fence_destroy(fence);
	kfree(fence);
}

/*
 * fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released. This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used. We don't even use
 * kmalloc to allocate these -- caller is expected to allocate
 * memory with kmalloc to be initialized with fence_init.
 */
void
fence_free(struct fence *fence)
{
	call_rcu(&fence->f_rcu, &fence_free_cb);
}

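/*
 * Illustrative sketch (not part of the original file): one way a driver
 * might pair the allocation contract described in the NOTE above with a
 * release callback that uses fence_free. The "example_*" names are
 * hypothetical, and example_job_fence_ops stands in for a real driver's
 * fence_ops whose .release member is example_job_release.
 */
#if 0
struct example_job {
	struct fence	base;	/* first member, so kfree of the fence frees the job */
};

static const struct fence_ops example_job_fence_ops;

static void
example_job_release(struct fence *fence)
{
	/* Destroy and kfree the fence once RCU readers are done with it. */
	fence_free(fence);
}

static struct example_job *
example_job_create(struct lock *lock, unsigned context, unsigned seqno)
{
	struct example_job *job;

	job = kmalloc(sizeof(*job), GFP_KERNEL);
	if (job == NULL)
		return NULL;
	fence_init(&job->base, &example_job_fence_ops, lock, context, seqno);
	return job;
}
#endif
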
static inline uint32_t
atomic_add_int_nv(volatile uint32_t *target, int32_t delta)
{
	return (atomic_fetchadd_32(target, delta) + delta);
}

/*
 * fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}

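/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically allocate one fence context per timeline when it attaches and
 * then stamp successive fences on that timeline with an increasing seqno.
 * The "example_*" names are hypothetical.
 */
#if 0
static unsigned example_context;	/* timeline id from fence_context_alloc */
static unsigned example_seqno;		/* last seqno issued on that timeline */

static void
example_timeline_init(void)
{
	example_context = fence_context_alloc(1);
	example_seqno = 0;
}

static void
example_fence_setup(struct fence *fence, struct lock *lock,
    const struct fence_ops *ops)
{
	fence_init(fence, ops, lock, example_context, ++example_seqno);
}
#endif
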
/*
 * fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b. Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
fence_is_later(struct fence *a, struct fence *b)
{
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}

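/*
 * Illustrative sketch (not part of the original file): because the
 * comparison above is done in unsigned wraparound arithmetic, a fence
 * whose seqno has wrapped past zero still compares as later. The fences
 * here are only partially initialized for the sake of the example.
 */
#if 0
static void
example_is_later_wraparound(void)
{
	struct fence a = { .context = 7, .seqno = 2 };
	struct fence b = { .context = 7, .seqno = UINT_MAX - 1 };

	/* 2 - (UINT_MAX - 1) wraps around to 4, which is < INT_MAX. */
	KASSERT(fence_is_later(&a, &b));
	/* (UINT_MAX - 1) - 2 is >= INT_MAX, so b is not later than a. */
	KASSERT(!fence_is_later(&b, &a));
}
#endif
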
/*
 * fence_get(fence)
 *
 * Acquire a reference to fence. The fence must not be being
 * destroyed. Return the fence.
 */
struct fence *
fence_get(struct fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section. Return the fence on
 * success, or NULL on failure.
 */
struct fence *
fence_get_rcu(struct fence *fence)
{
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

static void
fence_release(struct kref *refcount)
{
	struct fence *fence = container_of(refcount, struct fence, refcount);

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		fence_free(fence);
}

/*
 * fence_put(fence)
 *
 * Release a reference to fence. If this was the last one, call
 * the fence's release callback.
 */
void
fence_put(struct fence *fence)
{
	if (fence == NULL)
		return;
	kref_put(&fence->refcount, &fence_release);
}

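/*
 * Illustrative sketch (not part of the original file): handing a fence to
 * another consumer takes an extra reference with fence_get, so that each
 * holder can drop its own reference with fence_put; the last fence_put
 * runs the release callback. example_queue_for_consumer is hypothetical.
 */
#if 0
void example_queue_for_consumer(struct fence *);

static void
example_hand_off(struct fence *fence)
{
	/* Give the consumer its own reference... */
	example_queue_for_consumer(fence_get(fence));

	/* ...so the producer can drop its reference whenever it likes. */
	fence_put(fence);
}
#endif
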
/*
 * fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine. If the fence was already signalled,
 * return -ENOENT. Otherwise, if the enable signalling callback
 * has not been called yet, call it. If it fails, signal the fence
 * and return -ENOENT. If it succeeds, or if it had already been
 * called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
fence_ensure_signal_enabled(struct fence *fence)
{
	KKASSERT(spin_is_locked(fence->lock));

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * If the enable signaling callback has been called, success.
	 * Otherwise, set the bit indicating it.
	 */
	if (test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
		return 0;

	/* Otherwise, note that we've called it and call it. */
	if (!(*fence->ops->enable_signaling)(fence)) {
		/* If it failed, signal and return -ENOENT. */
		fence_signal_locked(fence);
		return -ENOENT;
	}

	/* Success! */
	return 0;
}

/*
 * fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT. If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT. Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses memory allocated by the caller in fcb from the
 * time of fence_add_callback until either fence_remove_callback
 * is called or fn is about to be called.
 */
int
fence_add_callback(struct fence *fence, struct fence_cb *fcb, fence_func_t fn)
{
	int ret;

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	mutex_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->fcb_func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done. */
out1:	mutex_unlock(fence->lock);
out0:	return ret;
}

/*
 * fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence. Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway. Caller must have already called
 * fence_add_callback(fence, fcb).
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *fcb)
{
	bool onqueue;

	mutex_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	mutex_unlock(fence->lock);

	return onqueue;
}

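/*
 * Illustrative sketch (not part of the original file): a caller embeds a
 * struct fence_cb in its own record, registers it with fence_add_callback,
 * and later uses fence_remove_callback if it gives up waiting. The
 * "example_*" names are hypothetical.
 */
#if 0
struct example_waiter {
	struct fence_cb	fcb;
	bool		done;
};

static void
example_done_cb(struct fence *fence, struct fence_cb *fcb)
{
	struct example_waiter *w = container_of(fcb, struct example_waiter, fcb);

	w->done = true;		/* runs with the fence's lock held */
}

static int
example_watch(struct fence *fence, struct example_waiter *w)
{
	int error;

	w->done = false;
	error = fence_add_callback(fence, &w->fcb, &example_done_cb);
	if (error == -ENOENT) {
		/* Already signalled; no callback will be called. */
		w->done = true;
		error = 0;
	}
	return error;
}
#endif
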
/*
 * fence_enable_sw_signaling(fence)
 *
 * If it hasn't been called yet and the fence hasn't been
 * signalled yet, call the fence's enable_signaling callback. If,
 * when that happens, the callback indicates failure by returning
 * false, signal the fence.
 */
void
fence_enable_sw_signaling(struct fence *fence)
{
	mutex_lock(fence->lock);
	(void)fence_ensure_signal_enabled(fence);
	mutex_unlock(fence->lock);
}

/*
 * fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled. If it has been
 * signalled by fence_signal(_locked), return true. If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * fence_signal.
 */
bool
fence_is_signaled(struct fence *fence)
{
	bool signaled;

	mutex_lock(fence->lock);
	signaled = fence_is_signaled_locked(fence);
	mutex_unlock(fence->lock);

	return signaled;
}

/*
 * fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled. Like
 * fence_is_signaled, but caller already holds the fence's lock.
 */
bool
fence_is_signaled_locked(struct fence *fence)
{
	KKASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon. Act as though someone
			 * has called fence_signal.
			 */
			fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * fence_signal(fence)
 *
 * Signal the fence. If it has already been signalled, return
 * -EINVAL. If it has not been signalled, call the enable
 * signalling callback if it hasn't been called yet, and remove
 * each registered callback from the queue and call it; then
 * return 0.
 */
int
fence_signal(struct fence *fence)
{
	int ret;

	mutex_lock(fence->lock);
	ret = fence_signal_locked(fence);
	mutex_unlock(fence->lock);

	return ret;
}

/*
 * fence_signal_locked(fence)
 *
 * Signal the fence. Like fence_signal, but caller already holds
 * the fence's lock.
 */
int
fence_signal_locked(struct fence *fence)
{
	struct fence_cb *fcb, *next;

	KKASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake any waiters blocked in fence_default_wait. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_MUTABLE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->fcb_func)(fence, fcb);
	}

	return 0;
}

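/*
 * Illustrative sketch (not part of the original file): a completion
 * interrupt path typically looks up the fence that just completed and
 * signals it; fence_signal takes the fence's lock itself, while
 * fence_signal_locked is for paths that already hold it. The "example_*"
 * names are hypothetical.
 */
#if 0
struct example_softc;
struct fence *example_lookup_completed_fence(struct example_softc *);

static void
example_complete_intr(struct example_softc *sc)
{
	struct fence *fence = example_lookup_completed_fence(sc);

	if (fence != NULL) {
		/* Returns -EINVAL if it had already been signalled. */
		(void)fence_signal(fence);
		fence_put(fence);
	}
}
#endif
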
static void
wait_any_cb(struct fence *fence, struct fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	mutex_lock(&cb->common->lock);
	cb->common->done = true;
	cv_broadcast(&cb->common->cv);
	mutex_unlock(&cb->common->lock);
}

/*
 * fence_wait_any_timeout(fences, nfences, intr, timeout)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled.
 */
long
fence_wait_any_timeout(struct fence **fences, uint32_t nfences, bool intr,
    long timeout)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);

	/* Initialize a mutex and condvar for the common wait. */
	lockinit(&common.lock, "drmfcl", 0, LK_CANRECURSE);
	cv_init(&common.cv, "fence");
	common.done = false;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		ret = fence_add_callback(fences[i], &cb[i].fcb, &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled. If they
	 * have, stop here. If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			common.done = true;
			break;
		}
	}

	/*
	 * None of them was ready immediately. Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_lock(&common.lock);
	while (timeout > 0 && !common.done) {
		start = ticks;
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
				    MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
				    MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
			}
		}
		if (ret)
			break;
		end = ticks;
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_unlock(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret == -EINTR || ret == -ERESTART)
		ret = -ERESTARTSYS;
	if (ret == -EWOULDBLOCK)
		ret = 0;
	if (ret == 0)
		ret = timeout;

	/* Remove the callbacks, tear down the common wait, and return. */
out1:	while (i --> 0)
		(void)fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);

	return ret;
}

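/*
 * Illustrative sketch (not part of the original file): waiting up to one
 * second (hz ticks), interruptibly, for whichever of two fences signals
 * first. The "example_*" name is hypothetical.
 */
#if 0
static int
example_wait_either(struct fence *a, struct fence *b)
{
	struct fence *fences[2] = { a, b };
	long ret;

	ret = fence_wait_any_timeout(fences, 2, true, hz);
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS if interrupted */
	if (ret == 0)
		return -ETIMEDOUT;	/* neither fence signalled in time */
	return 0;			/* ret is the number of ticks left */
}
#endif
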
/*
 * fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive. Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout. Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
fence_wait_timeout(struct fence *fence, bool intr, long timeout)
{
	KKASSERT(timeout >= 0);
	KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	return (*fence->ops->wait)(fence, intr, timeout);
}

/*
 * fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true. Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success. Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
fence_wait(struct fence *fence, bool intr)
{
	long ret;

	ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);

	return (ret < 0 ? ret : 0);
}

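/*
 * Illustrative sketch (not part of the original file): choosing between
 * the bounded and unbounded waits above and mapping their return values
 * onto a plain error code. The "example_*" name is hypothetical.
 */
#if 0
static int
example_wait_done(struct fence *fence, long timeout_ticks)
{
	long ret;

	if (timeout_ticks == 0)
		ret = fence_wait(fence, true);	/* wait with no deadline */
	else
		ret = fence_wait_timeout(fence, true, timeout_ticks);
	if (ret < 0)
		return ret;		/* -ERESTARTSYS or other error */
	if (timeout_ticks != 0 && ret == 0)
		return -ETIMEDOUT;	/* fence_wait_timeout timed out */
	return 0;
}
#endif
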
/*
 * fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable. If the fence is already signalled, return timeout,
 * or 1 if no timeout. If the enable signalling callback hasn't
 * been called, call it, and if it fails, act as if the fence had
 * been signalled. Otherwise, wait on the internal condvar. If
 * timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
 */
long
fence_default_wait(struct fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	struct lock *lock = fence->lock;
	long ret = 0;

	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);

	/* Acquire the lock. */
	mutex_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = fence_ensure_signal_enabled(fence);

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = starttime = ticks;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = ticks;
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
			}
		}
		/* If the wait failed, give up. */
		if (ret)
			break;
	}

	/* All done. Release the lock. */
	mutex_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KKASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */

	/*
	 * Success! Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

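/*
 * Illustrative sketch (not part of the original file): a minimal fence_ops
 * for a fence that is signalled from an interrupt path and relies on
 * fence_default_wait above. Only the members this file itself uses
 * (enable_signaling, signaled, wait, release) are shown; the "example_*"
 * names are hypothetical.
 */
#if 0
static bool
example_enable_signaling(struct fence *fence)
{
	/* The interrupt path always calls fence_signal, so nothing to do. */
	return true;
}

static void
example_release(struct fence *fence)
{
	/* The fence was allocated with kmalloc, so fence_free applies. */
	fence_free(fence);
}

static const struct fence_ops example_fence_ops = {
	.enable_signaling	= example_enable_signaling,
	.signaled		= NULL,			/* no implicit check */
	.wait			= fence_default_wait,	/* condvar-based wait */
	.release		= example_release,
};
#endif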