/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * queues.
 */
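
/*
 * Minimal usage sketch (illustrative only; the thin inline wrappers such
 * as mtx_init(), mtx_lock_ex_quick() and mtx_unlock() live in sys/mutex2.h,
 * and their exact names/signatures are assumed here).  Unlike a token, the
 * mutex may remain held across the blocking operations in the middle:
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx);
 *	mtx_lock_ex_quick(&foo_mtx, "foomtx");
 *	... access or modify the shared state, possibly blocking ...
 *	mtx_unlock(&foo_mtx);
 */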

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
            &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
            &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
            &mtx_wakeup_count, 0, "");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {

        /*
         * Clearing MTX_EXLINK in lock causes us to loop until
         * MTX_EXLINK is available.  However, to avoid
         * unnecessary cpu cache traffic we poll instead.
         *
         * Setting MTX_EXLINK in nlock causes us to loop until
         * we can acquire MTX_EXLINK.
         *
         * Also set MTX_EXWANTED coincident with EXLINK, if
         * not already set.
         */
        if (lock & MTX_EXLINK) {
                ++mtx_collision_count;
        /*lock &= ~MTX_EXLINK;*/
        nlock = lock | MTX_EXWANTED | MTX_EXLINK;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                /*
                 * Check for early abort
                 */
                if (link->state == MTX_LINK_ABORTED) {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_EXLINK);
                        if (mtx->mtx_link == NULL) {
                                atomic_clear_int(&mtx->mtx_lock,
                                                 MTX_EXWANTED);

                /*
                 * Success.  Link in our structure then
                 * release EXLINK and sleep.
                 */
                link->state = MTX_LINK_LINKED;
                link->next = mtx->mtx_link;
                link->prev = link->next->prev;
                link->next->prev = link;
                link->prev->next = link;
                mtx->mtx_link = link;
                tsleep_interlock(link, 0);
                atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);

                error = tsleep(link, flags, ident, to);
                ++mtx_contention_count;

                /*
                 * Normal unlink, we should own the exclusive
                 * lock now.
                 */
                if (link->state == MTX_LINK_LINKED)
                        mtx_delete_link(mtx, link);
                if (link->state == MTX_LINK_ACQUIRED) {
                        KKASSERT(mtx->mtx_owner == link->owner);

                /*
                 * Aborted lock (mtx_abort_ex called).
                 */
                if (link->state == MTX_LINK_ABORTED) {

                /*
                 * tsleep error, else retry.
                 */
        ++mtx_collision_count;

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
                  const char *ident, int flags, int to)
{
        return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
        struct mtx_link link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
        struct mtx_link link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}
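
/*
 * Error-handling sketch for the blocking exclusive-lock path (illustrative
 * only; "fooex" and the 5 second timeout are made up).  A nonzero return
 * is only possible when PCATCH and/or a timeout is passed in:
 *
 *	int error;
 *
 *	error = _mtx_lock_ex(&foo_mtx, "fooex", PCATCH, 5 * hz);
 *	if (error == 0) {
 *		... exclusive access, may block or recurse ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... EINTR/ERESTART from a signal, EWOULDBLOCK on timeout ...
 *	}
 */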

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
        } else {
                nlock = lock | MTX_SHWANTED;
                tsleep_interlock(mtx, 0);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        error = tsleep(mtx, flags, ident, to);
                        ++mtx_contention_count;
                } else {
                        tsleep_remove(curthread);
                }
        }
        ++mtx_collision_count;

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
        return (__mtx_lock_sh(mtx, ident, 0, 0));
}
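
/*
 * Shared-lock sketch (illustrative only).  Multiple shared holders can
 * coexist and are woken en masse when an exclusive holder releases or
 * downgrades; without PCATCH the quick form cannot fail:
 *
 *	_mtx_lock_sh_quick(&foo_mtx, "foord");
 *	... read-side access, other shared holders may run concurrently ...
 *	mtx_unlock(&foo_mtx);
 */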

/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        } else {
                /* backoff spin while contended */
                for (bo = 0; bo < bb; ++bo)
                        ;
                ++mtx_contention_count;
        }
        ++mtx_collision_count;

/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = gd->gd_curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == gd->gd_curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        } else {
                /* undo the critical section predisposition on failure */
                --gd->gd_curthread->td_critcount;
        }
        ++mtx_collision_count;
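
/*
 * Try-lock sketch (illustrative only; mtx_spinlock_try() and
 * mtx_spinunlock() are assumed to be the inline wrappers in sys/mutex2.h
 * that set up the accounting and, on failure, rely on this routine to
 * have undone it):
 *
 *	if (mtx_spinlock_try(&foo_mtx) == 0) {
 *		... short, non-blocking critical section ...
 *		mtx_spinunlock(&foo_mtx);
 *	} else {
 *		... EAGAIN: contended, take the slow or deferred path ...
 *	}
 */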

void
_mtx_spinlock_sh(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        } else {
                /* backoff spin while contended */
                for (bo = 0; bo < bb; ++bo)
                        ;
                ++mtx_contention_count;
        }
        ++mtx_collision_count;

int
_mtx_lock_ex_try(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        ++mtx_collision_count;

int
_mtx_lock_sh_try(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        ++mtx_collision_count;

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) > 0);
        KKASSERT(mtx->mtx_owner == curthread);
        nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                if (lock & MTX_SHWANTED) {
        ++mtx_collision_count;
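
/*
 * Downgrade sketch (illustrative only): take the lock exclusively to
 * install an update, then convert the hold to a shared hold so readers can
 * proceed while the caller keeps using the data:
 *
 *	_mtx_lock_ex_quick(&foo_mtx, "fooupd");
 *	... exclusive phase: modify the structure ...
 *	_mtx_downgrade(&foo_mtx);
 *	... shared phase: readers may now interleave with us ...
 *	mtx_unlock(&foo_mtx);
 */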

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
        lock = mtx->mtx_lock;

        if ((lock & ~MTX_EXWANTED) == 1) {
                nlock = lock | MTX_EXCLUSIVE;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        mtx->mtx_owner = curthread;
        } else if (lock & MTX_EXCLUSIVE) {
                KKASSERT(mtx->mtx_owner == curthread);
        ++mtx_collision_count;
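
/*
 * Upgrade sketch (illustrative only).  The try-upgrade fails with EDEADLK
 * when other shared holders are present; the usual fallback is to drop the
 * shared lock, reacquire exclusively and revalidate whatever was read:
 *
 *	_mtx_lock_sh_quick(&foo_mtx, "fooupg");
 *	if (_mtx_upgrade_try(&foo_mtx) != 0) {
 *		mtx_unlock(&foo_mtx);
 *		_mtx_lock_ex_quick(&foo_mtx, "fooupg");
 *		... revalidate, the lock was dropped for a moment ...
 *	}
 *	... exclusive access either way ...
 *	mtx_unlock(&foo_mtx);
 */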

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available when others want an exclusive
 * lock causes ownership to be chained to the next exclusive waiter instead
 * of the lock being released.
 */
void
_mtx_unlock(mtx_t mtx)
{
        lock = mtx->mtx_lock;
        nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

        if (nlock == 1) {
                /*
                 * Last release, shared lock, no exclusive waiters.
                 */
                nlock = lock & MTX_EXLINK;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        } else if (nlock == (MTX_EXCLUSIVE | 1)) {
                /*
                 * Last release, exclusive lock, no exclusive waiters.
                 * Wake up any shared waiters.
                 */
                mtx->mtx_owner = NULL;
                nlock = lock & MTX_EXLINK;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        if (lock & MTX_SHWANTED) {
        } else if (nlock == (MTX_EXWANTED | 1)) {
                /*
                 * Last release, shared lock, with exclusive
                 * waiter(s).
                 *
                 * Wait for EXLINK to clear, then acquire it.
                 * We could use the cmpset for this but polling
                 * is better on the cpu caches.
                 *
                 * Acquire an exclusive lock leaving the lockcount
                 * set to 1, and get EXLINK for access to mtx_link.
                 */
                if (lock & MTX_EXLINK) {
                        ++mtx_collision_count;
                /*lock &= ~MTX_EXLINK;*/
                nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
                nlock |= (lock & MTX_SHWANTED);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
        } else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
                /*
                 * Last release, exclusive lock, with exclusive
                 * waiter(s).
                 *
                 * Leave the exclusive lock intact and the lockcount
                 * set to 1, and get EXLINK for access to mtx_link.
                 */
                if (lock & MTX_EXLINK) {
                        ++mtx_collision_count;
                /*lock &= ~MTX_EXLINK;*/
                nlock |= (lock & MTX_SHWANTED);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
        } else {
                /*
                 * Not the last release (shared or exclusive)
                 */
                nlock = lock - 1;
                KKASSERT((nlock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        ++mtx_collision_count;
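
/*
 * Worked example of the chaining rule above (illustrative): thread A holds
 * the mutex exclusively (count 1) and thread B sleeps on its mtx_link with
 * MTX_EXWANTED set.  When A calls _mtx_unlock(), nlock computes to
 * (MTX_EXCLUSIVE | MTX_EXWANTED | 1), so instead of clearing the lock word
 * A acquires MTX_EXLINK and chains via mtx_chain_link(), which hands the
 * still-held exclusive count to B, marks B's link MTX_LINK_ACQUIRED and
 * wakes it.  The mutex is never observed unlocked in between, so B cannot
 * lose the lock to a third party.
 */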

/*
 * Chain the mutex to the next exclusive waiter (mtx_chain_link).  Called
 * with the lock held exclusively with a single ref count, and also with
 * MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
        u_int clock;    /* bits we own and want to clear */

        /*
         * Chain the exclusive lock to the next link.  The caller cleared
         * SHWANTED so if there is no link we have to wake up any shared
         * waiters.
         */
        if ((link = mtx->mtx_link) != NULL) {
                KKASSERT(link->state == MTX_LINK_LINKED);
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                        clock |= MTX_EXWANTED;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_ACQUIRED;
                mtx->mtx_owner = link->owner;
        } else {
                /*
                 * Chain was empty, release the exclusive lock's last count
                 * as well as the bits shown.
                 */
                clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
        }

        /*
         * We have to use cmpset here to deal with MTX_SHWANTED.  If
         * we just clear the bits we can miss a wakeup or, worse,
         * leave mtx_lock unlocked with MTX_SHWANTED still set.
         */
        lock = mtx->mtx_lock;
        nlock = lock & ~clock;

        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                if (link) {
                        /*
                         * Wakeup new exclusive holder.  Leave
                         * SHWANTED intact.
                         */
                } else if (lock & MTX_SHWANTED) {
                        /*
                         * Signal any shared waiters (and we also
                         * clear SHWANTED).
                         */
                        mtx->mtx_owner = NULL;
        ++mtx_collision_count;

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
        thread_t td = curthread;

        /*
         * Acquire MTX_EXLINK.
         *
         * Do not use cmpxchg to wait for EXLINK to clear as this might
         * result in too much cpu cache traffic.
         */
        lock = mtx->mtx_lock;
        if (lock & MTX_EXLINK) {
                ++mtx_collision_count;
        /* lock &= ~MTX_EXLINK; */
        nlock = lock | MTX_EXLINK;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        ++mtx_collision_count;

        /*
         * Delete the link and release EXLINK.
         */
        if (link->state == MTX_LINK_LINKED) {
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_IDLE;
        }
        atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
        thread_t td = curthread;

        lock = mtx->mtx_lock;
        if (lock & MTX_EXLINK) {
                ++mtx_collision_count;
        /* lock &= ~MTX_EXLINK; */
        nlock = lock | MTX_EXLINK;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
        ++mtx_collision_count;

        switch(link->state) {
        case MTX_LINK_IDLE:
                /*
                 * Link not started yet
                 */
                link->state = MTX_LINK_ABORTED;
                break;
        case MTX_LINK_LINKED:
                /*
                 * de-link, mark aborted, and wakeup the thread.
                 */
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_ABORTED;
                break;
        case MTX_LINK_ACQUIRED:
                /*
                 * Too late, the lock was acquired.  Let it complete.
                 */
                break;
        default:
                /*
                 * link already aborted, do nothing.
                 */
                break;
        }
        atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
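
/*
 * Abortable-lock sketch (illustrative only).  A caller that may need to
 * cancel a pending exclusive-lock request supplies its own mtx_link so a
 * timeout or teardown path elsewhere can abort it, at which point the
 * blocked waiter returns ENOLCK:
 *
 *	struct mtx_link mylink;
 *	int error;
 *
 *	mtx_link_init(&mylink);
 *	error = _mtx_lock_ex_link(&foo_mtx, &mylink, "foowait", 0, 0);
 *	if (error == ENOLCK) {
 *		... someone called mtx_abort_ex_link(&foo_mtx, &mylink) ...
 *		return;
 *	}
 *	... got the exclusive lock ...
 *	mtx_unlock(&foo_mtx);
 */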