/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * algorithms.
 */
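
/*
 * Example usage (an illustrative sketch, not code from this file; it
 * assumes the inline API declared in sys/mutex2.h, mtx_init(),
 * mtx_lock_ex_quick() and mtx_unlock(), and a hypothetical softc):
 *
 *	struct mydev_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	static void
 *	mydev_bump(struct mydev_softc *sc)
 *	{
 *		mtx_lock_ex_quick(&sc->sc_mtx, "mydev");
 *		++sc->sc_count;		(blocking is allowed while held)
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 * mtx_init(&sc->sc_mtx) must be called once before first use.
 */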

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");
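
/*
 * The three counters above are exported read/write under the "kern"
 * sysctl node.  A userland program could sample them like this (a
 * hypothetical sketch using the standard sysctlbyname(3) interface):
 *
 *	int64_t val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctlbyname("kern.mtx_collision_count", &val, &len,
 *			 NULL, 0) == 0)
 *		printf("collisions: %jd\n", (intmax_t)val);
 */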

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * Clearing MTX_EXLINK in lock causes us to loop until
			 * MTX_EXLINK is available.  However, to avoid
			 * unnecessary cpu cache traffic we poll instead.
			 *
			 * Setting MTX_EXLINK in nlock causes us to loop until
			 * we can acquire MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * it is not already set.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--td->td_critcount;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = td;
				link->state = MTX_LINK_LINKED;
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--td->td_critcount;

				error = tsleep(link, flags | PINTERLOCKED,
					       ident, to);
				++mtx_contention_count;

				/*
				 * Normal unlink, we should own the exclusive
				 * lock now.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * tsleep error, else retry.
				 */
				if (error)
					break;
			} else {
				--td->td_critcount;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}
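
/*
 * Illustration of the PCATCH/timeout semantics described above (a
 * sketch; "my_mtx" and the error handling policy are hypothetical).
 * A non-zero return is only possible when PCATCH or a timeout is
 * specified; EINTR/ERESTART indicate a caught signal and EWOULDBLOCK
 * a tsleep() timeout:
 *
 *	error = mtx_lock_ex(&my_mtx, "mylck", PCATCH, hz);
 *	if (error == 0) {
 *		...
 *		mtx_unlock(&my_mtx);
 *	} else {
 *		(handle EINTR, ERESTART, or EWOULDBLOCK)
 *	}
 */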

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = tsleep(mtx, flags | PINTERLOCKED,
					       ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
				tsleep_remove(curthread);
			}
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	return (__mtx_lock_sh(mtx, ident, 0, 0));
}
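
/*
 * Shared-side usage sketch (hypothetical caller).  Multiple readers
 * can hold the lock concurrently; a timeout of 0 sleeps forever:
 *
 *	mtx_lock_sh_quick(&my_mtx, "myrd");
 *	(read-only access to the protected data)
 *	mtx_unlock(&my_mtx);
 */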

/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;
	int	res = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			--gd->gd_spinlocks;
			cpu_ccfence();
			--gd->gd_curthread->td_critcount;
			res = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (res);
}
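
/*
 * Caller-side sketch of the "predisposition" mentioned above: the
 * fast-path inline in sys/mutex2.h is expected to bump the per-cpu
 * spinlock count and td_critcount before calling into this hard path,
 * which is why the failure case above decrements both.  Conceptually
 * (a hypothetical sketch, not the literal inline):
 *
 *	++gd->gd_curthread->td_critcount;	(enter critical section)
 *	++gd->gd_spinlocks;			(declare spinlock intent)
 *	if (cmpset of the lock word fails)
 *		return(_mtx_spinlock_try(mtx));	(undoes both on EAGAIN)
 *	return(0);
 */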

/*
 * Get a shared spinlock the hard way.
 */
void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Attempt to acquire an exclusive lock without blocking.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Attempt to acquire a shared lock without blocking.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_sh_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Downgrade an exclusively held lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
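
/*
 * Typical downgrade pattern (an illustrative sketch; names are
 * hypothetical): build up a structure under the exclusive lock, then
 * downgrade so concurrent readers can proceed while the caller keeps
 * read access:
 *
 *	mtx_lock_ex_quick(&my_mtx, "build");
 *	(populate the protected data)
 *	mtx_downgrade(&my_mtx);
 *	(continue read-only alongside other readers)
 *	mtx_unlock(&my_mtx);
 */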

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
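
/*
 * Typical upgrade pattern (an illustrative sketch; names are
 * hypothetical).  Because the upgrade fails with EDEADLK rather than
 * blocking, the fallback is to drop the shared lock and reacquire
 * exclusively, revalidating state afterwards:
 *
 *	mtx_lock_sh_quick(&my_mtx, "scan");
 *	if (mtx_upgrade_try(&my_mtx) != 0) {
 *		mtx_unlock(&my_mtx);
 *		mtx_lock_ex_quick(&my_mtx, "scan");
 *		(revalidate, another thread may have run)
 *	}
 *	(modify the protected data)
 *	mtx_unlock(&my_mtx);
 */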

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available when others want an exclusive
 * lock causes us to chain the owner to the next exclusive lock instead of
 * releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

		if (nlock == 1) {
			/*
			 * Last release, shared lock, no exclusive waiters.
			 */
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else if (nlock == (MTX_EXCLUSIVE | 1)) {
			/*
			 * Last release, exclusive lock, no exclusive waiters.
			 * Wake up any shared waiters.
			 */
			mtx->mtx_owner = NULL;
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				if (lock & MTX_SHWANTED) {
					wakeup(mtx);
					++mtx_wakeup_count;
				}
				break;
			}
		} else if (nlock == (MTX_EXWANTED | 1)) {
			/*
			 * Last release, shared lock, with exclusive
			 * waiters.
			 *
			 * Wait for EXLINK to clear, then acquire it.
			 * We could use the cmpset for this but polling
			 * is better on the cpu caches.
			 *
			 * Acquire an exclusive lock leaving the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
			/*
			 * Last release, exclusive lock, with exclusive
			 * waiters.
			 *
			 * Leave the exclusive lock intact and the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else {
			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain the lock to the next exclusive waiter.  Called with the lock held
 * exclusively with a single ref count, and also with MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
	mtx_link_t link;
	u_int	lock;
	u_int	nlock;
	u_int	clock;	/* bits we own and want to clear */

	/*
	 * Chain the exclusive lock to the next link.  The caller cleared
	 * SHWANTED so if there is no link we have to wake up any shared
	 * waiters.
	 */
	clock = MTX_EXLINK;
	if ((link = mtx->mtx_link) != NULL) {
		KKASSERT(link->state == MTX_LINK_LINKED);
		if (link->next == link) {
			mtx->mtx_link = NULL;
			clock |= MTX_EXWANTED;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ACQUIRED;
		mtx->mtx_owner = link->owner;
	} else {
		/*
		 * Chain was empty, release the exclusive lock's last count
		 * as well as the bits shown.
		 */
		clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
	}

	/*
	 * We have to use cmpset here to deal with MTX_SHWANTED.  If
	 * we just clear the bits we can miss a wakeup or, worse,
	 * leave mtx_lock unlocked with MTX_SHWANTED still set.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~clock;

		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (link) {
				/*
				 * Wakeup new exclusive holder.  Leave
				 * SHWANTED intact.
				 */
				wakeup(link);
			} else if (lock & MTX_SHWANTED) {
				/*
				 * Signal any shared waiters (and we also
				 * clear SHWANTED).
				 */
				mtx->mtx_owner = NULL;
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Do the abort.
	 */
	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet.
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED:
		/*
		 * De-link, mark aborted, and wake up the thread.
		 */
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ABORTED;
		wakeup(link);
		break;
	case MTX_LINK_ACQUIRED:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * Link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}
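
/*
 * Usage sketch for the abortable interface (hypothetical caller code).
 * One thread queues for the lock with an externally visible link while
 * another thread, e.g. a timeout or teardown path, aborts the wait:
 *
 *	struct mtx_link mylink;
 *
 *	mtx_link_init(&mylink);
 *	error = mtx_lock_ex_link(&my_mtx, &mylink, "mywait", 0, 0);
 *	if (error == ENOLCK) {
 *		(the wait was aborted via mtx_abort_ex_link())
 *	}
 *
 * and elsewhere:
 *
 *	mtx_abort_ex_link(&my_mtx, &mylink);
 */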