/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * - Exclusive priority over shared to prevent SMP starvation.
 * - locks can be aborted (async callback, if any, will be made w/ENOLCK).
 * - locks can be asynchronous.
 * - synchronous fast path if no blocking occurs (async callback is not
 *   made in this case).
 *
 * Generally speaking any caller-supplied link state must be properly
 * initialized before use.
 *
 * Most of the support is in sys/mutex[2].h; we mostly provide the
 * backoff and blocking paths here.
 */
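/*
 * Usage sketch (illustrative only; mtx_init(), mtx_lock_ex_quick() and
 * mtx_unlock() are the inline wrappers from sys/mutex2.h, which try the
 * fast path and fall into the functions below on contention):
 *
 *	mtx_t m;
 *	int error;
 *
 *	mtx_init(&m);
 *	error = mtx_lock_ex_quick(&m, "example");
 *	if (error == 0) {
 *		...exclusive critical section...
 *		mtx_unlock(&m);
 *	}
 */
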
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");

static int mtx_chain_link_ex(mtx_t *mtx, u_int olock);
static int mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount);
static void mtx_delete_link(mtx_t *mtx, mtx_link_t *link);

/*
 * Exclusive-lock a mutex, block until acquired unless link is async.
 * Recursion is allowed.
 *
 * Returns 0 on success, the tsleep() return code on failure, or EINPROGRESS
 * if async.  If immediately successful an async exclusive lock will return 0
 * and not issue the async callback or link the link structure.  The caller
 * must handle this case (typically this is an optimal code path).
 *
 * A tsleep() error can only be returned if PCATCH is specified in the flags.
 */
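/*
 * Async sketch (illustrative; "my_done" and "softc" are hypothetical
 * names, and the callback/arg setup assumes the link fields used below):
 *
 *	mtx_link_init(&link);
 *	link.callback = my_done;
 *	link.arg = softc;
 *	error = _mtx_lock_ex_link(&m, &link, "qlock", 0, 0);
 *	if (error == 0)
 *		...fast path: lock held, my_done was NOT called...
 *	else if (error == EINPROGRESS)
 *		...my_done will be called later with 0, or ENOLCK on abort...
 */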
static int
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link,
	      const char *ident, int flags, int to)
{
	thread_t td;
	u_int	lock;
	u_int	nlock;
	int	error;
	int	isasync;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		/*
		 * Attempt to obtain the lock, or bump the count if we
		 * already own it exclusively (recursion).
		 */
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}
		if ((lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * We need MTX_LINKSPIN to manipulate exlink or
		 * shlink structures.
		 *
		 * We must set MTX_EXWANTED with MTX_LINKSPIN to indicate
		 * pending exclusive requests.  It cannot be set as a separate
		 * operation prior to acquiring MTX_LINKSPIN.
		 *
		 * To avoid unnecessary cpu cache traffic we poll
		 * for collisions.  It is also possible that EXWANTED
		 * state failing the above test was spurious, so all the
		 * tests must be repeated if we cannot obtain LINKSPIN
		 * with the prior state tests intact (i.e. don't reload
		 * the (lock) variable here, for heaven's sake!).
		 */
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		td = curthread;
		nlock = lock | MTX_EXWANTED | MTX_LINKSPIN;
		++td->td_critcount;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {
			--td->td_critcount;
			continue;
		}

		/*
		 * Check for early abort.
		 */
		if (link->state == MTX_LINK_ABORTED) {
			if (mtx->mtx_exlink == NULL) {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN |
						 MTX_EXWANTED);
			} else {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN);
			}
			--td->td_critcount;
			link->state = MTX_LINK_IDLE;
			error = ENOLCK;
			break;
		}

		/*
		 * Add our link to the exlink list and release LINKSPIN.
		 */
		link->owner = td;
		link->state = MTX_LINK_LINKED_EX;
		if (mtx->mtx_exlink) {
			link->next = mtx->mtx_exlink;
			link->prev = link->next->prev;
			link->next->prev = link;
			link->prev->next = link;
		} else {
			link->next = link;
			link->prev = link;
			mtx->mtx_exlink = link;
		}
		isasync = (link->callback != NULL);
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);
		--td->td_critcount;

		/*
		 * If this is an asynchronous lock request return without
		 * blocking, leaving the link structure linked.
		 */
		if (isasync) {
			error = EINPROGRESS;
			break;
		}

		/*
		 * Wait for the lock (synchronous case).
		 */
		error = mtx_wait_link(mtx, link, ident, flags, to);
		break;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t *mtx, const char *ident, int flags, int to)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t *mtx, const char *ident)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
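/*
 * Shared-lock sketch (illustrative; mtx_lock_sh_quick() is the assumed
 * sys/mutex2.h wrapper supplying flags=0, to=0):
 *
 *	error = mtx_lock_sh_quick(&m, "rdlock");
 *	if (error == 0) {
 *		...read-only critical section, may be shared by many...
 *		mtx_unlock(&m);
 *	}
 */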
static int
__mtx_lock_sh(mtx_t *mtx, mtx_link_t *link,
	      const char *ident, int flags, int to)
{
	thread_t td;
	u_int	lock;
	u_int	nlock;
	int	error;
	int	isasync;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		/*
		 * Attempt to obtain the lock, or bump the share count if
		 * already held shared with no exclusive waiters.
		 */
		if (lock == 0) {
			nlock = 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				error = 0;
				link->state = MTX_LINK_ACQUIRED;
				break;
			}
			continue;
		}
		if ((lock & (MTX_EXCLUSIVE | MTX_EXWANTED)) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				link->state = MTX_LINK_ACQUIRED;
				break;
			}
			continue;
		}

		/*
		 * We need MTX_LINKSPIN to manipulate exlink or
		 * shlink structures.
		 *
		 * We must set MTX_SHWANTED with MTX_LINKSPIN to indicate
		 * pending shared requests.  It cannot be set as a separate
		 * operation prior to acquiring MTX_LINKSPIN.
		 *
		 * To avoid unnecessary cpu cache traffic we poll
		 * for collisions.  It is also possible that EXWANTED
		 * state failing the above test was spurious, so all the
		 * tests must be repeated if we cannot obtain LINKSPIN
		 * with the prior state tests intact (i.e. don't reload
		 * the (lock) variable here, for heaven's sake!).
		 */
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		td = curthread;
		nlock = lock | MTX_SHWANTED | MTX_LINKSPIN;
		++td->td_critcount;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {
			--td->td_critcount;
			continue;
		}

		/*
		 * Check for early abort.
		 */
		if (link->state == MTX_LINK_ABORTED) {
			if (mtx->mtx_shlink == NULL) {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN |
						 MTX_SHWANTED);
			} else {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN);
			}
			--td->td_critcount;
			link->state = MTX_LINK_IDLE;
			error = ENOLCK;
			break;
		}

		/*
		 * Add our link to the shlink list and release LINKSPIN.
		 */
		link->owner = td;
		link->state = MTX_LINK_LINKED_SH;
		if (mtx->mtx_shlink) {
			link->next = mtx->mtx_shlink;
			link->prev = link->next->prev;
			link->next->prev = link;
			link->prev->next = link;
		} else {
			link->next = link;
			link->prev = link;
			mtx->mtx_shlink = link;
		}
		isasync = (link->callback != NULL);
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);
		--td->td_critcount;

		/*
		 * If this is an asynchronous lock request return without
		 * blocking, leaving the link structure linked.
		 */
		if (isasync) {
			error = EINPROGRESS;
			break;
		}

		/*
		 * Wait for the lock (synchronous case).
		 */
		error = mtx_wait_link(mtx, link, ident, flags, to);
		break;
	}
	return (error);
}

int
_mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_sh(mtx, link, ident, flags, to));
}

int
_mtx_lock_sh(mtx_t *mtx, const char *ident, int flags, int to)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_sh(mtx, &link, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t *mtx, const char *ident)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_sh(mtx, &link, ident, 0, 0));
}

/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* Exponential backoff while we spin */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
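/*
 * Try-lock sketch (illustrative; mtx_spinlock_try() and mtx_spinunlock()
 * are the assumed sys/mutex2.h wrappers; the wrapper predisposes the
 * gd_spinlocks/td_critcount counts before calling this function):
 *
 *	if (mtx_spinlock_try(&m) == 0) {
 *		...short critical section...
 *		mtx_spinunlock(&m);
 *	} else {
 *		...EAGAIN, take a fallback path...
 *	}
 */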
int
_mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;
	int	res = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* Undo the predisposition and fail */
			--gd->gd_spinlocks;
			cpu_ccfence();
			--gd->gd_curthread->td_critcount;
			res = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (res);
}

/*
 * Get a shared spinlock the hard way.
 */
void
_mtx_spinlock_sh(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* Exponential backoff while we spin */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Attempt to lock the mutex exclusively.  Recursion is allowed.
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_ex_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Attempt to lock the mutex shared.  Recursion is allowed.
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_sh_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Downgrade an exclusive lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
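/*
 * Downgrade sketch (illustrative; mtx_downgrade() is the assumed
 * sys/mutex2.h wrapper around this function):
 *
 *	mtx_lock_ex_quick(&m, "w");	...exclusive, count 1...
 *	...modify the data...
 *	mtx_downgrade(&m);		...now shared, count 1; pending
 *					   shared waiters are woken up...
 *	...keep reading...
 *	mtx_unlock(&m);
 */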
void
_mtx_downgrade(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		/*
		 * NOP if already shared.
		 */
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}

		/*
		 * Transfer count to shared.  Any additional pending shared
		 * waiters must be woken up.
		 */
		if (lock & MTX_SHWANTED) {
			if (mtx_chain_link_sh(mtx, lock, 1))
				break;
		} else {
			nlock = lock & ~MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
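/*
 * Upgrade sketch (illustrative; mtx_upgrade_try() is the assumed
 * sys/mutex2.h wrapper; on EDEADLK the usual recovery is to drop the
 * shared lock, relock exclusively, and revalidate):
 *
 *	mtx_lock_sh_quick(&m, "r");
 *	if (mtx_upgrade_try(&m) == EDEADLK) {
 *		mtx_unlock(&m);
 *		mtx_lock_ex_quick(&m, "w");
 *		...revalidate, other threads may have run...
 *	}
 */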
int
_mtx_upgrade_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the last release we handle any pending chains.
 */
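/*
 * For reference, a sketch of the mtx_lock word states handled below
 * (bit names from sys/mutex.h; the low MTX_MASK bits are the hold count):
 *
 *	MTX_EXCLUSIVE | 1			last exclusive, no waiters
 *	MTX_EXCLUSIVE | MTX_EXWANTED | 1	last exclusive, ex waiters
 *	MTX_EXCLUSIVE | MTX_SHWANTED | 1	last exclusive, sh waiters
 *	1					last shared, no waiters
 *	MTX_EXWANTED | 1			last shared, ex waiters
 *	MTX_SHWANTED | 1			last shared, sh waiters
 */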
void
_mtx_unlock(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		switch(lock) {
		case MTX_EXCLUSIVE | 1:
			/*
			 * Last release, exclusive lock.
			 * No exclusive or shared requests pending.
			 */
			mtx->mtx_owner = NULL;
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		case MTX_EXCLUSIVE | MTX_EXWANTED | 1:
		case MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 * Exclusive requests pending.
			 * Exclusive requests have priority over shared reqs.
			 */
			if (mtx_chain_link_ex(mtx, lock))
				return;
			break;
		case MTX_EXCLUSIVE | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 *
			 * Shared requests are pending.  Transfer our count (1)
			 * to the first shared request, wakeup all shared reqs.
			 */
			if (mtx_chain_link_sh(mtx, lock, 0))
				return;
			break;
		case 1:
			/*
			 * Last release, shared lock.
			 * No exclusive or shared requests pending.
			 */
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		case MTX_EXWANTED | 1:
		case MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 *
			 * Exclusive requests are pending.  Transfer our
			 * count (1) to the next exclusive request.
			 *
			 * Exclusive requests have priority over shared reqs.
			 */
			if (mtx_chain_link_ex(mtx, lock))
				return;
			break;
		case MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 * Shared requests pending.
			 */
			if (mtx_chain_link_sh(mtx, lock, 0))
				return;
			break;
		default:
			/*
			 * We have to loop if this is the last release but
			 * someone is fiddling with LINKSPIN.
			 */
			if ((lock & MTX_MASK) == 1) {
				KKASSERT(lock & MTX_LINKSPIN);
				break;
			}

			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
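/*
 * Example transition (illustrative): releasing
 * (MTX_EXCLUSIVE | MTX_EXWANTED | 1) hands the inherited count of 1 to
 * the first queued exclusive waiter, ending at (MTX_EXCLUSIVE | 1) with
 * mtx_owner set to that waiter; EXWANTED is cleared only if the exlink
 * queue became empty.
 */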
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;
	++td->td_critcount;
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		link = mtx->mtx_exlink;
		KKASSERT(link != NULL);
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock = MTX_LINKSPIN | MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			nlock = MTX_LINKSPIN;			/* to clear */
		}
		KKASSERT(link->state == MTX_LINK_LINKED_EX);
		mtx->mtx_owner = link->owner;
		cpu_sfence();

		/*
		 * WARNING! The callback can only be safely
		 *	    made with LINKSPIN still held
		 *	    and in a critical section.
		 *
		 * WARNING! The link can go away after the
		 *	    state is set, or after the
		 *	    callback.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, 0);
		} else {
			link->state = MTX_LINK_ACQUIRED;
			wakeup(link);
		}
		atomic_clear_int(&mtx->mtx_lock, nlock);
		++mtx_wakeup_count;
		--td->td_critcount;
		return 1;
	}
	/* retry */
	--td->td_critcount;
	return 0;
}

/*
 * Flush waiting shared locks.  The lock's prior state is passed in and must
 * be adjusted atomically only if it matches.
 *
 * If addcount is 0, the count for the first shared lock in the chain is
 * assumed to have already been accounted for.
 *
 * If addcount is 1, the count for the first shared lock in the chain has
 * not yet been accounted for.
 */
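/*
 * Call-site note (from the code above): _mtx_unlock() passes addcount=0
 * because its own count (1) transfers to the first shared waiter, while
 * _mtx_downgrade() passes addcount=1 because it keeps its count as the
 * new shared holder and the first waiter still needs one of its own.
 */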
static int
mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN;
	nlock &= ~MTX_EXCLUSIVE;
	++td->td_critcount;
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		KKASSERT(mtx->mtx_shlink != NULL);
		for (;;) {
			link = mtx->mtx_shlink;
			atomic_add_int(&mtx->mtx_lock, addcount);
			KKASSERT(link->state == MTX_LINK_LINKED_SH);
			if (link->next == link) {
				mtx->mtx_shlink = NULL;
				cpu_sfence();

				/*
				 * WARNING! The callback can only be safely
				 *	    made with LINKSPIN still held
				 *	    and in a critical section.
				 *
				 * WARNING! The link can go away after the
				 *	    state is set, or after the
				 *	    callback.
				 */
				if (link->callback) {
					link->state = MTX_LINK_CALLEDBACK;
					link->callback(link, link->arg, 0);
				} else {
					link->state = MTX_LINK_ACQUIRED;
					wakeup(link);
				}
				break;
			}

			/*
			 * Dequeue and wake up each remaining link; every
			 * link after the first needs its own count.
			 */
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			cpu_sfence();
			link->state = MTX_LINK_ACQUIRED;
			/* link can go away */
			wakeup(link);
			addcount = 1;
		}
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN |
						 MTX_SHWANTED);
		++mtx_wakeup_count;
		--td->td_critcount;
		return 1;
	}
	/* retry */
	--td->td_critcount;
	return 0;
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN.
	 *
	 * Do not use cmpxchg to wait for LINKSPIN to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release LINKSPIN.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_LINKED_EX:
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock |= MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	case MTX_LINK_LINKED_SH:
		if (link->next == link) {
			mtx->mtx_shlink = NULL;
			nlock |= MTX_SHWANTED;	/* to clear */
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	default:
		/* not linked, nothing to do */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
	--td->td_critcount;
}

/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link,
	      const char *ident, int flags, int to)
{
	int error = 0;

	/*
	 * Sleep.  Handle false wakeups, interruptions, etc.
	 * The link may also have been aborted.
	 */
	while (link->state & MTX_LINK_LINKED) {
		tsleep_interlock(link, 0);
		cpu_lfence();
		if (link->state & MTX_LINK_LINKED) {
			++mtx_contention_count;
			if (link->state & MTX_LINK_LINKED_SH)
				mycpu->gd_cnt.v_lock_name[0] = 'S';
			else
				mycpu->gd_cnt.v_lock_name[0] = 'X';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				ident,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(link, flags | PINTERLOCKED,
				       ident, to);
			if (error)
				break;
		}
	}

	/*
	 * We are done, make sure the link structure is unlinked.
	 * It may still be on the list due to e.g. EINTR or
	 * EWOULDBLOCK.
	 *
	 * It is possible for the tsleep to race an ABORT and cause
	 * error to be 0.
	 *
	 * The tsleep() can be woken up for numerous reasons and error
	 * might be zero in situations where we intend to return an error.
	 *
	 * (This is the synchronous case so state cannot be CALLEDBACK)
	 */
	switch(link->state) {
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		error = 0;
		break;
	case MTX_LINK_ABORTED:
		error = ENOLCK;
		break;
	case MTX_LINK_LINKED_EX:
	case MTX_LINK_LINKED_SH:
		mtx_delete_link(mtx, link);
		/* fall through */
	default:
		if (error == 0)
			error = EWOULDBLOCK;
		break;
	}

	/*
	 * Clear state on status returned.
	 */
	link->state = MTX_LINK_IDLE;

	return (error);
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the mtx_link
 * is initialized or the status from a previous lock has been
 * returned.  If called prior to the next (non-try) lock attempt, the
 * next lock attempt using this link structure will abort instantly.
 *
 * Caller must still wait for the operation to complete, either from a
 * blocking call that is still in progress or by calling mtx_wait_link().
 *
 * If an asynchronous lock request is possibly in-progress, the caller
 * should call mtx_wait_link() synchronously.  Note that the asynchronous
 * lock callback will NOT be called if a successful abort occurred. XXX
 */
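/*
 * Abort sketch (illustrative, continuing the async example earlier in
 * this file):
 *
 *	mtx_abort_link(&m, &link);
 *	error = mtx_wait_link(&m, &link, "abort", 0, 0);
 *	...ENOLCK if the abort won the race; 0 if the lock (or the
 *	   callback) completed first, in which case the lock must be
 *	   dealt with normally...
 */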
void
mtx_abort_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * WARNING! Link structure can disappear once link->state is set.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED_EX:
		/*
		 * de-link, mark aborted, and potentially wakeup the thread
		 * or issue the callback.
		 */
		if (link->next == link) {
			if (mtx->mtx_exlink == link) {
				mtx->mtx_exlink = NULL;
				nlock |= MTX_EXWANTED;	/* to clear */
			}
		} else {
			if (mtx->mtx_exlink == link)
				mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}

		/*
		 * When aborting the async callback is still made.  We must
		 * not set the link status to ABORTED in the callback case
		 * since there is nothing else to clear its status if the
		 * link is reused.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, ENOLCK);
		} else {
			link->state = MTX_LINK_ABORTED;
			wakeup(link);
		}
		break;
	case MTX_LINK_LINKED_SH:
		/*
		 * de-link, mark aborted, and potentially wakeup the thread
		 * or issue the callback.
		 */
		if (link->next == link) {
			if (mtx->mtx_shlink == link) {
				mtx->mtx_shlink = NULL;
				nlock |= MTX_SHWANTED;	/* to clear */
			}
		} else {
			if (mtx->mtx_shlink == link)
				mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}

		/*
		 * When aborting the async callback is still made.  We must
		 * not set the link status to ABORTED in the callback case
		 * since there is nothing else to clear its status if the
		 * link is reused.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, ENOLCK);
		} else {
			link->state = MTX_LINK_ABORTED;
			wakeup(link);
		}
		break;
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
	--td->td_critcount;
}