3 * The Regents of the University of California. All rights reserved.
5 * John S. Dyson. All rights reserved.
6 * Copyright (C) 2013-2017
7 * Matthew Dillon, All rights reserved.
9 * This code contains ideas from software contributed to Berkeley by
10 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11 * System project at Carnegie-Mellon University.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>. Extensively rewritten.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
48 #include <sys/sysctl.h>
49 #include <sys/spinlock.h>
50 #include <sys/spinlock2.h>
51 #include <sys/indefinite2.h>
53 static void undo_shreq(struct lock *lkp);
54 static int undo_upreq(struct lock *lkp);
55 static int undo_exreq(struct lock *lkp);
57 #ifdef DEBUG_CANCEL_LOCKS
59 static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
60 static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);
62 static struct lock cancel_lk;
63 LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
64 SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
65 sysctl_cancel_lock, "I", "test cancelable locks");
66 SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
67 sysctl_cancel_test, "I", "test cancelable locks");
71 __read_frequently int lock_test_mode;
72 SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
73 &lock_test_mode, 0, "");
76 * Locking primitives implementation.
77 * Locks provide shared/exclusive synchronization.
81 #define COUNT(td, x) (td)->td_locks += (x)
83 #define COUNT(td, x) do { } while (0)
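/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * implementation).  The lock name and callers below are hypothetical; the
 * flags are the standard lockmgr request types handled by this file.
 *
 *	struct lock foo_lk;
 *
 *	lockinit(&foo_lk, "foolk", 0, 0);
 *
 *	lockmgr(&foo_lk, LK_EXCLUSIVE);		(blocks until granted)
 *	... modify the protected structure ...
 *	lockmgr(&foo_lk, LK_RELEASE);
 *
 *	if (lockmgr(&foo_lk, LK_SHARED | LK_NOWAIT) == 0) {
 *		... read-only access ...
 *		lockmgr(&foo_lk, LK_RELEASE);
 *	}
 */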
87 * Helper, assert basic conditions
90 _lockmgr_assert(struct lock *lkp, u_int flags)
92 if (mycpu->gd_intr_nesting_level &&
93 (flags & LK_NOWAIT) == 0 &&
94 (flags & LK_TYPE_MASK) != LK_RELEASE &&
97 panic("lockmgr %s from %p: called from interrupt, ipi, "
98 "or hard code section",
99 lkp->lk_wmesg, ((int **)&lkp)[-1]);
104 * Acquire a shared lock
107 lockmgr_shared(struct lock *lkp, u_int flags)
117 _lockmgr_assert(lkp, flags);
118 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
121 count = lkp->lk_count;
125 * If the caller already holds the lock exclusively then
126 * we silently obtain another count on the exclusive lock.
127 * Avoid accessing lk_lockholder until testing exclusivity.
129 * WARNING! The old FreeBSD behavior was to downgrade,
130 * but this creates a problem when recursions
131 * return to the caller and the caller expects
132 * its original exclusive lock to remain exclusively
135 if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
136 KKASSERT(lkp->lk_count & LKC_XMASK);
137 if ((extflags & LK_CANRECURSE) == 0) {
138 if (extflags & LK_NOWAIT)
140 panic("lockmgr: locking against myself");
142 atomic_add_64(&lkp->lk_count, 1);
148 * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
149 * SHARED is set and either EXREQ or UPREQ is set.
151 * NOTE: In the race-to-0 case (see undo_shreq()), we could
152 * theoretically work the SMASK == 0 case here.
154 if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
155 while ((count & LKC_SHARED) &&
156 (count & (LKC_EXREQ | LKC_UPREQ))) {
158 * Immediate failure conditions
160 if (extflags & LK_CANCELABLE) {
161 if (count & LKC_CANCEL)
164 if (extflags & LK_NOWAIT)
170 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
171 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
173 tsleep_interlock(lkp, pflags);
174 count = atomic_fetchadd_64(&lkp->lk_count, 0);
176 if ((count & LKC_SHARED) &&
177 (count & (LKC_EXREQ | LKC_UPREQ))) {
178 error = tsleep(lkp, pflags | PINTERLOCKED,
179 lkp->lk_wmesg, timo);
182 count = lkp->lk_count;
191 * Bump the SCOUNT field. The shared lock is granted only once
192 * the SHARED flag gets set. If it is already set, we are done.
194 * (Racing an EXREQ or UPREQ operation is ok here, we already did
197 count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
203 * We may be able to grant ourselves the bit trivially.
204 * We're done once the SHARED bit is granted.
206 if ((count & (LKC_XMASK | LKC_EXREQ |
207 LKC_UPREQ | LKC_SHARED)) == 0) {
208 if (atomic_fcmpset_64(&lkp->lk_count,
209 &count, count | LKC_SHARED)) {
210 /* count |= LKC_SHARED; NOT USED */
215 if ((td->td_flags & TDF_DEADLKTREAT) &&
216 (count & (LKC_XMASK | LKC_SHARED)) == 0) {
217 if (atomic_fcmpset_64(&lkp->lk_count,
218 &count, count | LKC_SHARED)) {
219 /* count |= LKC_SHARED; NOT USED */
224 if (count & LKC_SHARED)
230 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
231 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
233 if (extflags & LK_CANCELABLE) {
234 if (count & LKC_CANCEL) {
240 if (extflags & LK_NOWAIT) {
247 * Interlocked after the first loop.
250 error = tsleep(lkp, pflags | PINTERLOCKED,
251 lkp->lk_wmesg, timo);
252 if (extflags & LK_SLEEPFAIL) {
265 * Reload, shortcut grant case, then loop interlock
268 count = lkp->lk_count;
269 if (count & LKC_SHARED)
271 tsleep_interlock(lkp, pflags);
272 count = atomic_fetchadd_64(&lkp->lk_count, 0);
281 * Acquire an exclusive lock
284 lockmgr_exclusive(struct lock *lkp, u_int flags)
294 _lockmgr_assert(lkp, flags);
295 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
299 count = lkp->lk_count;
303 * Recursive lock if we already hold it exclusively. Avoid testing
304 * lk_lockholder until after testing lk_count.
306 if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
307 if ((extflags & LK_CANRECURSE) == 0) {
308 if (extflags & LK_NOWAIT)
310 panic("lockmgr: locking against myself");
312 count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
313 KKASSERT((count & LKC_XMASK) > 1);
319 * Trivially acquire the lock, or block until we can set EXREQ.
320 * Set EXREQ2 if EXREQ is already set or the lock is already
321 * held exclusively. EXREQ2 is an aggregation bit to request
324 * WARNING! We cannot set EXREQ if the lock is already held
325 * exclusively because it may race another EXREQ
326 * being cleared and granted. We use the exclusivity
327 * to prevent both EXREQ and UPREQ from being set.
329 * This means that both shared and exclusive requests
330 * have equal priority against a current exclusive holder's
331 * release. Exclusive requests still have priority over
332 * new shared requests when the lock is already held shared.
336 * Normal trivial case
338 if ((count & (LKC_UPREQ | LKC_EXREQ |
340 ((count & LKC_SHARED) == 0 ||
341 (count & LKC_SMASK) == 0)) {
342 ncount = (count + 1) & ~LKC_SHARED;
343 if (atomic_fcmpset_64(&lkp->lk_count,
345 lkp->lk_lockholder = td;
352 if (extflags & LK_CANCELABLE) {
353 if (count & LKC_CANCEL)
356 if (extflags & LK_NOWAIT)
360 * Interlock to set EXREQ or EXREQ2
362 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
363 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
365 if (count & (LKC_EXREQ | LKC_XMASK))
366 ncount = count | LKC_EXREQ2;
368 ncount = count | LKC_EXREQ;
369 tsleep_interlock(lkp, pflags);
370 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
372 * If we successfully transitioned to EXREQ we
373 * can break out, otherwise we had set EXREQ2 and
376 if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
381 error = tsleep(lkp, pflags | PINTERLOCKED,
382 lkp->lk_wmesg, timo);
383 count = lkp->lk_count; /* reload */
387 if (lock_test_mode > 0) {
394 if (extflags & LK_SLEEPFAIL)
399 * Once EXREQ has been set, wait for it to be granted
400 * We enter the loop with tsleep_interlock() already called.
404 * Waiting for EXREQ to be granted to us.
406 * NOTE! If we try to trivially get the exclusive lock
407 * (basically by racing undo_shreq()) and succeed,
408 * we must still wakeup(lkp) for another exclusive
409 * lock trying to acquire EXREQ. Easier to simply
410 * wait for our own wakeup.
412 if ((count & LKC_EXREQ) == 0) {
413 KKASSERT(count & LKC_XMASK);
414 lkp->lk_lockholder = td;
420 * Block waiting for our exreq to be granted.
421 * Check cancelation. NOWAIT was already dealt with.
423 if (extflags & LK_CANCELABLE) {
424 if (count & LKC_CANCEL) {
425 if (undo_exreq(lkp) == 0) {
426 lkp->lk_lockholder = LK_KERNTHREAD;
427 lockmgr_release(lkp, 0);
434 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
435 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
437 error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
439 if (lock_test_mode > 0) {
445 * A tsleep error is uncommon. If it occurs we have to
446 * undo our EXREQ. If we are granted the exclusive lock
447 * as we try to undo we have to deal with it.
449 if (extflags & LK_SLEEPFAIL) {
450 if (undo_exreq(lkp) == 0) {
451 lkp->lk_lockholder = LK_KERNTHREAD;
452 lockmgr_release(lkp, 0);
461 lkp->lk_lockholder = td;
468 * Reload after sleep, shortcut grant case.
469 * Then set the interlock and loop.
471 count = lkp->lk_count;
473 if ((count & LKC_EXREQ) == 0) {
474 KKASSERT(count & LKC_XMASK);
475 lkp->lk_lockholder = td;
479 tsleep_interlock(lkp, pflags);
480 count = atomic_fetchadd_64(&lkp->lk_count, 0);
486 * Downgrade an exclusive lock to shared.
488 * This function always succeeds as long as the caller owns a legal
489 * exclusive lock with one reference. UPREQ and EXREQ are ignored.
492 lockmgr_downgrade(struct lock *lkp, u_int flags)
500 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
502 count = lkp->lk_count;
508 * Downgrade an exclusive lock into a shared lock. All
509 * counts on a recursive exclusive lock become shared.
511 * NOTE: Currently to reduce confusion we only allow
512 * there to be one exclusive lock count, and panic
515 if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
516 panic("lockmgr: not holding exclusive lock: "
517 "%p/%p %016jx", lkp->lk_lockholder, td, count);
521 * NOTE! Must NULL-out lockholder before releasing the
524 * NOTE! There might be pending shared requests, check
527 otd = lkp->lk_lockholder;
528 lkp->lk_lockholder = NULL;
529 ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
530 ((count & LKC_XMASK) << LKC_SSHIFT);
531 ncount |= LKC_SHARED;
533 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
535 * Wakeup any shared waiters (prior SMASK), or
536 * any exclusive requests that couldn't set EXREQ
537 * because the lock had been held exclusively.
539 if (count & (LKC_SMASK | LKC_EXREQ2))
541 /* count = ncount; NOT USED */
544 lkp->lk_lockholder = otd;
551 * Upgrade a shared lock to exclusive. If LK_EXCLUPGRADE then guarantee
552 * that no other exclusive requester can get in front of us and fail
553 * immediately if another upgrade is pending. If we fail, the shared
556 * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
557 * else is in front of us, we release the shared lock and acquire the
558 * exclusive lock normally. If a failure occurs, the shared lock is
562 lockmgr_upgrade(struct lock *lkp, u_int flags)
572 _lockmgr_assert(lkp, flags);
573 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
576 count = lkp->lk_count;
580 * If we already hold the lock exclusively this operation
581 * succeeds and is a NOP.
583 if (count & LKC_XMASK) {
584 if (lkp->lk_lockholder == td)
586 panic("lockmgr: upgrade unowned lock");
588 if ((count & LKC_SMASK) == 0)
589 panic("lockmgr: upgrade unowned lock");
592 * Loop to acquire LKC_UPREQ
596 * If UPREQ is already pending, release the shared lock
597 * and acquire an exclusive lock normally.
599 * If NOWAIT or EXCLUPGRADE the operation must be atomic,
600 * and this isn't, so we fail.
602 if (count & LKC_UPREQ) {
603 lockmgr_release(lkp, 0);
604 if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
606 else if (extflags & LK_NOWAIT)
609 error = lockmgr_exclusive(lkp, flags);
614 * Try to immediately grant the upgrade, handle NOWAIT,
615 * or release the shared lock and simultaneously set UPREQ.
617 if ((count & LKC_SMASK) == LKC_SCOUNT) {
621 ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
622 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
623 lkp->lk_lockholder = td;
626 } else if (extflags & LK_NOWAIT) {
628 * Early EBUSY if an immediate grant is impossible
630 lockmgr_release(lkp, 0);
634 * Multiple shared locks present, request the
635 * upgrade and break to the next loop.
637 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
638 tsleep_interlock(lkp, pflags);
639 ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
640 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
649 * We have acquired LKC_UPREQ, wait until the upgrade is granted
650 * or the tsleep fails.
652 * NOWAIT and EXCLUPGRADE have already been handled. The first
653 * tsleep_interlock() has already been associated.
659 * We were granted our upgrade. No other UPREQ can be
660 * made pending because we are now exclusive.
662 if ((count & LKC_UPREQ) == 0) {
663 KKASSERT((count & LKC_XMASK) == 1);
664 lkp->lk_lockholder = td;
668 if (extflags & LK_CANCELABLE) {
669 if (count & LKC_CANCEL) {
670 if (undo_upreq(lkp) == 0) {
671 lkp->lk_lockholder = LK_KERNTHREAD;
672 lockmgr_release(lkp, 0);
679 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
680 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
682 error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
683 if (extflags & LK_SLEEPFAIL) {
684 if (undo_upreq(lkp) == 0) {
685 lkp->lk_lockholder = LK_KERNTHREAD;
686 lockmgr_release(lkp, 0);
699 * Reload the lock, short-cut the UPGRANT code before
700 * taking the time to interlock and loop.
702 count = lkp->lk_count;
703 if ((count & LKC_UPREQ) == 0) {
704 KKASSERT((count & LKC_XMASK) == 1);
705 lkp->lk_lockholder = td;
708 tsleep_interlock(lkp, pflags);
709 count = atomic_fetchadd_64(&lkp->lk_count, 0);
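/*
 * Illustrative upgrade/downgrade sketch (editorial addition, hypothetical
 * caller).  LK_EXCLUPGRADE fails with EBUSY instead of queueing behind a
 * pending upgrade; note that on failure the shared hold has already been
 * released, so the caller must not LK_RELEASE again.
 *
 *	lockmgr(&foo_lk, LK_SHARED);
 *	if (lockmgr(&foo_lk, LK_EXCLUPGRADE) == 0) {
 *		... exclusive work ...
 *		lockmgr(&foo_lk, LK_DOWNGRADE);
 *		... back to shared access ...
 *		lockmgr(&foo_lk, LK_RELEASE);
 *	} else {
 *		... the shared hold was lost, restart the operation ...
 *	}
 */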
716 * Release a held lock
718 * NOTE: When releasing to an unlocked state, we set the SHARED bit
719 * to optimize shared lock requests.
722 lockmgr_release(struct lock *lkp, u_int flags)
730 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
733 count = lkp->lk_count;
738 * Release the currently held lock, grant all requests
741 * WARNING! lksleep() assumes that LK_RELEASE does not
747 if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
748 panic("lockmgr: LK_RELEASE: no lock held");
750 if (count & LKC_XMASK) {
752 * Release exclusively held lock
754 if (lkp->lk_lockholder != LK_KERNTHREAD &&
755 lkp->lk_lockholder != td) {
756 panic("lockmgr: pid %d, not exclusive "
757 "lock holder thr %p/%p unlocking",
758 (td->td_proc ? td->td_proc->p_pid : -1),
759 td, lkp->lk_lockholder);
761 if ((count & (LKC_UPREQ | LKC_EXREQ |
764 * Last exclusive count is being released
765 * with no UPREQ or EXREQ. The SHARED
766 * bit can be set or not without messing
767 * anything up, so precondition it to
768 * SHARED (which is the most cpu-optimal).
770 * Wakeup any EXREQ2. EXREQ cannot be
771 * set while an exclusive count is present
772 * so we have to wakeup any EXREQ2 we find.
774 * We could hint the EXREQ2 by leaving
775 * SHARED unset, but atm I don't see any
778 otd = lkp->lk_lockholder;
779 lkp->lk_lockholder = NULL;
780 ncount = (count - 1);
781 ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
782 ncount |= LKC_SHARED;
783 if (atomic_fcmpset_64(&lkp->lk_count,
785 if (count & (LKC_SMASK | LKC_EXREQ2))
787 if (otd != LK_KERNTHREAD)
789 /* count = ncount; NOT USED */
792 lkp->lk_lockholder = otd;
794 } else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
797 * Last exclusive count is being released but
798 * an upgrade request is present, automatically
799 * grant an exclusive state to the owner of
800 * the upgrade request. Transfer count to
803 * EXREQ cannot be set while an exclusive
804 * holder exists, so do not clear EXREQ2.
806 otd = lkp->lk_lockholder;
807 lkp->lk_lockholder = NULL;
808 ncount = count & ~LKC_UPREQ;
809 if (atomic_fcmpset_64(&lkp->lk_count,
812 if (otd != LK_KERNTHREAD)
814 /* count = ncount; NOT USED */
817 lkp->lk_lockholder = otd;
819 } else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
822 * Last exclusive count is being released but
823 * an exclusive request is present. We
824 * automatically grant an exclusive state to
825 * the owner of the exclusive request,
826 * transferring our count.
828 * This case virtually never occurs because
829 * EXREQ is not set while exclusive holders
830 * exist. However, it might be set if an
831 * exclusive request is pending and a
832 * shared holder upgrades.
834 * Don't bother clearing EXREQ2. A thread
835 * waiting to set EXREQ can't do it while
836 * an exclusive lock is present.
838 otd = lkp->lk_lockholder;
839 lkp->lk_lockholder = NULL;
840 ncount = count & ~LKC_EXREQ;
841 if (atomic_fcmpset_64(&lkp->lk_count,
844 if (otd != LK_KERNTHREAD)
846 /* count = ncount; NOT USED */
849 lkp->lk_lockholder = otd;
853 * Multiple exclusive counts, drop by 1.
854 * Since we are the holder and there is more
855 * than one count, we can just decrement it.
858 atomic_fetchadd_64(&lkp->lk_count, -1);
859 /* count = count - 1 NOT NEEDED */
860 if (lkp->lk_lockholder != LK_KERNTHREAD)
867 * Release shared lock
869 KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
870 if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
873 * Last shared count is being released,
874 * no exclusive or upgrade request present.
875 * Generally leave the shared bit set.
876 * Clear the CANCEL bit.
878 ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
879 if (atomic_fcmpset_64(&lkp->lk_count,
882 /* count = ncount; NOT USED */
886 } else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
887 (LKC_UPREQ | LKC_SCOUNT)) {
889 * Last shared count is being released but
890 * an upgrade request is present, automatically
891 * grant an exclusive state to the owner of
892 * the upgrade request and transfer the count.
894 ncount = (count - LKC_SCOUNT + 1) &
895 ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
896 if (atomic_fcmpset_64(&lkp->lk_count,
900 /* count = ncount; NOT USED */
904 } else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
905 (LKC_EXREQ | LKC_SCOUNT)) {
907 * Last shared count is being released but
908 * an exclusive request is present, we
909 * automatically grant an exclusive state to
910 * the owner of the request and transfer
913 ncount = (count - LKC_SCOUNT + 1) &
914 ~(LKC_EXREQ | LKC_EXREQ2 |
915 LKC_CANCEL | LKC_SHARED);
916 if (atomic_fcmpset_64(&lkp->lk_count,
920 /* count = ncount; NOT USED */
926 * Shared count is greater than 1. We can
927 * just use undo_shreq() to clean things up.
928 * undo_shreq() will also handle races to 0
943 * Start canceling blocked requesters or later requesters.
944 * Only blocked requesters using CANCELABLE can be canceled.
946 * This is intended to then allow other requesters (usually the
947 * caller) to obtain a non-cancelable lock.
949 * Don't waste time issuing a wakeup if nobody is pending.
952 lockmgr_cancel_beg(struct lock *lkp, u_int flags)
956 count = lkp->lk_count;
960 KKASSERT((count & LKC_CANCEL) == 0); /* disallowed case */
962 /* issue w/lock held */
963 KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);
965 if (!atomic_fcmpset_64(&lkp->lk_count,
966 &count, count | LKC_CANCEL)) {
969 /* count |= LKC_CANCEL; NOT USED */
972 * Wakeup any waiters.
974 * NOTE: EXREQ2 only matters when EXREQ is set, so don't
975 * bother checking EXREQ2.
977 if (count & (LKC_EXREQ | LKC_SMASK | LKC_UPREQ)) {
986 * End our cancel request (typically after we have acquired
987 * the lock ourselves).
990 lockmgr_cancel_end(struct lock *lkp, u_int flags)
992 atomic_clear_long(&lkp->lk_count, LKC_CANCEL);
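/*
 * Illustrative cancel protocol sketch (editorial addition, hypothetical
 * caller).  LK_CANCEL_BEG must be issued with the lock held; it causes
 * blocked and later LK_CANCELABLE acquisitions to fail, letting the
 * canceling thread keep or reacquire the lock for teardown.  Compare the
 * DEBUG_CANCEL_LOCKS sysctl handlers at the end of this file.
 *
 *	lockmgr(&foo_lk, LK_EXCLUSIVE);
 *	lockmgr(&foo_lk, LK_CANCEL_BEG);
 *	... invalidate / tear down the structure ...
 *	lockmgr(&foo_lk, LK_CANCEL_END);
 *	lockmgr(&foo_lk, LK_RELEASE);
 */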
998 * Backout SCOUNT from a failed shared lock attempt and handle any race
999 * to 0. This function is also used by the release code for the less
1000 * optimal race to 0 case.
1002 * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
1003 * possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
1004 * situation. A shared request can block with a ZERO SCOUNT if
1005 * EXREQ or UPREQ is pending in this situation. Be sure to always
1006 * issue a wakeup() in this situation if we are unable to
1007 * transition to an exclusive lock, to handle the race.
1013 undo_shreq(struct lock *lkp)
1018 count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
1019 while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
1020 (count & (LKC_SMASK | LKC_XMASK)) == 0) {
1022 * Note that UPREQ must have priority over EXREQ, and EXREQ
1023 * over CANCEL, so if the atomic op fails we have to loop up.
1025 if (count & LKC_UPREQ) {
1026 ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
1028 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1030 /* count = ncount; NOT USED */
1036 if (count & LKC_EXREQ) {
1037 ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
1038 LKC_CANCEL | LKC_SHARED);
1039 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1041 /* count = ncount; NOT USED */
1047 if (count & LKC_CANCEL) {
1048 ncount = count & ~LKC_CANCEL;
1049 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1051 /* count = ncount; NOT USED */
1060 * Undo an exclusive request. Returns EBUSY if we were able to undo the
1061 * request, and 0 if the request was granted before we could undo it.
1062 * When 0 is returned, the lock state has not been modified. The caller
1063 * is responsible for setting the lockholder to curthread.
1067 undo_exreq(struct lock *lkp)
1073 count = lkp->lk_count;
1079 if ((count & LKC_EXREQ) == 0) {
1081 * EXREQ was granted. We own the exclusive lock.
1085 if (count & LKC_XMASK) {
1087 * Clear the EXREQ we still own. Only wakeup on
1088 * EXREQ2 if no UPREQ. There are still exclusive
1089 * holders so do not wake up any shared locks or
1092 * If there is an UPREQ it will issue a wakeup()
1093 * for any EXREQ wait loops, so we can clear EXREQ2
1096 ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1097 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1098 if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
1103 /* count = ncount; NOT USED */
1107 } else if (count & LKC_UPREQ) {
1109 * Clear the EXREQ we still own. We cannot wakeup any
1110 * shared or exclusive waiters because there is an
1111 * uprequest pending (that we do not handle here).
1113 * If there is an UPREQ it will issue a wakeup()
1114 * for any EXREQ wait loops, so we can clear EXREQ2
1117 ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1118 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1123 } else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
1125 * No UPREQ, lock not held exclusively, but the lock
1126 * is held shared. Clear EXREQ, wakeup anyone trying
1127 * to get the EXREQ bit (they have to set it
1128 * themselves, EXREQ2 is an aggregation).
1130 * We must also wakeup any shared locks blocked
1131 * by the EXREQ, so just issue the wakeup
1132 * unconditionally. See lockmgr_shared() + 76 lines
1135 ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
1136 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1139 /* count = ncount; NOT USED */
1145 * No UPREQ, lock not held exclusively or shared.
1146 * Grant the EXREQ and wakeup anyone waiting on
1149 * We must also issue a wakeup if SHARED is set,
1150 * even without an SCOUNT, due to pre-shared blocking
1151 * that can occur on EXREQ in lockmgr_shared().
1153 ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
1154 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1155 if (count & (LKC_EXREQ2 | LKC_SHARED))
1157 /* count = ncount; NOT USED */
1158 /* we are granting, error == 0 */
1169 * Undo an upgrade request. Returns EBUSY if we were able to undo the
1170 * request, and 0 if the request was granted before we could undo it.
1171 * When 0 is returned, the lock state has not been modified. The caller
1172 * is responsible for setting the lockholder to curthread.
1176 undo_upreq(struct lock *lkp)
1182 count = lkp->lk_count;
1188 if ((count & LKC_UPREQ) == 0) {
1194 if (count & LKC_XMASK) {
1196 * Clear the UPREQ we still own. Nobody to wakeup
1197 * here because there is an existing exclusive
1200 if (atomic_fcmpset_64(&lkp->lk_count, &count,
1201 count & ~LKC_UPREQ)) {
1203 /* count &= ~LKC_UPREQ; NOT USED */
1206 } else if (count & LKC_EXREQ) {
1208 * Clear the UPREQ we still own. Grant the exclusive
1209 * request and wake it up.
1211 ncount = (count + 1);
1212 ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);
1214 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1217 /* count = ncount; NOT USED */
1222 * Clear the UPREQ we still own. Wakeup any shared
1225 * We must also issue a wakeup if SHARED was set
1226 * even if no shared waiters due to pre-shared blocking
1227 * that can occur on UPREQ.
1229 ncount = count & ~LKC_UPREQ;
1230 if (count & LKC_SMASK)
1231 ncount |= LKC_SHARED;
1233 if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
1234 if ((count & LKC_SHARED) ||
1235 (ncount & LKC_SHARED)) {
1239 /* count = ncount; NOT USED */
1249 lockmgr_kernproc(struct lock *lp)
1251 struct thread *td __debugvar = curthread;
1253 if (lp->lk_lockholder != LK_KERNTHREAD) {
1254 KASSERT(lp->lk_lockholder == td,
1255 ("lockmgr_kernproc: lock not owned by curthread %p: %p",
1256 td, lp->lk_lockholder));
1257 lp->lk_lockholder = LK_KERNTHREAD;
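/*
 * Illustrative hand-off sketch (editorial addition, hypothetical caller):
 * once the holder is set to LK_KERNTHREAD the exclusive lock may be
 * released from a different context, e.g. on completion of an
 * asynchronous operation.
 *
 *	lockmgr(&foo_lk, LK_EXCLUSIVE);
 *	lockmgr_kernproc(&foo_lk);
 *	... start async work referencing the locked object ...
 *
 *	(later, possibly from another thread)
 *	lockmgr(&foo_lk, LK_RELEASE);
 */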
1263 * Initialize a lock; required before use.
1266 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1268 lkp->lk_flags = (flags & LK_EXTFLG_MASK);
1270 lkp->lk_wmesg = wmesg;
1271 lkp->lk_timo = timo;
1272 lkp->lk_lockholder = NULL;
1276 * Reinitialize a lock that is being reused for a different purpose, but
1277 * which may have pending (blocked) threads sitting on it. The caller
1278 * must already hold the interlock.
1281 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
1283 lkp->lk_wmesg = wmesg;
1284 lkp->lk_timo = timo;
1288 * De-initialize a lock. The structure must no longer be used by anyone.
1291 lockuninit(struct lock *lkp)
1293 uint64_t count __unused;
1295 count = lkp->lk_count;
1297 KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
1298 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
1302 * Determine the status of a lock.
1305 lockstatus(struct lock *lkp, struct thread *td)
1310 count = lkp->lk_count;
1313 if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
1314 if (count & LKC_XMASK) {
1315 if (td == NULL || lkp->lk_lockholder == td)
1316 lock_type = LK_EXCLUSIVE;
1318 lock_type = LK_EXCLOTHER;
1319 } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1320 lock_type = LK_SHARED;
1327 * Return non-zero if the caller owns the lock shared or exclusive.
1328 * We can only guess re: shared locks.
1331 lockowned(struct lock *lkp)
1333 thread_t td = curthread;
1336 count = lkp->lk_count;
1339 if (count & LKC_XMASK)
1340 return(lkp->lk_lockholder == td);
1342 return((count & LKC_SMASK) != 0);
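/*
 * Illustrative assertion sketch (editorial addition, hypothetical caller):
 * typical debug checks built on lockstatus() and lockowned().
 *
 *	KKASSERT(lockstatus(&foo_lk, curthread) == LK_EXCLUSIVE);
 *	KKASSERT(lockowned(&foo_lk));
 */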
1347 * Determine the number of holders of a lock.
1349 * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
1350 * for shared locks. Caller can only test if the lock has
1351 * a count or not using lockinuse(lk) (sys/lock.h)
1354 lockcount(struct lock *lkp)
1356 panic("lockcount cannot be used");
1360 lockcountnb(struct lock *lkp)
1362 panic("lockcount cannot be used");
1367 * Print out information about state of a lock. Used by VOP_PRINT
1368 * routines to display status about contained locks.
1371 lockmgr_printinfo(struct lock *lkp)
1373 struct thread *td = lkp->lk_lockholder;
1377 count = lkp->lk_count;
1380 if (td && td != LK_KERNTHREAD)
1385 if (count & LKC_XMASK) {
1386 kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
1387 lkp->lk_wmesg, (intmax_t)count, td,
1388 p ? p->p_pid : -99);
1389 } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
1390 kprintf(" lock type %s: SHARED (count %016jx)",
1391 lkp->lk_wmesg, (intmax_t)count);
1393 kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
1395 if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
1396 ((count & LKC_XMASK) && (count & LKC_SMASK)))
1397 kprintf(" with waiters\n");
1403 lock_sysinit(struct lock_args *arg)
1405 lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
1408 #ifdef DEBUG_CANCEL_LOCKS
1412 sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
1418 lockmgr(&cancel_lk, LK_EXCLUSIVE);
1419 error = tsleep(&error, PCATCH, "canmas", hz * 5);
1420 lockmgr(&cancel_lk, LK_CANCEL_BEG);
1421 error = tsleep(&error, PCATCH, "canmas", hz * 5);
1422 lockmgr(&cancel_lk, LK_RELEASE);
1424 SYSCTL_OUT(req, &error, sizeof(error));
1433 sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
1438 error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
1440 lockmgr(&cancel_lk, LK_RELEASE);
1441 SYSCTL_OUT(req, &error, sizeof(error));
1442 kprintf("test %d\n", error);