/*
 * The Regents of the University of California.  All rights reserved.
 * John S. Dyson.  All rights reserved.
 * Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
static void undo_upreq(struct lock *lkp);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define COUNT(td, x)		(td)->td_locks += (x)

#define LOCK_WAIT_TIME		100
#define LOCK_SAMPLE_WAIT	7
/*
 * Set, change, or release a lock.
 */
#ifndef DEBUG_LOCKS
int
lockmgr(struct lock *lkp, u_int flags)
#else
int
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif

        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        count = lkp->lk_count;
        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING! The old FreeBSD behavior was to downgrade,
                 *          but this creates a problem when recursions
                 *          return to the caller and the caller expects
                 *          its original exclusive lock to remain exclusively
                 *          locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                panic("lockmgr: locking against myself");
                        atomic_add_int(&lkp->lk_count, 1);
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                         LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_NOWAIT) {
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                               count | LKC_SHREQ)) {
                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (extflags & LK_SLEEPFAIL) {

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      LKC_EXCL | (count + 1))) {
                        lkp->lk_lockholder = td;

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                panic("lockmgr: locking against myself");
                        atomic_add_int(&lkp->lk_count, 1);
                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (extflags & LK_SLEEPFAIL) {
        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0) {
                                td->td_lockmgr_stack_id[i]--;
                        }
                }

                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                lkp->lk_lockholder = otd;
        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                lkp->lk_lockholder = td;
                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      (count - 1) | wflags)) {
                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (extflags & LK_SLEEPFAIL) {

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error, or tsleep with
                 * LK_SLEEPFAIL set), we have to undo the upgrade request
                 * and clean up any lock that might have been granted via
                 * a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                if (atomic_cmpset_int(&lkp->lk_count, count, count)) {
                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (extflags & LK_SLEEPFAIL) {
        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                        ~(LKC_EXCL|LKC_EXREQ|LKC_SHREQ))) {
                                        lkp->lk_lockholder = otd;
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                        (count & ~LKC_UPREQ) |
                                        lkp->lk_lockholder = otd;
                        otd = lkp->lk_lockholder;
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                        if (otd != LK_KERNTHREAD)
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                        ~(LKC_EXREQ|LKC_SHREQ))) {
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                        (count & ~LKC_UPREQ) |
                                        LKC_EXCL | LKC_UPGRANT)) {
                        if (!atomic_cmpset_int(&lkp->lk_count, count,

        default:
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
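
/*
 * Illustrative usage sketch (not part of the original source): how a
 * typical consumer drives lockmgr().  The structure "example_softc",
 * its fields, and example_update() are hypothetical; the request types
 * (LK_SHARED, LK_UPGRADE, LK_EXCLUSIVE, LK_NOWAIT, LK_RELEASE) are the
 * ones handled by the switch above.
 */
#if 0
struct example_softc {
        struct lock     sc_lock;        /* initialized with lockinit() */
        int             sc_value;
};

static int
example_update(struct example_softc *sc, int v)
{
        /*
         * Start out shared, then try to upgrade.  Per the LK_UPGRADE
         * comments above, the shared lock may be temporarily lost, and
         * on error the lock is released, so the caller must not assume
         * it still holds anything when the upgrade fails.
         */
        lockmgr(&sc->sc_lock, LK_SHARED);
        if (sc->sc_value != v) {
                if (lockmgr(&sc->sc_lock, LK_UPGRADE | LK_NOWAIT) != 0) {
                        /* upgrade failed; the shared lock is gone */
                        lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
                }
                sc->sc_value = v;
        }
        lockmgr(&sc->sc_lock, LK_RELEASE);
        return (0);
}
#endif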
/*
 * Undo an upgrade request
 */
static void
undo_upreq(struct lock *lkp)
{
        int count;

        count = lkp->lk_count;

        if (count & LKC_UPGRANT) {
                /*
                 * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                 * another thread might own UPREQ.  Clear UPGRANT
                 * and release the granted lock.
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~LKC_UPGRANT)) {
                        lockmgr(lkp, LK_RELEASE);
        } else if (count & LKC_EXCL) {
                /*
                 * Clear the UPREQ we still own.  Nobody to wakeup
                 * here because there is an existing exclusive
                 * holder.
                 */
                KKASSERT(count & LKC_UPREQ);
                KKASSERT((count & LKC_MASK) > 0);
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~LKC_UPREQ)) {
        } else if (count & LKC_EXREQ) {
                /*
                 * Clear the UPREQ we still own.  We cannot wakeup any
                 * shared waiters because there is an exclusive
                 * request pending.
                 */
                KKASSERT(count & LKC_UPREQ);
                KKASSERT((count & LKC_MASK) > 0);
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~LKC_UPREQ)) {
        } else {
                /*
                 * Clear the UPREQ we still own.  Wakeup any shared
                 * waiters.
                 */
                KKASSERT(count & LKC_UPREQ);
                KKASSERT((count & LKC_MASK) > 0);
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count &
                                      ~(LKC_UPREQ | LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
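
/*
 * Illustrative sketch (not part of the original source): lockmgr_kernproc()
 * hands an exclusively held lock to LK_KERNTHREAD so that a different thread
 * may perform the eventual LK_RELEASE.  The I/O object and helper names
 * below are hypothetical.
 */
#if 0
static void
example_handoff(struct example_io *io)
{
        lockmgr(&io->io_lock, LK_EXCLUSIVE);
        io->io_busy = 1;

        /*
         * Ownership passes to the kernel; the completion thread (not us)
         * calls lockmgr(&io->io_lock, LK_RELEASE) when the I/O finishes,
         * which the LK_RELEASE owner check above permits for LK_KERNTHREAD.
         */
        lockmgr_kernproc(&io->io_lock);
        example_start_io(io);
}
#endif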
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
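
/*
 * Illustrative sketch (not part of the original source): initializing and
 * using a lock.  The structure, wmesg "exlck", and helper are examples only;
 * timo is only consulted when LK_TIMELOCK is passed to lockmgr().
 */
#if 0
struct example_node {
        struct lock     n_lock;
        int             n_refs;
};

static void
example_node_init(struct example_node *node)
{
        /* the wmesg shows up in diagnostics such as lockmgr_printinfo() */
        lockinit(&node->n_lock, "exlck", 0, LK_CANRECURSE);

        lockmgr(&node->n_lock, LK_EXCLUSIVE);
        node->n_refs = 1;
        lockmgr(&node->n_lock, LK_RELEASE);
}
#endif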
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}
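
/*
 * Illustrative sketch (not part of the original source): interpreting
 * lockstatus().  The caller below is hypothetical; passing curthread
 * distinguishes "exclusively held by me" (LK_EXCLUSIVE) from "exclusively
 * held by someone else" (LK_EXCLOTHER).
 */
#if 0
static int
example_is_locked(struct lock *lkp)
{
        switch (lockstatus(lkp, curthread)) {
        case LK_EXCLUSIVE:      /* held exclusively by the calling thread */
        case LK_EXCLOTHER:      /* held exclusively by another thread */
        case LK_SHARED:         /* held shared by one or more threads */
                return (1);
        default:                /* 0: not held at all */
                return (0);
        }
}
#endif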
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}
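
/*
 * Illustrative sketch (not part of the original source): asserting lock
 * ownership at the top of a function that requires its caller to hold the
 * lock.  The helper and the example_node object are hypothetical; for
 * shared holders this check is only a best guess, per the comment above.
 */
#if 0
static void
example_node_modify_locked(struct example_node *node, int refs)
{
        /* caller must hold n_lock, shared or exclusive */
        KKASSERT(lockowned(&node->n_lock));
        node->n_refs = refs;
}
#endif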
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
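
/*
 * Illustrative sketch (not part of the original source): using the
 * non-blocking count in an assertion, as suggested by the comment above.
 * The teardown helper and example_node object are hypothetical.
 */
#if 0
static void
example_node_destroy(struct example_node *node)
{
        /* nobody may still hold the lock when we tear it down */
        KKASSERT(lockcountnb(&node->n_lock) == 0);
        lockuninit(&node->n_lock);
}
#endif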
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        int count;

        count = lkp->lk_count;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                        lkp->lk_wmesg, count, td,
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                        lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
}
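
/*
 * Illustrative sketch (not part of the original source): a print routine
 * in the VOP_PRINT style mentioned above, embedding the lock state in its
 * output.  The helper and the example_node object are hypothetical.
 */
#if 0
static void
example_node_print(struct example_node *node)
{
        kprintf("example node %p refs %d", node, node->n_refs);
        lockmgr_printinfo(&node->n_lock);
}
#endif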
static void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}