/*
 * The Regents of the University of California.  All rights reserved.
 * John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define COUNT(td, x) (td)->td_locks += (x)

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Diagnostic builds do not inline the helper routines so they show
 * up in backtraces.
 */
#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)
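/*
 * Example: typical lockmgr usage from a kernel thread.  This is an
 * illustrative sketch only; lockinit()/lockmgr()/lockuninit() are the
 * real API below, but 'example_lock', 'example_usage' and the "exmplk"
 * wmesg are hypothetical names.
 */
#if 0
static struct lock example_lock;

static void
example_usage(void)
{
        lockinit(&example_lock, "exmplk", 0, 0);

        /*
         * Readers take the lock shared; any number of shared holders
         * may run concurrently.
         */
        lockmgr(&example_lock, LK_SHARED);
        /* ... read shared data ... */
        lockmgr(&example_lock, LK_RELEASE);

        /*
         * Writers take the lock exclusively.  LK_NOWAIT polls instead
         * of sleeping; the call returns EBUSY if it would have blocked.
         */
        if (lockmgr(&example_lock, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
                /* ... modify shared data ... */
                lockmgr(&example_lock, LK_RELEASE);
        }

        lockuninit(&example_lock);
}
#endif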
static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

/*
 * Drop 'decr' shared references.  Returns non-zero if the caller should
 * issue a wakeup, i.e. the last shared reference went away while an
 * exclusive request or upgrade was pending.
 */
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return (dowakeup);
}
/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 * Sleeps until none of the flags in 'wanted' remain set on the lock.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return (EBUSY);
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire (a generic
                 * sketch of this idiom follows this function).
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return (error);
                if (extflags & LK_SLEEPFAIL)
                        return (ENOLCK);
        }
        return (0);
}
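/*
 * The ssleep() call above is what makes acquire() race-free: it
 * atomically releases lk_spinlock, sleeps on the lock's wait channel,
 * and reacquires the spinlock before returning.  Below is a hedged,
 * generic sketch of the same idiom; 'struct my_obj', MY_FLAG_BUSY and
 * the "mywait" wmesg are hypothetical names, not part of this file.
 */
#if 0
#define MY_FLAG_BUSY    0x0001

struct my_obj {
        struct spinlock spin;
        int             flags;
};

static void
my_obj_acquire(struct my_obj *obj)
{
        spin_lock(&obj->spin);
        while (obj->flags & MY_FLAG_BUSY) {
                /* spinlock is dropped during the sleep, held again after */
                ssleep(obj, &obj->spin, 0, "mywait", 0);
        }
        obj->flags |= MY_FLAG_BUSY;     /* now owned by this thread */
        spin_unlock(&obj->spin);
}

static void
my_obj_release(struct my_obj *obj)
{
        spin_lock(&obj->spin);
        obj->flags &= ~MY_FLAG_BUSY;
        spin_unlock(&obj->spin);
        wakeup(obj);                    /* pairs with the ssleep() above */
}
#endif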
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests ( but not the exclusive
                 * lock itself ).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(lkp, extflags,
                                                LK_HAVE_EXCL);
                        } else {
                                error = acquire(lkp, extflags,
                                                LK_HAVE_EXCL | LK_WANT_EXCL |
                                                LK_WANT_UPGRADE);
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * If we already hold an exclusive lock we bump the
                 * exclusive count instead of downgrading to a shared
                 * lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *	     but this creates a problem when recursions
                 *	     return to the caller and the caller expects
                 *	     its original exclusive lock to remain exclusively
                 *	     locked.
                 */
                if (extflags & LK_CANRECURSE) {
                        lkp->lk_exclusivecount++;
                        COUNT(td, 1);
                        break;
                }
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: locking against myself");
#if 0
                /*
                 * The old code queued a shared lock request and fell into
                 * a downgrade.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */
#endif
        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */
        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the lock is
                 * no longer held at all (a caller-side retry sketch
                 * follows this function).
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for the
                         * shared count to drop to zero, then take the
                         * exclusive lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch1;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch1:
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested upgrade.  Release our shared
                 * lock, awaken the upgrade requestor if we are the last
                 * shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */
        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }

                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }

                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch2;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch2:
#endif
                COUNT(td, 1);
                break;
        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
                                    td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        lkp->lk_filename = file;
                                        lkp->lk_lineno = line;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                } else {
                        panic("lockmgr: LK_RELEASE: no lock held");
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}
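/*
 * The upgrade paths above have a subtle consequence for callers: when
 * LK_UPGRADE or LK_EXCLUPGRADE returns an error, the caller's shared
 * lock has already been released.  A hedged sketch of the retry idiom
 * this implies; 'obj_lock' and 'modify_object' are hypothetical names.
 */
#if 0
static struct lock obj_lock;    /* hypothetical */

static void
modify_object(void)
{
        lockmgr(&obj_lock, LK_SHARED);
        /* ... inspect the object, decide it must be modified ... */
        if (lockmgr(&obj_lock, LK_UPGRADE) != 0) {
                /*
                 * The upgrade failed and, per the comments above, the
                 * shared lock is already gone.  Re-acquire exclusively
                 * and revalidate anything observed under the shared lock.
                 */
                lockmgr(&obj_lock, LK_EXCLUSIVE);
        }
        /* ... modify the object ... */
        lockmgr(&obj_lock, LK_RELEASE);
}
#endif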
/*
 * Hand an exclusively held lock over to the kernel.  The lock can then
 * be released from a different thread without tripping the ownership
 * assertions (see the usage sketch below).
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                td->td_locks--;
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
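/*
 * Usage sketch for lockmgr_kernproc(): one thread acquires a lock and
 * hands ownership to the kernel so a different context (e.g. an I/O
 * completion thread) may release it.  All names below ('struct
 * io_request', 'start_async_io', the 'lk' field) are hypothetical.
 */
#if 0
static void
example_start_io(struct io_request *req)
{
        lockmgr(&req->lk, LK_EXCLUSIVE);        /* owned by curthread */
        lockmgr_kernproc(&req->lk);             /* now owned by LK_KERNTHREAD */
        start_async_io(req);                    /* hypothetical async start */
}

static void
example_io_done(struct io_request *req)
{
        /* a different thread may release a kernel-owned lock */
        lockmgr(&req->lk, LK_RELEASE);
}
#endif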
#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused, but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}
/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
}
#endif
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}
/*
 * Deinitialize a lock.
 *
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}
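/*
 * Example assertion built on lockstatus().  Passing curthread
 * distinguishes our own exclusive hold (LK_EXCLUSIVE) from another
 * thread's (LK_EXCLOTHER).  'obj_lock' is a hypothetical lock.
 */
#if 0
        KKASSERT(lockstatus(&obj_lock, curthread) == LK_EXCLUSIVE);
#endif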
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return (lkp->lk_lockholder == td);
        return (lkp->lk_sharecount != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
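/*
 * Because lockcountnb() does not take the spinlock, it is safe to use
 * in assertions that must not block or recurse into the lock code.
 * Hypothetical example ('obj_lock' is not part of this file):
 */
#if 0
        KKASSERT(lockcountnb(&obj_lock) == 0);  /* lock must be idle here */
#endif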
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}
/*
 * SYSINIT backend for automatic lock initialization at boot.
 */
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
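/*
 * A hedged usage sketch for the boot-time path above, assuming a
 * LOCK_SYSINIT() wrapper macro (declared alongside struct lock_args)
 * that builds the lock_args and registers lock_sysinit() via SYSINIT.
 * 'example_lock' and the "exlk" wmesg are hypothetical.
 */
#if 0
static struct lock example_lock;
LOCK_SYSINIT(example, &example_lock, "exlk", 0);
#endif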