/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define COUNT(td, x)		(td)->td_locks += (x)

#define LOCK_WAIT_TIME		100
#define LOCK_SAMPLE_WAIT	7
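
/*
 * Usage sketch (not part of the original source): typical shared and
 * exclusive acquisition through lockmgr() with the LK_* flags from
 * <sys/lock.h>.  The names "example_lock", "example_shared_reader" and
 * "example_exclusive_writer" are hypothetical.  Guarded by #if 0 so it
 * is never compiled.
 */
#if 0
static struct lock example_lock;

static void
example_shared_reader(void)
{
	lockmgr(&example_lock, LK_SHARED);	/* excludes exclusive holders */
	/* ... read the protected data ... */
	lockmgr(&example_lock, LK_RELEASE);
}

static void
example_exclusive_writer(void)
{
	lockmgr(&example_lock, LK_EXCLUSIVE);	/* sole ownership */
	/* ... modify the protected data ... */
	lockmgr(&example_lock, LK_RELEASE);
}
#endif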
#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE	__inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop the share count.  Returns non-zero if the caller should issue a
 * wakeup for a pending exclusive or upgrade request.
 */
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return (dowakeup);
}
/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return (EBUSY);
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL)
			return (ENOLCK);
	}
	return (0);
}
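
/*
 * Sketch (not in the original file): the key to acquire() is ssleep()'s
 * interlock behavior -- the spinlock is released and the thread is put
 * to sleep atomically, and the spinlock is reacquired before ssleep()
 * returns, so a wakeup(lkp) cannot be lost between the flag test and
 * the sleep.  A minimal standalone wait loop using the same pattern;
 * the function name is hypothetical.
 */
#if 0
static void
example_wait_for_flags(struct lock *lkp, int wanted)
{
	spin_lock(&lkp->lk_spinlock);
	while (lkp->lk_flags & wanted)
		ssleep(lkp, &lkp->lk_spinlock, 0, lkp->lk_wmesg, 0);
	spin_unlock(&lkp->lk_spinlock);
}
#endif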
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks_wr &&
	    ((flags & LK_NOWAIT) == 0)
	) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
	}
#endif

	spin_lock(&lkp->lk_spinlock);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;
	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					    LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif

		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */
	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 *
			 * Although I don't think this can occur for
			 * robustness we also wait for any exclusive locks
			 * to be released.  LK_WANT_UPGRADE is supposed to
			 * prevent new exclusive locks but might not in the
			 * future.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags,
					LK_HAVE_EXCL | LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr(1): non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Recursive lockmgr path
				 */
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] != 0
				) {
					td->td_lockmgr_stack_id[i]++;
					goto lkmatch2;
				}
			}

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Use new lockmgr tracking slot
				 */
				if (td->td_lockmgr_stack_id[i] == 0) {
					td->td_lockmgr_stack_id[i]++;
					td->td_lockmgr_stack[i] = lkp;
					break;
				}
			}
lkmatch2:
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Wait for exclusive lock holders to release and try to
		 * acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.  We can lose
		 * the race against a successful shared lock upgrade in which
		 * case LK_HAVE_EXCL will get set regardless of our
		 * acquisition of LK_WANT_EXCL, so we have to acquire
		 * LK_HAVE_EXCL here as well.
		 */
		error = acquire(lkp, extflags, LK_HAVE_EXCL |
					       LK_WANT_UPGRADE |
					       LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr(2): non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Recursive lockmgr path
			 */
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] != 0
			) {
				td->td_lockmgr_stack_id[i]++;
				goto lkmatch1;
			}
		}

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Use new lockmgr tracking slot
			 */
			if (td->td_lockmgr_stack_id[i] == 0) {
				td->td_lockmgr_stack_id[i]++;
				td->td_lockmgr_stack[i] = lkp;
				break;
			}
		}
lkmatch1:
#endif
		COUNT(td, 1);
		break;
	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -1),
				    "exclusive lock holder",
				    td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
#ifdef DEBUG_LOCKS
			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] > 0
				) {
					td->td_lockmgr_stack_id[i]--;
					lkp->lk_filename = file;
					lkp->lk_lineno = line;
					break;
				}
			}
#endif
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		} else {
			panic("lockmgr: LK_RELEASE: no lock held");
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}
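
/*
 * Sketch (not in the original file): non-blocking and upgrade usage of
 * lockmgr(), following the flag handling above.  LK_NOWAIT polls and
 * returns EBUSY instead of sleeping; LK_EXCLUPGRADE fails if another
 * upgrade is already pending, and per the LK_UPGRADE comment above the
 * shared lock is always gone when an error is returned.  The function
 * name is hypothetical.
 */
#if 0
static int
example_poll_and_upgrade(struct lock *lkp)
{
	/* Poll for an exclusive lock without sleeping. */
	if (lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT) == 0)
		lockmgr(lkp, LK_RELEASE);

	/* Upgrade a held shared lock to exclusive. */
	lockmgr(lkp, LK_SHARED);
	if (lockmgr(lkp, LK_EXCLUPGRADE) != 0)
		return (EBUSY);		/* shared lock already released */
	lockmgr(lkp, LK_RELEASE);
	return (0);
}
#endif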
/*
 * Hand an exclusively held lock over to the kernel (LK_KERNTHREAD) so
 * that it can later be released by a different thread.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
		 && lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
}
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
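
/*
 * Sketch (not in the original file): initializing a lock before first
 * use.  "exmp" is a hypothetical wait-channel message; the 0 timeout
 * only matters if LK_TIMELOCK is later passed to lockmgr().
 */
#if 0
static struct lock example_lock2;

static void
example_init(void)
{
	lockinit(&example_lock2, "exmp", 0, 0);
	lockmgr(&example_lock2, LK_EXCLUSIVE);
	lockmgr(&example_lock2, LK_RELEASE);
}
#endif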
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}
/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return (lkp->lk_lockholder == td);
	return (lkp->lk_sharecount != 0);
}
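
/*
 * Sketch (not in the original file): since lockowned() can only guess
 * for shared locks, it is best suited to assertions rather than control
 * flow.  The function name is hypothetical.
 */
#if 0
static void
example_assert_owned(struct lock *lkp)
{
	KKASSERT(lockowned(lkp));	/* caller must hold the lock */
}
#endif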
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
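
/*
 * Sketch (not in the original file): the non-blocking count avoids
 * taking lk_spinlock, which makes it usable in contexts where blocking
 * or spinlock recursion would be unsafe, e.g. a held-lock assertion.
 * The function name is hypothetical.
 */
#if 0
static void
example_assert_locked(struct lock *lkp)
{
	KKASSERT(lockcountnb(lkp) > 0);	/* lock must be held by someone */
}
#endif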
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}