kernel - Change lockmgr LK_SHARED behavior to fix improper recursion return
[dragonfly.git] / sys/kern/kern_lock.c
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks_wr &&
            ((flags & LK_NOWAIT) == 0)
        ) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL
                                        );
                        } else {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL | LK_WANT_EXCL |
                                            LK_WANT_UPGRADE
                                        );
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * If we already hold an exclusive lock we bump the
                 * exclusive count instead of downgrading to a shared
                 * lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (extflags & LK_CANRECURSE) {
                        lkp->lk_exclusivecount++;
                        COUNT(td, 1);
                        break;
                }
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: locking against myself");
#if 0
                /*
                 * The old code queued a shared lock request and fell
                 * into a downgrade.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */
#endif

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for
                         * the shared count to drop to zero, then take
                         * the exclusive lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch2;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch2:
                        ;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested an upgrade.  Release our
                 * shared lock, awaken the upgrade requestor if we are
                 * the last shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch1;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch1:
                ;
#endif
                COUNT(td, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
                                    td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        lkp->lk_filename = file;
                                        lkp->lk_lineno = line;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                } else {
                        panic("lockmgr: LK_RELEASE: no lock held");
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}
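
/*
 * Illustrative sketch (not compiled in): how the LK_SHARED behavior
 * change above plays out for a caller that already holds the lock
 * exclusively.  The lock variable and function name are hypothetical.
 */
#if 0
static void
example_shared_recursion(void)
{
        struct lock lk;

        lockinit(&lk, "explk", 0, LK_CANRECURSE);
        lockmgr(&lk, LK_EXCLUSIVE);     /* lk_exclusivecount == 1 */

        /*
         * An LK_SHARED request by the exclusive holder now bumps
         * lk_exclusivecount instead of downgrading, so the caller's
         * original lock is still exclusive when the recursion returns.
         */
        lockmgr(&lk, LK_SHARED);        /* lk_exclusivecount == 2 */
        lockmgr(&lk, LK_RELEASE);       /* lk_exclusivecount == 1 */

        lockmgr(&lk, LK_RELEASE);       /* fully released */
        lockuninit(&lk);
}
#endif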

void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
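
/*
 * Illustrative sketch (not compiled in): once ownership is handed to
 * LK_KERNTHREAD, the LK_RELEASE path above accepts the unlock from any
 * thread, not just the original holder.  The lock variable is
 * hypothetical.
 */
#if 0
        lockmgr(&map_lock, LK_EXCLUSIVE);
        lockmgr_kernproc(&map_lock);    /* now owned by LK_KERNTHREAD */
        /* ... some other thread may later do ... */
        lockmgr(&map_lock, LK_RELEASE);
#endif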

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused, but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
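
/*
 * Illustrative sketch (not compiled in): typical initialization plus a
 * timed lock request.  The names are hypothetical; lk_timo is in ticks
 * and is only consulted when a request passes LK_TIMELOCK, in which
 * case a timed-out sleep typically surfaces as EWOULDBLOCK.
 */
#if 0
static void
example_lockinit(void)
{
        struct lock map_lock;

        lockinit(&map_lock, "maplk", hz / 10, 0);
        if (lockmgr(&map_lock, LK_EXCLUSIVE | LK_TIMELOCK) == EWOULDBLOCK)
                kprintf("maplk: lock request timed out\n");
        else
                lockmgr(&map_lock, LK_RELEASE);
        lockuninit(&map_lock);
}
#endif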

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}
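
/*
 * Illustrative sketch (not compiled in): lockstatus() distinguishes our
 * own exclusive hold from another thread's.  The lock variable is
 * hypothetical.
 */
#if 0
        switch (lockstatus(&map_lock, curthread)) {
        case LK_EXCLUSIVE:
                /* held exclusively by us */
                break;
        case LK_EXCLOTHER:
                /* held exclusively by someone else */
                break;
        case LK_SHARED:
                /* held shared by somebody */
                break;
        default:
                /* not held */
                break;
        }
#endif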

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return(lkp->lk_lockholder == td);
        return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
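
/*
 * Illustrative sketch (not compiled in): per the note above, the
 * non-blocking count suits assertions, e.g. checking that a lock is no
 * longer held before its containing object is torn down.  The lock
 * variable is hypothetical.
 */
#if 0
        KKASSERT(lockcountnb(&map_lock) == 0);
        lockuninit(&map_lock);
#endif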

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                        lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                        lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                        p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}