kernel: add a LOCK_SYSINIT helper macro
[dragonfly.git] / sys / kern / kern_lock.c
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return (dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks_wr &&
            ((flags & LK_NOWAIT) == 0)
        ) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL
                                        );
                        } else {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL | LK_WANT_EXCL |
                                            LK_WANT_UPGRADE
                                        );
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for the
                         * shared count to drop to zero, then take the
                         * exclusive lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch2;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch2:
                        ;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested an upgrade.  Release our shared
                 * lock, awaken the upgrade requestor if we are the last
                 * shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch1;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch1:
                ;
#endif
                COUNT(td, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      "exclusive lock holder",
                                      td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        lkp->lk_filename = file;
                                        lkp->lk_lineno = line;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                } else {
                        panic("lockmgr: LK_RELEASE: no lock held");
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}

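/*
 * Illustrative sketch (not part of this file): typical use of the lockmgr()
 * routine above.  The "example_softc" structure, its members, and the
 * function name are hypothetical; the flags and the EBUSY return for a
 * failed LK_NOWAIT attempt come from the code above.
 */
#if 0
struct example_softc {
        struct lock     sc_lock;        /* initialized with lockinit() */
        int             sc_state;
};

static void
example_update(struct example_softc *sc)
{
        /* Block until the exclusive lock is granted. */
        lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
        sc->sc_state++;
        lockmgr(&sc->sc_lock, LK_RELEASE);

        /* Poll for a shared lock; EBUSY means we would have had to block. */
        if (lockmgr(&sc->sc_lock, LK_SHARED | LK_NOWAIT) == 0) {
                kprintf("state is %d\n", sc->sc_state);
                lockmgr(&sc->sc_lock, LK_RELEASE);
        }
}
#endif
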
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
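
/*
 * Illustrative sketch (not part of this file): lockmgr_kernproc() above
 * hands an exclusively held lock over to LK_KERNTHREAD, after which any
 * thread may legally issue the LK_RELEASE (see the holder check in the
 * LK_RELEASE case of lockmgr()).  The function name is hypothetical.
 */
#if 0
static void
example_disown_lock(struct lock *lkp)
{
        lockmgr(lkp, LK_EXCLUSIVE);
        lockmgr_kernproc(lkp);  /* holder becomes LK_KERNTHREAD */
        /*
         * The locked object can now be handed to another thread, which
         * eventually finishes with lockmgr(lkp, LK_RELEASE).
         */
}
#endif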

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused, but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}

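/*
 * Illustrative sketch (not part of this file): lock lifecycle using the
 * routines in this file.  The variable, function names and the "exlk"
 * wmesg string are hypothetical; note that the timo argument is only
 * consumed by LK_TIMELOCK requests (see acquire() above).
 */
#if 0
static struct lock example_lock;

static void
example_setup(void)
{
        lockinit(&example_lock, "exlk", 0, LK_CANRECURSE);
}

static void
example_teardown(void)
{
        /* All users must be gone; lockuninit() asserts lk_waitcount == 0. */
        lockuninit(&example_lock);
}
#endif
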
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return(lkp->lk_lockholder == td);
        return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}

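/*
 * Illustrative sketch (not part of this file): the comment above suggests
 * the non-blocking count for assertions; lockowned() gives a cheap
 * "does the caller hold it" check.  The helper names and assertion
 * placement are hypothetical.
 */
#if 0
static void
example_assert_unlocked(struct lock *lkp)
{
        KKASSERT(lockcountnb(lkp) == 0);
}

static void
example_assert_held(struct lock *lkp)
{
        KKASSERT(lockowned(lkp));
}
#endif
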
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}

void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, LK_CANRECURSE);
}
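
/*
 * Illustrative sketch (not part of this file): lock_sysinit() above is the
 * back-end for the LOCK_SYSINIT helper macro named in the commit message.
 * The macro itself lives in the lock header, not here; the expansion below
 * is only an assumption about its shape (a static struct lock_args plus a
 * SYSINIT hook), and the SI_SUB_*/SI_ORDER_* values and all names are
 * hypothetical.
 */
#if 0
static struct lock example_lk;
static struct lock_args example_lk_args = {
        .la_lock = &example_lk,
        .la_desc = "exlk"
};
SYSINIT(example_lk_init, SI_SUB_LOCK, SI_ORDER_MIDDLE,
        lock_sysinit, &example_lk_args);
#endif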