/*
 * Copyright (c) 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *      John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
                LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks_wr &&
            ((flags & LK_NOWAIT) == 0)
        ) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL
                                        );
                        } else {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL | LK_WANT_EXCL |
                                             LK_WANT_UPGRADE
                                        );
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one. If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade). If we return an error, the lock
                 * is always released.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for the
                         * shared count to drop to zero, then take the
                         * exclusive lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch2;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch2:
                        ;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested an upgrade.  Release our
                 * shared lock, awaken the upgrade requestor if we are the
                 * last shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch1;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch1:
                ;
#endif
                COUNT(td, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
                                    td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}
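
/*
 * Example (illustrative sketch, compiled out): a typical lockmgr()
 * round trip.  The lock pointer and the surrounding helper function
 * are hypothetical; real callers live throughout the vnode and buffer
 * cache code.
 */
#if 0
static void
example_lockmgr_usage(struct lock *lk)
{
        int error;

        /* Blocking exclusive acquisition, then release. */
        error = lockmgr(lk, LK_EXCLUSIVE);
        if (error == 0) {
                /* ... access the structure protected by lk ... */
                lockmgr(lk, LK_RELEASE);
        }

        /* Polling variant: returns EBUSY instead of sleeping. */
        if (lockmgr(lk, LK_SHARED | LK_NOWAIT) == 0) {
                /* ... shared access, may later attempt LK_UPGRADE ... */
                lockmgr(lk, LK_RELEASE);
        }
}
#endif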

/*
 * Disassociate the current thread from a held exclusive lock by
 * transferring ownership to LK_KERNTHREAD, allowing another thread
 * to release the lock later.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
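
/*
 * Example (illustrative sketch, compiled out): handing a locked object
 * off so that a different thread may unlock it.  The helper and the
 * handoff scenario are hypothetical.
 */
#if 0
static void
example_handoff(struct lock *lk)
{
        lockmgr(lk, LK_EXCLUSIVE);
        lockmgr_kernproc(lk);   /* owner becomes LK_KERNTHREAD */
        /* ... another thread may now do lockmgr(lk, LK_RELEASE) ... */
}
#endif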

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
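
/*
 * Example (illustrative sketch, compiled out): initializing an embedded
 * lock before first use.  The structure and the "exlck" wmesg string
 * are hypothetical.
 */
#if 0
struct example_softc {
        struct lock     sc_lock;
        /* ... */
};

static void
example_init(struct example_softc *sc)
{
        /* No timeout; allow recursive exclusive holds. */
        lockinit(&sc->sc_lock, "exlck", 0, LK_CANRECURSE);
}
#endif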

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}
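
/*
 * Example (illustrative sketch, compiled out): distinguishing our own
 * exclusive hold from another thread's.  The helper is hypothetical.
 */
#if 0
static int
example_owned_excl(struct lock *lk)
{
        /* LK_EXCLOTHER indicates another thread holds it exclusively. */
        return (lockstatus(lk, curthread) == LK_EXCLUSIVE);
}
#endif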

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return(lkp->lk_lockholder == td);
        return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
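
/*
 * Example (illustrative sketch, compiled out): as noted above, the
 * non-blocking count is suited to assertions, e.g. verifying on entry
 * that a lock is held at all.  The helper is hypothetical.
 */
#if 0
static void
example_assert_held(struct lock *lk)
{
        /* Panics if there is no shared or exclusive holder. */
        KKASSERT(lockcountnb(lk) != 0);
}
#endif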

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}
