/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
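
/*
 * Example usage (an illustrative sketch, not compiled; "foo_lock" is a
 * hypothetical lock already set up with lockinit()):
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	modify the protected data
 *	lockmgr(&foo_lock, LK_RELEASE);
 *
 *	lockmgr(&foo_lock, LK_SHARED);		read the protected data
 *	lockmgr(&foo_lock, LK_RELEASE);
 */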

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = ssleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL)
			return ENOLCK;
	}
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;
	dowakeup = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
	}
#endif

	spin_lock(&lkp->lk_spinlock);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					     LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}

		/*
		 * If we already hold an exclusive lock we bump the
		 * exclusive count instead of downgrading to a shared
		 * lock.
		 *
		 * WARNING!  The old FreeBSD behavior was to downgrade,
		 *	     but this creates a problem when recursions
		 *	     return to the caller and the caller expects
		 *	     its original exclusive lock to remain exclusively
		 *	     locked.
		 */
		if (extflags & LK_CANRECURSE) {
			lkp->lk_exclusivecount++;
			COUNT(td, 1);
			break;
		}
		if (extflags & LK_NOWAIT) {
			error = EBUSY;
			break;
		}
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: locking against myself");
#if 0
		/*
		 * The old code queued a shared lock request and then
		 * fell into a downgrade.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */
#endif

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif

		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always have been released.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 *
			 * Although I don't think this can occur, for
			 * robustness we also wait for any exclusive locks
			 * to be released.  LK_WANT_UPGRADE is supposed to
			 * prevent new exclusive locks but might not in the
			 * future.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags,
					LK_HAVE_EXCL | LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr(1): non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Recursive lockmgr path
				 */
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] != 0
				) {
					td->td_lockmgr_stack_id[i]++;
					goto lkmatch2;
				}
			}

			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				/*
				 * Use new lockmgr tracking slot
				 */
				if (td->td_lockmgr_stack_id[i] == 0) {
					td->td_lockmgr_stack_id[i]++;
					td->td_lockmgr_stack[i] = lkp;
					break;
				}
			}
lkmatch2:
			;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our
		 * shared lock, awaken the upgrade requestor if we are
		 * the last shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Wait for exclusive lock holders to release and try to
		 * acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.  We can lose
		 * the race against a successful shared lock upgrade in which
		 * case LK_HAVE_EXCL will get set regardless of our
		 * acquisition of LK_WANT_EXCL, so we have to acquire
		 * LK_HAVE_EXCL here as well.
		 */
		error = acquire(lkp, extflags, LK_HAVE_EXCL |
					       LK_WANT_UPGRADE |
					       LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock(&lkp->lk_spinlock);
			panic("lockmgr(2): non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Recursive lockmgr path
			 */
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] != 0
			) {
				td->td_lockmgr_stack_id[i]++;
				goto lkmatch1;
			}
		}

		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			/*
			 * Use new lockmgr tracking slot
			 */
			if (td->td_lockmgr_stack_id[i] == 0) {
				td->td_lockmgr_stack_id[i]++;
				td->td_lockmgr_stack[i] = lkp;
				break;
			}
		}
lkmatch1:
		;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -1),
				    "exclusive lock holder",
				    td, lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
#ifdef DEBUG_LOCKS
			for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
				if (td->td_lockmgr_stack[i] == lkp &&
				    td->td_lockmgr_stack_id[i] > 0
				) {
					td->td_lockmgr_stack_id[i]--;
					lkp->lk_filename = file;
					lkp->lk_lineno = line;
					break;
				}
			}
#endif
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		} else {
			panic("lockmgr: LK_RELEASE: no lock held");
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}
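
/*
 * Example (an illustrative sketch, not compiled; "foo_lock" is
 * hypothetical): with LK_NOWAIT, lockmgr() returns EBUSY instead of
 * sleeping when the lock cannot be granted immediately:
 *
 *	if (lockmgr(&foo_lock, LK_EXCLUSIVE | LK_NOWAIT) == EBUSY) {
 *		back off and handle the contention without blocking
 *	}
 */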

/*
 * Transfer ownership of an exclusively-held lock from the calling thread
 * to the kernel (LK_KERNTHREAD), allowing a different thread to release
 * the lock later on.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}
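
/*
 * Example (an illustrative sketch, not compiled; the buffer "bp" and
 * its embedded "b_lock" are hypothetical here): a thread that starts
 * asynchronous I/O on a structure it has locked can hand the lock to
 * the kernel so the completion path, running in another thread, may
 * release it:
 *
 *	lockmgr(&bp->b_lock, LK_EXCLUSIVE);
 *	lockmgr_kernproc(&bp->b_lock);	any thread may now LK_RELEASE it
 */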

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only are these functions unused, they also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td __debugvar = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
		 && lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
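
/*
 * Example lifecycle (an illustrative sketch, not compiled; "foo_lock"
 * and the "foolk" wait message are hypothetical):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, "foolk", 0, 0);
 *	...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	lockmgr(&foo_lock, LK_RELEASE);
 *	...
 *	lockuninit(&foo_lock);
 */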

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_lock(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock(&lkp->lk_spinlock);
	return (lock_type);
}
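
/*
 * Example (an illustrative sketch, not compiled; "foo_lock" is
 * hypothetical): assert that the current thread holds the lock
 * exclusively before touching the data it protects:
 *
 *	KKASSERT(lockstatus(&foo_lock, curthread) == LK_EXCLUSIVE);
 */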

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;

	if (lkp->lk_exclusivecount)
		return(lkp->lk_lockholder == td);
	return(lkp->lk_sharecount != 0);
}
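
/*
 * Example (an illustrative sketch, not compiled): a routine that must
 * be entered with the (hypothetical) foo_lock held in either mode can
 * assert:
 *
 *	KKASSERT(lockowned(&foo_lock));
 */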

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
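
/*
 * Example (an illustrative sketch, not compiled; "foo_lock" is
 * hypothetical): lockcountnb() skips the spinlock, making it the
 * cheaper choice for assertions:
 *
 *	KKASSERT(lockcountnb(&foo_lock) == 0);	nobody should hold it here
 */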

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}

/*
 * SYSINIT helper to initialize a lock from a struct lock_args description.
 */
void
lock_sysinit(struct lock_args *arg)
{
	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
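
/*
 * Example (an illustrative sketch, not compiled): this helper is meant
 * to be driven from a SYSINIT hook, e.g. via a LOCK_SYSINIT()-style
 * convenience macro (assumed here) that builds the struct lock_args
 * and registers lock_sysinit() to run at boot:
 *
 *	static struct lock foo_lock;
 *	LOCK_SYSINIT(foo_lock_init, &foo_lock, "foolk", 0);
 */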