/*
 * Copyright (c) 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *      John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
                LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return(dowakeup);
}

/*
 * lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}
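
/*
 * Illustrative note (not part of the original source): lockmgr() below
 * calls acquire() with the mask of flags that must clear before its
 * request can proceed, e.g.
 *
 *      error = acquire(lkp, extflags, LK_HAVE_EXCL | LK_WANT_EXCL);
 *
 * The caller must hold lk_spinlock; ssleep() atomically drops it for the
 * duration of the sleep and reacquires it before returning.
 */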

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests ( but not the exclusive
                 * lock itself ).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL
                                        );
                        } else {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL | LK_WANT_EXCL |
                                             LK_WANT_UPGRADE
                                        );
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * If we already hold an exclusive lock we bump the
                 * exclusive count instead of downgrading to a shared
                 * lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (extflags & LK_CANRECURSE) {
                        lkp->lk_exclusivecount++;
                        COUNT(td, 1);
                        break;
                }
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: locking against myself");
#if 0
                /*
                 * The old code queued a shared lock request and fell
                 * into a downgrade.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */
#endif

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one. If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade). If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an upgrade,
                         * so request the upgrade and wait for the shared
                         * count to drop to zero, then take the exclusive
                         * lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch2;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch2:
                        ;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested upgrade. Release our shared
                 * lock, awaken upgrade requestor if we are the last shared
                 * lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         *      Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch1;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch1:
                ;
#endif
                COUNT(td, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
                                    td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        lkp->lk_filename = file;
                                        lkp->lk_lineno = line;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                } else {
                        panic("lockmgr: LK_RELEASE: no lock held");
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}
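
/*
 * Example usage of lockmgr() above (an illustrative sketch, not part of
 * the original source; "lk" is a hypothetical lock initialized with
 * lockinit()).  A typical caller acquires and releases an exclusive
 * lock and checks the return value, since LK_NOWAIT can fail with EBUSY
 * and LK_SLEEPFAIL with ENOLCK:
 *
 *      error = lockmgr(&lk, LK_EXCLUSIVE);
 *      if (error == 0) {
 *              ... critical section ...
 *              lockmgr(&lk, LK_RELEASE);
 *      }
 *
 * Shared access uses LK_SHARED/LK_RELEASE the same way, and LK_UPGRADE
 * converts a held shared lock into an exclusive one.
 */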

void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
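
/*
 * Illustrative usage of lockmgr_kernproc() above (a sketch, not part of
 * the original source; "lk" is a hypothetical lock).  Ownership of an
 * exclusively held lock is transferred to LK_KERNTHREAD so that a
 * different thread may later perform the LK_RELEASE, which the
 * LK_RELEASE path explicitly allows:
 *
 *      lockmgr(&lk, LK_EXCLUSIVE);
 *      lockmgr_kernproc(&lk);
 *      ... hand the locked object to another subsystem, which will
 *      eventually call lockmgr(&lk, LK_RELEASE) from its own thread ...
 */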

#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 *
 * XXX not only unused, but these functions also break EXCLUPGRADE's
 * atomicity.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
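
/*
 * Example for lockinit() above (an illustrative sketch, not part of the
 * original source; the wmesg string and the zero timeout/flags are
 * arbitrary):
 *
 *      struct lock mylock;
 *
 *      lockinit(&mylock, "mylck", 0, 0);
 *      lockmgr(&mylock, LK_SHARED);
 *      ...
 *      lockmgr(&mylock, LK_RELEASE);
 */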

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this lock
         * so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}
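
/*
 * Example for lockstatus() above (illustrative, not part of the original
 * source): it is convenient for assertions about lock state, e.g.
 *
 *      KKASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE);
 *
 * Passing a NULL thread reports LK_EXCLUSIVE for any exclusive holder.
 */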

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return(lkp->lk_lockholder == td);
        return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
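
/*
 * Example for lockcountnb() above (illustrative, not part of the
 * original source): the non-blocking count is typically used in
 * assertions where taking the spinlock is unnecessary, e.g.
 *
 *      KKASSERT(lockcountnb(&lk) == 0);
 */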

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}
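
/*
 * Example output of lockmgr_printinfo() above (illustrative, pieced
 * together from the kprintf formats; the wmesg "mylck" and the numbers
 * are made up):
 *
 *       lock type mylck: EXCL (count 1) by td 0xffffff8012345678 pid 42
 *       with 2 pending
 */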

void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
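
/*
 * Example for lock_sysinit() above (an illustrative sketch, not part of
 * the original source).  It is meant to be run at boot from a SYSINIT
 * with a statically initialized descriptor, e.g.
 *
 *      static struct lock mylock;
 *      static struct lock_args mylock_args = {
 *              .la_lock  = &mylock,
 *              .la_desc  = "mylck",
 *              .la_flags = 0
 *      };
 *
 * where &mylock_args is registered with the SYSINIT machinery so that
 * lock_sysinit() runs before the lock is first used.
 */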