1 /* 
2  * Copyright (c) 1995
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Copyright (C) 1997
6  *      John S. Dyson.  All rights reserved.
7  *
8  * This code contains ideas from software contributed to Berkeley by
9  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
10  * System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by the University of
23  *      California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *      @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
41  * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
42  * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
43  */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

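/*
 * Typical life cycle (an illustrative sketch, not part of the original
 * file: lockinit(), lockmgr(), lockuninit() and the LK_* flags are the
 * real API implemented below, but "foo_lock" and its surroundings are
 * hypothetical):
 *
 *      struct lock foo_lock;
 *
 *      lockinit(&foo_lock, "foolck", 0, 0);
 *      lockmgr(&foo_lock, LK_SHARED);          (read-only access)
 *      lockmgr(&foo_lock, LK_RELEASE);
 *      lockmgr(&foo_lock, LK_EXCLUSIVE);       (read-write access)
 *      lockmgr(&foo_lock, LK_RELEASE);
 *      lockuninit(&foo_lock);
 */
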
#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

/*
 * Bump the shared-lock count.  Called with the lock's spinlock held.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

/*
 * Drop the shared-lock count.  Called with the lock's spinlock held.
 * Returns non-zero if the caller should wakeup() the lock once the
 * spinlock is released.
 */
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return(dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = ssleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        dowakeup = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks_wr &&
            ((flags & LK_NOWAIT) == 0)
        ) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks_wr);
        }
#endif

        spin_lock(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(lkp, extflags, LK_HAVE_EXCL);
                        } else {
                                error = acquire(lkp, extflags,
                                                LK_HAVE_EXCL | LK_WANT_EXCL |
                                                LK_WANT_UPGRADE);
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif

                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the lock
                 * will always have been released.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for the
                         * shared count to drop to zero, then take the
                         * exclusive lock.
                         *
                         * Although I don't think this can occur, for
                         * robustness we also wait for any exclusive locks
                         * to be released.  LK_WANT_UPGRADE is supposed to
                         * prevent new exclusive locks but might not in the
                         * future.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags,
                                        LK_HAVE_EXCL | LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr(1): non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Recursive lockmgr path
                                 */
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] != 0
                                ) {
                                        td->td_lockmgr_stack_id[i]++;
                                        goto lkmatch2;
                                }
                        }

                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                /*
                                 * Use a new lockmgr tracking slot
                                 */
                                if (td->td_lockmgr_stack_id[i] == 0) {
                                        td->td_lockmgr_stack_id[i]++;
                                        td->td_lockmgr_stack[i] = lkp;
                                        break;
                                }
                        }
lkmatch2:
                        ;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested upgrade.  Release our shared
                 * lock, awaken upgrade requestor if we are the last shared
                 * lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                                      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Wait for exclusive lock holders to release and try to
                 * acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.  We can lose
                 * the race against a successful shared lock upgrade in which
                 * case LK_HAVE_EXCL will get set regardless of our
                 * acquisition of LK_WANT_EXCL, so we have to acquire
                 * LK_HAVE_EXCL here as well.
                 */
                error = acquire(lkp, extflags, LK_HAVE_EXCL |
                                               LK_WANT_UPGRADE |
                                               LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr(2): non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Recursive lockmgr path
                         */
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] != 0
                        ) {
                                td->td_lockmgr_stack_id[i]++;
                                goto lkmatch1;
                        }
                }

                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        /*
                         * Use a new lockmgr tracking slot
                         */
                        if (td->td_lockmgr_stack_id[i] == 0) {
                                td->td_lockmgr_stack_id[i]++;
                                td->td_lockmgr_stack[i] = lkp;
                                break;
                        }
                }
lkmatch1:
                ;
#endif
                COUNT(td, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
                                    td, lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
#ifdef DEBUG_LOCKS
                        for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                                if (td->td_lockmgr_stack[i] == lkp &&
                                    td->td_lockmgr_stack_id[i] > 0
                                ) {
                                        td->td_lockmgr_stack_id[i]--;
                                        break;
                                }
                        }
#endif
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;

        default:
                spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}

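/*
 * Non-blocking usage sketch (hypothetical caller, not part of the
 * original file): with LK_NOWAIT the request above returns EBUSY
 * instead of sleeping, so a caller may poll and back off:
 *
 *      if (lockmgr(&foo_lock, LK_EXCLUSIVE | LK_NOWAIT) == EBUSY) {
 *              ...the lock is contended; defer the work and retry...
 *      }
 */
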
/*
 * Disassociate an exclusively held lock from the calling thread so that
 * any thread (or the kernel itself) may later release it.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}

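/*
 * Handoff sketch (hypothetical; buffer-cache style ownership transfer is
 * the classic consumer of this pattern): the acquiring thread gives up
 * ownership so that whichever thread finishes the work can release:
 *
 *      lockmgr(&foo_lock, LK_EXCLUSIVE);
 *      lockmgr_kernproc(&foo_lock);
 *      ...hand the locked object to another thread, which eventually
 *         calls lockmgr(&foo_lock, LK_RELEASE)...
 */
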
#if 0
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td __debugvar = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
                 && lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}

#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock(&lkp->lk_spinlock);
}

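/*
 * Reuse sketch (hypothetical object and names): an embedded lock can be
 * relabeled in place without disturbing threads already blocked on it:
 *
 *      lockreinit(&obj->o_lock, "objnew", 0, 0);
 */
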
/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
}

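/*
 * Status-check sketch (hypothetical caller): verify that the current
 * thread holds "foo_lock" exclusively before touching protected state:
 *
 *      KKASSERT(lockstatus(&foo_lock, curthread) == LK_EXCLUSIVE);
 */
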
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;

        if (lkp->lk_exclusivecount)
                return(lkp->lk_lockholder == td);
        return(lkp->lk_sharecount != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock(&lkp->lk_spinlock);
        return (count);
}

int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}

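/*
 * Assertion sketch (hypothetical caller): lockcountnb() does not take
 * the lock's spinlock, so it is suitable inside assertions that may run
 * while spinlocks are already held:
 *
 *      KKASSERT(lockcountnb(&foo_lock) > 0);
 */
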
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}