sys/kern/kern_lock.c
/*
 * Copyright (c) 1995
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *      John S. Dyson.  All rights reserved.
 * Copyright (C) 2013
 *      Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static void undo_upreq(struct lock *lkp);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                                LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;

        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;

        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;

        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Otherwise
                 * we release the shared lock.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;
                } else {
                        wflags |= (count - 1);
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);

                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                }
                goto again;

        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        /* retry */
                }
                goto again;

        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * Always succeeds.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                           ~(LKC_EXCL|LKC_EXREQ|LKC_SHREQ))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                (count & ~LKC_UPREQ) |
                                                LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                               ~(LKC_EXREQ|LKC_SHREQ))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        return (error);
}

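/*
 * Illustrative usage sketch (hypothetical caller, not compiled): acquire
 * shared for reads, upgrade to exclusive for modification, and release.
 * Per the LK_UPGRADE notes above, any error return from lockmgr() here
 * (including ENOLCK from LK_SLEEPFAIL) means the lock has already been
 * released.  The names example_obj and example_modify are assumptions
 * made for illustration only.
 */
#if 0
struct example_obj {
        struct lock     eo_lock;        /* set up with lockinit() */
        int             eo_value;
};

static int
example_modify(struct example_obj *obj)
{
        int error;

        lockmgr(&obj->eo_lock, LK_SHARED);              /* read side */
        if (obj->eo_value == 0) {
                lockmgr(&obj->eo_lock, LK_RELEASE);
                return (0);
        }

        /*
         * The upgrade may temporarily release the lock; a non-zero
         * return means we no longer hold it at all.
         */
        error = lockmgr(&obj->eo_lock, LK_UPGRADE | LK_SLEEPFAIL);
        if (error)
                return (error);
        obj->eo_value--;
        lockmgr(&obj->eo_lock, LK_RELEASE);
        return (0);
}
#endif
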
/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                wakeup(lkp);
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}

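/*
 * Disassociate the current thread from a lock it holds exclusively by
 * transferring ownership to LK_KERNTHREAD.  The lock is no longer counted
 * against the calling thread and may subsequently be released from a
 * different thread or context.
 */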
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
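
/*
 * Lifecycle sketch (hypothetical, not compiled): a lock must be
 * initialized with lockinit() before first use and torn down with
 * lockuninit() only once no thread can still be blocked on it.  The
 * names example_lock, example_attach and example_detach are assumptions
 * made for illustration only.
 */
#if 0
static struct lock example_lock;

static void
example_attach(void)
{
        /* "exlck" is the wait message used while threads block on it */
        lockinit(&example_lock, "exlck", 0, 0);
}

static void
example_detach(void)
{
        /* must not be held or waited on at this point */
        lockuninit(&example_lock);
}
#endif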

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
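
/*
 * Assertion sketch (hypothetical, not compiled): lockcountnb() can be
 * used to assert that a lock is held on entry to a function that
 * requires it.  example_requires_lock is an assumed name.
 */
#if 0
static void
example_requires_lock(struct lock *lkp)
{
        KKASSERT(lockcountnb(lkp) > 0);         /* must be held by someone */
        /* ... work that assumes the lock is held ... */
}
#endif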

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                    lkp->lk_wmesg, count, td,
                    p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                    lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}