kernel - Improve SMP collision statistics
[dragonfly.git] / sys / kern / kern_lock.c
/*
 * Copyright (c) 1995
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *      John S. Dyson.  All rights reserved.
 * Copyright (C) 2013
 *      Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static void undo_upreq(struct lock *lkp);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                                LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;

        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release,
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;

        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;

        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      (count - 1) | wflags)) {
                        COUNT(td, -1);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                }
                goto again;

        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        /* retry */
                }
                goto again;

        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * Always succeeds.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                           ~(LKC_EXCL|LKC_EXREQ|LKC_SHREQ))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                (count & ~LKC_UPREQ) |
                                                LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                               ~(LKC_EXREQ|LKC_SHREQ))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        return (error);
}
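
/*
 * Illustrative usage sketch (hypothetical; the LOCKMGR_EXAMPLES guard and
 * the names below are examples only and are not part of the kernel build).
 * Shows typical lockmgr() call patterns: a plain exclusive acquire/release,
 * a shared acquire followed by an upgrade, and a non-blocking attempt.
 * Note that a failed LK_UPGRADE (or LK_EXCLUPGRADE) has already released
 * the shared count, so the caller must not assume it still holds the lock
 * on error.
 */
#ifdef LOCKMGR_EXAMPLES

static struct lock example_lk;

static void
example_lockmgr_usage(void)
{
        int error;

        lockinit(&example_lk, "exmplk", 0, 0);

        /* Plain exclusive acquire and release. */
        lockmgr(&example_lk, LK_EXCLUSIVE);
        lockmgr(&example_lk, LK_RELEASE);

        /* Shared acquire, then try to upgrade to exclusive. */
        lockmgr(&example_lk, LK_SHARED);
        error = lockmgr(&example_lk, LK_UPGRADE);
        if (error == 0) {
                /* Upgrade succeeded, we now hold the lock exclusively. */
                lockmgr(&example_lk, LK_RELEASE);
        }
        /* On error the shared count was already dropped, nothing to undo. */

        /* Non-blocking exclusive attempt. */
        if (lockmgr(&example_lk, LK_EXCLUSIVE | LK_NOWAIT) == 0)
                lockmgr(&example_lk, LK_RELEASE);
}

#endif  /* LOCKMGR_EXAMPLES */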

/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                wakeup(lkp);
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}

/*
 * Associate the lock with the kernel (LK_KERNTHREAD) rather than the
 * acquiring thread so that any thread may later release it.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}
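
/*
 * Illustrative sketch (hypothetical; guarded out of the build and not part
 * of the kernel).  After acquiring a lock exclusively, lockmgr_kernproc()
 * disassociates it from the acquiring thread so that some other thread may
 * later release it, e.g. when work started here completes elsewhere.
 */
#ifdef LOCKMGR_EXAMPLES

static void
example_kernproc_handoff(struct lock *lkp)
{
        lockmgr(lkp, LK_EXCLUSIVE);
        lockmgr_kernproc(lkp);
        /* Another thread is now free to do lockmgr(lkp, LK_RELEASE). */
}

#endif  /* LOCKMGR_EXAMPLES */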

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
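
/*
 * Illustrative lifecycle sketch (hypothetical; guarded out of the build and
 * not part of the kernel).  A lock is initialized before first use, may be
 * re-labeled with lockreinit() when the containing object is recycled, and
 * is torn down with lockuninit() once no thread can still be waiting on it.
 */
#ifdef LOCKMGR_EXAMPLES

static void
example_lock_lifecycle(struct lock *lkp)
{
        lockinit(lkp, "objlk", 0, 0);
        /* ... normal use via lockmgr() ... */
        lockreinit(lkp, "objlk2", 0, 0);
        /* ... more use ... */
        lockuninit(lkp);
}

#endif  /* LOCKMGR_EXAMPLES */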

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
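
/*
 * Illustrative assertion sketch (hypothetical; guarded out of the build and
 * not part of the kernel).  lockstatus(), lockowned() and lockcountnb() are
 * typically used to assert that the caller holds a lock in the expected
 * state.
 */
#ifdef LOCKMGR_EXAMPLES

static void
example_assert_exclusively_held(struct lock *lkp)
{
        KKASSERT(lockstatus(lkp, curthread) == LK_EXCLUSIVE);
        KKASSERT(lockowned(lkp));
        KKASSERT(lockcountnb(lkp) > 0);
}

#endif  /* LOCKMGR_EXAMPLES */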

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                    lkp->lk_wmesg, count, td,
                    p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                    lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

/*
 * Initialize a lock from a lock_args descriptor; called via the sysinit
 * mechanism.
 */
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}