sys/kern/kern_lock.c (dragonfly.git)
/*
 * Copyright (c) 1995
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *      John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2014
 *      Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

extern struct lock sysctllock;

static void undo_upreq(struct lock *lkp);

#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

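/*
 * Usage sketch (illustrative only, not part of the original sources; the
 * variable name "example_lk" and the wmesg string are arbitrary examples).
 * Shows a non-blocking exclusive acquisition using the API implemented in
 * this file:
 *
 *      struct lock example_lk;
 *
 *      lockinit(&example_lk, "examplk", 0, 0);
 *      if (lockmgr(&example_lk, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *              ... access the protected object ...
 *              lockmgr(&example_lk, LK_RELEASE);
 *      }
 *      lockuninit(&example_lk);
 */
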
/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                                LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL) {
                                        error = ENOLCK;
                                        break;
                                }
                        }
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;

        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;

        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;

        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Otherwise
                 * we release the shared lock.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;
                } else {
                        wflags |= (count - 1);
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);

                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                }
                goto again;

        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
                        undo_upreq(lkp);
                        error = ENOLCK;
                        break;
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        /* retry */
                }
                goto again;

        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * Always succeeds.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                           ~(LKC_EXCL | LKC_EXREQ |
                                             LKC_SHREQ| LKC_CANCEL))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                (count & ~LKC_UPREQ) |
                                                LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                               ~(LKC_EXREQ | LKC_SHREQ |
                                                 LKC_CANCEL))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Masked count
                                 * remains 1.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~(LKC_UPREQ |
                                                         LKC_CANCEL)) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;

        case LK_CANCEL_BEG:
                /*
                 * Start canceling blocked or future requestors.  The
                 * requestors must use CANCELABLE.  Don't waste time issuing
                 * a wakeup if nobody is pending.
                 */
                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */
                KKASSERT((count & LKC_MASK) != 0);      /* issue w/lock held */
                if (!atomic_cmpset_int(&lkp->lk_count,
                                       count, count | LKC_CANCEL)) {
                        goto again;
                }
                if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;

        case LK_CANCEL_END:
                atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        return (error);
}

/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                wakeup(lkp);
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}

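/*
 * Transfer ownership of an exclusively held lock to the kernel
 * (LK_KERNTHREAD) so a thread other than the original acquirer may
 * release it later; LK_RELEASE accepts either the owning thread or
 * LK_KERNTHREAD.  The per-thread lock count is dropped since the lock
 * no longer belongs to curthread.  (Descriptive comment added here;
 * not part of the original sources.)
 */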
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                    lkp->lk_wmesg, count, td,
                    p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                    lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

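/*
 * Initialization helper invoked via LOCK_SYSINIT(): sets up a lock from
 * its struct lock_args description.  (Descriptive comment added here;
 * not part of the original sources.)
 */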
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}

#ifdef DEBUG_CANCEL_LOCKS

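/*
 * Debug handler for the kern.cancel_lock sysctl.  On a write it drops the
 * sysctl lock, acquires cancel_lk exclusively, sleeps, issues LK_CANCEL_BEG
 * so cancelable waiters (e.g. a thread blocked in kern.cancel_test) are
 * canceled, sleeps again, then releases the lock and reacquires the sysctl
 * lock.  (Descriptive comment added here; not part of the original sources.)
 */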
static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                lockmgr(&sysctllock, LK_RELEASE);
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                kprintf("x");
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                kprintf("y");
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                kprintf("z");
                lockmgr(&cancel_lk, LK_RELEASE);
                lockmgr(&sysctllock, LK_EXCLUSIVE);
                SYSCTL_OUT(req, &error, sizeof(error));
        }
        error = 0;

        return error;
}

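/*
 * Debug handler for the kern.cancel_test sysctl.  On a write it attempts a
 * cancelable exclusive acquisition of cancel_lk and reports the result
 * (0 on success, ENOLCK if the request was canceled).  (Descriptive comment
 * added here; not part of the original sources.)
 */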
static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
                if (error == 0)
                        lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
                kprintf("test %d\n", error);
        }

        return 0;
}

#endif