/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Machine independent bits of mutex implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
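
/*
 * Illustrative sketch (not in the original source): __containerof()
 * maps a pointer to a member back to its enclosing structure, so the
 * lock cookie (the address of the mtx_lock word) recovers its mutex:
 *
 *	struct mtx sc_mtx;
 *	volatile uintptr_t *c = &sc_mtx.mtx_lock;
 *	struct mtx *m = mtxlock2mtx(c);		now m == &sc_mtx
 */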
/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
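
/*
 * Illustrative note (not in the original source): the mtx_lock word
 * encodes the owning thread pointer with flag bits OR'ed into its low
 * bits, which is why mtx_owner() masks with ~MTX_FLAGMASK:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *td = (struct thread *)(v & ~MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 *
 * Thread structures are aligned strictly enough that the flag bits can
 * never be part of a valid thread address.
 */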
static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config mtx_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

static void
mtx_delay_sysinit(void *dummy)
{

	mtx_delay.initial = mp_ncpus * 25;
	mtx_delay.step = (mp_ncpus * 25) / 2;
	mtx_delay.min = mp_ncpus * 5;
	mtx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
#endif
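
/*
 * Illustrative note (not in the original source): the adaptive-spin
 * parameters above are exported as sysctls, so they can be inspected
 * or tuned from userland at runtime, e.g.:
 *
 *	sysctl debug.mtx.delay_initial
 *	sysctl debug.mtx.delay_max=10000
 */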
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config mtx_spin_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_initial, CTLFLAG_RW,
    &mtx_spin_delay.initial, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_step, CTLFLAG_RW,
    &mtx_spin_delay.step, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_min, CTLFLAG_RW,
    &mtx_spin_delay.min, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

static void
mtx_spin_delay_sysinit(void *dummy)
{

	mtx_spin_delay.initial = mp_ncpus * 25;
	mtx_spin_delay.step = (mp_ncpus * 25) / 2;
	mtx_spin_delay.min = mp_ncpus * 5;
	mtx_spin_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_spin_delay_sysinit);
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;
void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}
void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}
void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}
uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}
uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}
#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (x != MTX_UNOWNED);
}
#endif
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock(m, curthread, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
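
/*
 * Illustrative sketch (not in the original source): a typical consumer
 * of the sleep-mutex API, e.g. a driver protecting its softc.  Names
 * such as "foo_softc" are hypothetical:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_open;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_open = 1;
 *	mtx_unlock(&sc->sc_mtx);
 */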
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
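
/*
 * Illustrative sketch (not in the original source): spin mutexes are
 * for code that must not sleep (e.g. state shared with interrupt
 * handlers); the lock/unlock pair saves and restores interrupt state:
 *
 *	static struct mtx foo_spin;		hypothetical lock
 *
 *	mtx_init(&foo_spin, "foo spin", NULL, MTX_SPIN);
 *	mtx_lock_spin(&foo_spin);
 *	...access state shared with an interrupt handler...
 *	mtx_unlock_spin(&foo_spin);
 */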
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
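
/*
 * Illustrative sketch (not in the original source): the usual
 * mtx_trylock() pattern avoids blocking when taking the lock in the
 * normal order is not possible:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		...fast path, lock held...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		...back off and retry in a safe lock order...
 *	}
 */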
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "spinning", "lockname:\"%s\"",
				    m->lock_object.lo_name);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "running");
				continue;
			}
		}
#endif
		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}
#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
				continue;
			}
			lda.spin_cnt++;
			if (lda.spin_cnt < 60000000 || kdb_active ||
			    panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
				break;
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
	LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}
void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */
/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
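
/*
 * Illustrative sketch (not in the original source): MTX_SYSINIT()
 * arranges for mtx_sysinit() to run during boot, so a file-scope mutex
 * is initialized before its first use; "foo_mtx" is hypothetical:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global", MTX_DEF);
 */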
/*
 * Mutex initialization routine; initialize lock `m' of the class selected
 * by `opts' with the options contained in `opts' and name `name.'  The
 * optional lock type `type' is used as a general lock category name for
 * use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
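
/*
 * Illustrative sketch (not in the original source): common mtx_init()
 * invocations and the lock_object flags they map to:
 *
 *	mtx_init(&m, "foo", NULL, MTX_DEF);			sleep mutex
 *	mtx_init(&m, "foo", NULL, MTX_DEF | MTX_RECURSE);	LO_RECURSABLE
 *	mtx_init(&m, "foo", NULL, MTX_DEF | MTX_DUPOK);		LO_DUPOK
 *	mtx_init(&m, "foo", NULL, MTX_SPIN);			spin mutex
 */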
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
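
/*
 * Illustrative sketch (not in the original source): as the code above
 * suggests, a mutex may be destroyed either unowned or while held
 * (non-recursed, uncontested) by the calling thread, so teardown paths
 * may write either:
 *
 *	mtx_destroy(&sc->sc_mtx);		unowned
 *
 * or:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	...drain final users...
 *	mtx_destroy(&sc->sc_mtx);		held by curthread
 */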
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif