/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */

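/*
 * Illustrative usage sketch (not part of the original file; "my_token"
 * and "my_count" are hypothetical).  A token serializes a code path only
 * while the thread is running; if the thread blocks inside the hold, the
 * token is transparently released and reacquired when the thread resumes:
 *
 *	static struct lwkt_token my_token =
 *		LWKT_TOKEN_MP_INITIALIZER(my_token);
 *	static int my_count;
 *
 *	void
 *	my_bump(void)
 *	{
 *		lwkt_gettoken(&my_token);	(may block)
 *		++my_count;			(serialized vs other holders)
 *		lwkt_reltoken(&my_token);
 *	}
 *
 * Recursive acquisition by the same thread is legal but such tokens must
 * be released in reverse order.
 */
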
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING		"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock up both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_MP_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_UP_INITIALIZER(vmobj_token);

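/*
 * Illustrative sketch (not part of the original file): subsystem code
 * serializes on a global token in the usual way.  Whether the MP lock is
 * also acquired is latched from t_flags into the tokref at acquisition
 * time, so the hold does not race the sysctl toggles below:
 *
 *	lwkt_gettoken(&vm_token);
 *	(manipulate global VM lists)
 *	lwkt_reltoken(&vm_token);
 */
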
SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe, CTLFLAG_RW,
    &pmap_token.t_flags, 0, "Require MP lock for pmap_token");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe, CTLFLAG_RW,
    &dev_token.t_flags, 0, "Require MP lock for dev_token");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe, CTLFLAG_RW,
    &vm_token.t_flags, 0, "Require MP lock for vm_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe, CTLFLAG_RW,
    &vmspace_token.t_flags, 0, "Require MP lock for vmspace_token");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe, CTLFLAG_RW,
    &kvm_token.t_flags, 0, "Require MP lock for kvm_token");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe, CTLFLAG_RW,
    &proc_token.t_flags, 0, "Require MP lock for proc_token");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe, CTLFLAG_RW,
    &tty_token.t_flags, 0, "Require MP lock for tty_token");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe, CTLFLAG_RW,
    &vnode_token.t_flags, 0, "Require MP lock for vnode_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmobj_mpsafe, CTLFLAG_RW,
    &vmobj_token.t_flags, 0, "Require MP lock for vmobj_token");

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmobj_collisions, CTLFLAG_RW,
    &vmobj_token.t_collisions, 0, "Collision counter of vmobj_token");

/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}

/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return (&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

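/*
 * Worked example (illustrative, not part of the original file): for
 * ptr == (void *)0x12345678,
 *
 *	(0x12345678 >> 2) ^ (0x12345678 >> 12) == 0x048D159E ^ 0x00012345
 *					       == 0x048C36DB
 *
 * and 0x048C36DB & 1023 == 731, so the address maps to pool_tokens[731].
 * XORing the two shifted copies mixes higher-order address bits into the
 * index so structures allocated near each other tend to hash to
 * different pool tokens.
 */
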
/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the mp_token.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the
 * thread is already holding the MP lock.
 */
static __inline
int
_lwkt_tok_flags(lwkt_token_t tok, thread_t td)
{
	int flags;

	/*
	 * tok->t_flags can change out from under us, make sure we have
	 * a local copy.
	 */
	flags = tok->t_flags;
	cpu_ccfence();
#ifdef SMP
	if ((flags & LWKT_TOKEN_MPSAFE) == 0 &&
	    _lwkt_token_held(&mp_token, td)) {
		return (flags | LWKT_TOKEN_MPSAFE);
	} else {
		return (flags);
	}
#else
	return (flags | LWKT_TOKEN_MPSAFE);
#endif
}

static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td,
		  int flags)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = flags;
}

/*
 * Force a LWKT reschedule on the target cpu when a requested token
 * becomes available.
 */
static
void
lwkt_reltoken_mask_remote(void *arg, int arg2, struct intrframe *frame)
{
	need_lwkt_resched();
}

static __inline
void
_lwkt_reltoken_mask(lwkt_token_t tok)
{
	cpumask_t mask;

	while ((mask = tok->t_collmask) != 0) {
		if (atomic_cmpset_cpumask(&tok->t_collmask, mask, 0)) {
			lwkt_send_ipiq3_mask(mask, lwkt_reltoken_mask_remote,
					     NULL, 0);
			break;
		}
	}
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.  We also do not do any
 * logging here.  The logging done by lwkt_gettoken() is plenty good
 * enough to get a feel for it.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
					break;
				continue;
			}

			/*
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.  We have to try once more after
			 * setting cpumask to cover possible races.
			 */
			atomic_set_cpumask(&tok->t_collmask,
					   td->td_gd->gd_cpumask);
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) {
				atomic_clear_cpumask(&tok->t_collmask,
						     td->td_gd->gd_cpumask);
				break;
			}
			td->td_wmesg = tok->t_desc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return (FALSE);
		}
	}
	return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens,
 * note that t_ref may not match the scan for recursively held tokens, or
 * for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan) {
			tok->t_ref = NULL;
			_lwkt_reltoken_mask(tok);
		}
	}
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because it 'might' have blocked).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return (TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return (FALSE);
	}
}

/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			return;
		}
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
}

void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
success:
	crit_enter_hard_gd(td->td_gd);
}

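/*
 * Illustrative pairing sketch (not part of the original file; "my_token"
 * is hypothetical).  The _hard variants additionally hold a hard critical
 * section for code that must not be preempted while the token is held:
 *
 *	lwkt_gettoken_hard(&my_token);
 *	(non-preemptible serialized work)
 *	lwkt_reltoken_hard(&my_token);
 */
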
lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;
	int flags;

	tok = _lwkt_token_pool_lookup(ptr);
	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			goto success;
		}
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
success:
	return (tok);
}

/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0) {
		if (try_mplock() == 0)
			return (FALSE);
	}

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		cpu_ccfence();
		--td->td_toks_stop;
		if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
			rel_mplock();
		}
		return (FALSE);
	}
	return (TRUE);
}

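/*
 * Illustrative sketch (not part of the original file; "my_token" and the
 * work/queue steps are hypothetical).  lwkt_trytoken() enables a
 * non-blocking fast path with a fallback when the caller would rather
 * defer work than risk switching away:
 *
 *	if (lwkt_trytoken(&my_token)) {
 *		(do the work immediately)
 *		lwkt_reltoken(&my_token);
 *	} else {
 *		(queue the work for later)
 *	}
 */
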
/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.  Then adjust td_toks_stop.
	 *
	 * Some comparisons must be run prior to adjusting td_toks_stop
	 * to avoid racing against a fast interrupt/ipi which tries to
	 * acquire a token.
	 *
	 * We must also be absolutely sure that the compiler does not
	 * reorder the clearing of t_ref and the adjustment of td_toks_stop,
	 * or reorder the adjustment of td_toks_stop against the conditional.
	 *
	 * NOTE: The mplock is a token also so sequencing is a bit complex.
	 */
	if (tok->t_ref == ref) {
		tok->t_ref = NULL;
		_lwkt_reltoken_mask(tok);
	}
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
		cpu_sfence();
		td->td_toks_stop = ref;
		rel_mplock();
	} else {
		cpu_sfence();
		td->td_toks_stop = ref;
	}
	KKASSERT(tok->t_ref != ref);
}

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);

	lwkt_reltoken(tok);
}

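/*
 * Illustrative sketch (not part of the original file; "obj" is
 * hypothetical).  The faster idiom keeps the returned token and releases
 * it directly, avoiding a second hash lookup:
 *
 *	lwkt_token_t tok;
 *
 *	tok = lwkt_getpooltoken(obj);
 *	(serialized access to *obj)
 *	lwkt_reltoken(tok);
 *
 * versus the convenience form:
 *
 *	lwkt_getpooltoken(obj);
 *	(serialized access to *obj)
 *	lwkt_relpooltoken(obj);
 */
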
/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return (count);
}

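/*
 * Illustrative sketch (not part of the original file; "my_token" is
 * hypothetical).  Callers can use the count to assert non-recursive
 * ownership:
 *
 *	KKASSERT(lwkt_cnttoken(&my_token, curthread) == 1);
 */
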
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], 1, "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
	tok->t_collisions = 0;
	tok->t_collmask = 0;
	tok->t_desc = desc;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* currently a no-op */
}

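/*
 * Illustrative sketch (not part of the original file; "my_token" and
 * "my_subsys_init" are hypothetical).  A subsystem declaring its own
 * token initializes it once, e.g. with mpsafe=1 so acquisition does not
 * also take the MP lock:
 *
 *	static struct lwkt_token my_token;
 *
 *	static void
 *	my_subsys_init(void)
 *	{
 *		lwkt_token_init(&my_token, 1, "mysubsys");
 *	}
 */
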
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}