/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
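/*
 * Usage sketch (illustrative comment only, not compiled; 'my_token' is a
 * hypothetical token).  A thread serializes access across a potentially
 * blocking operation; the token is transparently released if the thread
 * blocks and reacquired when it resumes:
 *
 *	lwkt_gettoken(&my_token);
 *	error = tsleep(obj, 0, "tokex", hz);	// token dropped/reacquired
 *	lwkt_reltoken(&my_token);
 */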
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];
#define TOKEN_STRING		"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)
/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lockup both global and individual
 * structures such as proc's and vnodes.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_MP_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_UP_INITIALIZER(vmobj_token);
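/*
 * Usage sketch (illustrative only): a subsystem that does not yet have
 * fine-grained locks serializes on one of the global tokens above, e.g.
 *
 *	lwkt_gettoken(&vm_token);
 *	... manipulate global VM lists ...
 *	lwkt_reltoken(&vm_token);
 *
 * Whether the MP lock is also acquired depends on the token's UP/MP
 * state at acquisition time, as described above.
 */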
SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe, CTLFLAG_RW,
    &pmap_token.t_flags, 0, "Require MP lock for pmap_token");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe, CTLFLAG_RW,
    &dev_token.t_flags, 0, "Require MP lock for dev_token");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe, CTLFLAG_RW,
    &vm_token.t_flags, 0, "Require MP lock for vm_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe, CTLFLAG_RW,
    &vmspace_token.t_flags, 0, "Require MP lock for vmspace_token");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe, CTLFLAG_RW,
    &kvm_token.t_flags, 0, "Require MP lock for kvm_token");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe, CTLFLAG_RW,
    &proc_token.t_flags, 0, "Require MP lock for proc_token");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe, CTLFLAG_RW,
    &tty_token.t_flags, 0, "Require MP lock for tty_token");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe, CTLFLAG_RW,
    &vnode_token.t_flags, 0, "Require MP lock for vnode_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmobj_mpsafe, CTLFLAG_RW,
    &vmobj_token.t_flags, 0, "Require MP lock for vmobj_token");

static int lwkt_token_ipi_dispatch = 4;
SYSCTL_INT(_lwkt, OID_AUTO, token_ipi_dispatch, CTLFLAG_RW,
    &lwkt_token_ipi_dispatch, 0, "Number of IPIs to dispatch on token release");
/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmobj_collisions, CTLFLAG_RW,
    &vmobj_token.t_collisions, 0, "Collision counter of vmobj_token");
/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}
/*
 * Return a pool token given an address.
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
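/*
 * Worked example (hypothetical address): the hash mixes two shifted
 * copies of the pointer so that objects on the same page still spread
 * across tokens.  For ptr == 0x1234:
 *
 *	i = (0x1234 >> 2) ^ (0x1234 >> 12) = 0x48d ^ 0x1 = 0x48c
 *
 * yielding &pool_tokens[0x48c & LWKT_MASK_POOL_TOKENS].
 */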
/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the mp_token.  This bypasses unnecessary calls to get_mplock() and
 * rel_mplock() on tokens which are not normally MPSAFE when the thread
 * is already holding the MP lock.
 */
static __inline
int
_lwkt_tok_flags(lwkt_token_t tok, thread_t td)
{
	int flags;

	/*
	 * tok->t_flags can change out from under us, make sure we have
	 * a stable local copy.
	 */
	flags = tok->t_flags;
	cpu_ccfence();
	if ((flags & LWKT_TOKEN_MPSAFE) == 0 &&
	    _lwkt_token_held(&mp_token, td)) {
		return (flags | LWKT_TOKEN_MPSAFE);
	} else {
		return (flags);
	}
}
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td,
		  int flags)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = flags;
}
/*
 * Force a LWKT reschedule on the target cpu when a requested token
 * becomes available.
 */
static
void
lwkt_reltoken_mask_remote(void *arg, int arg2, struct intrframe *frame)
{
	need_lwkt_resched();
}
/*
 * This bit of code sends a LWKT reschedule request to whatever other cpus
 * had contended on the token being released.  We could wake up all the cpus
 * but generally speaking if there is a lot of contention we really only want
 * to wake up a subset of cpus to avoid aggregating O(N^2) IPIs.  The current
 * cpuid is used as a basis to select which other cpus to wake up.
 *
 * For the selected cpus we can avoid issuing the actual IPI if the target
 * cpu's RQF_WAKEUP is already set.  In this case simply setting the
 * reschedule flag RQF_AST_LWKT_RESCHED will be sufficient.
 *
 * lwkt.token_ipi_dispatch specifies the maximum number of IPIs to dispatch
 * on a token release.
 */
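/*
 * Worked example (hypothetical numbers): with ncpus = 8,
 * lwkt.token_ipi_dispatch = 4 and t_collmask = 0xb2 (cpus 1, 4, 5 and 7
 * contending), a release on cpu 3 scans upward from its own cpuid, so
 * cpus 4, 5 and 7 are considered first and cpu 1 last, and at most 4
 * actual IPIs are sent.
 */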
static __inline
void
_lwkt_reltoken_mask(lwkt_token_t tok)
{
	globaldata_t ngd;
	cpumask_t mask;
	cpumask_t tmpmask;
	cpumask_t wumask;	/* wakeup mask */
	cpumask_t remask;	/* clear mask */
	int wucount;		/* wakeup count */
	int cpuid;
	int reqflags;

	/*
	 * Mask of contending cpus we want to wake up.
	 */
	mask = tok->t_collmask;
	cpu_ccfence();
	if (mask == 0)
		return;

	/*
	 * Degenerate case - IPI to all contending cpus
	 */
	wucount = lwkt_token_ipi_dispatch;
	if (wucount <= 0 || wucount >= ncpus) {
		wucount = 0;
		wumask = mask;
		remask = mask;
	} else {
		wumask = 0;
		remask = 0;
	}

	/*
	 * Calculate which cpus to IPI.  These cpus are potentially in a
	 * HLT state waiting for token contention to go away.
	 *
	 * Ask the cpu LWKT scheduler to reschedule by setting
	 * RQF_AST_LWKT_RESCHED.  Signal the cpu if RQF_WAKEUP is not
	 * set (otherwise it has already been signalled or will check the
	 * flag very soon anyway).  Both bits must be adjusted atomically
	 * all in one go to avoid races.
	 *
	 * The collision mask is cleared for all cpus we set the resched
	 * flag for, but we only IPI the ones that need signalling.
	 */
	while (wucount && mask) {
		tmpmask = mask & ~(CPUMASK(mycpu->gd_cpuid) - 1);
		if (tmpmask)
			cpuid = BSFCPUMASK(tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		ngd = globaldata_find(cpuid);
		for (;;) {
			reqflags = ngd->gd_reqflags;
			if (atomic_cmpset_int(&ngd->gd_reqflags, reqflags,
					      reqflags |
					      (RQF_WAKEUP |
					       RQF_AST_LWKT_RESCHED))) {
				break;
			}
		}
		if ((reqflags & RQF_WAKEUP) == 0) {
			wumask |= CPUMASK(cpuid);
			--wucount;
		}
		remask |= CPUMASK(cpuid);
		mask &= ~CPUMASK(cpuid);
	}
	if (remask) {
		atomic_clear_cpumask(&tok->t_collmask, remask);
		lwkt_send_ipiq3_mask(wumask, lwkt_reltoken_mask_remote,
				     NULL, 0);
	}
}
/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * We always clear the collision mask on token acquisition.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) {
					if (tok->t_collmask & td->td_gd->gd_cpumask) {
						atomic_clear_cpumask(&tok->t_collmask,
						    td->td_gd->gd_cpumask);
					}
					break;
				}
				continue;
			}

			/*
			 * Someone holds the token.
			 *
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.  We have to try once more after
			 * setting cpumask to cover possible races against
			 * the checking of t_collmask.
			 */
			atomic_set_cpumask(&tok->t_collmask,
					   td->td_gd->gd_cpumask);
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) {
				if (tok->t_collmask & td->td_gd->gd_cpumask) {
					atomic_clear_cpumask(&tok->t_collmask,
					    td->td_gd->gd_cpumask);
				}
				break;
			}
			td->td_wmesg = tok->t_desc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return(TRUE);
}
/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan) {
			tok->t_ref = NULL;
			_lwkt_reltoken_mask(tok);
		}
	}
}
/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because it 'might' have blocked).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return(FALSE);
	}
}
/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			return;
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
}
void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			break;
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
	crit_enter_hard_gd(td->td_gd);
}
lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;
	int flags;

	tok = _lwkt_token_pool_lookup(ptr);
	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
#if 0
		/*
		 * (DISABLED ATM) - Do not set t_collmask on a token
		 * acquisition failure, the scheduler will spin at least
		 * once and deal with hlt/spin semantics.
		 */
		atomic_set_cpumask(&tok->t_collmask, td->td_gd->gd_cpumask);
		if (atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
			atomic_clear_cpumask(&tok->t_collmask,
					     td->td_gd->gd_cpumask);
			return(tok);
		}
#endif
		td->td_wmesg = tok->t_desc;
		atomic_add_long(&tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(tok->t_ref == ref);
	}
	return(tok);
}
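/*
 * Pool token usage sketch (illustrative only): serialize an arbitrary
 * structure without embedding a token in it.  Reusing the returned
 * token for the release avoids a second hash lookup:
 *
 *	lwkt_token_t tok = lwkt_getpooltoken(obj);
 *	... modify *obj ...
 *	lwkt_reltoken(tok);	// cheaper than lwkt_relpooltoken(obj)
 */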
/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	int flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0) {
		if (try_mplock() == 0)
			return (FALSE);
	}

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
			cpu_ccfence();
			--td->td_toks_stop;
			cpu_ccfence();
			rel_mplock();
		} else {
			cpu_ccfence();
			--td->td_toks_stop;
			cpu_ccfence();
		}
		return (FALSE);
	}
	return (TRUE);
}
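/*
 * Non-blocking usage sketch (illustrative; 'my_token' is a placeholder):
 * callers that must not block test the return value and defer the work
 * on failure:
 *
 *	if (lwkt_trytoken(&my_token)) {
 *		... quick work ...
 *		lwkt_reltoken(&my_token);
 *	} else {
 *		... requeue and retry later ...
 *	}
 */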
/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.  Then adjust td_toks_stop.
	 *
	 * Some comparisons must be run prior to adjusting td_toks_stop
	 * to avoid racing against a fast interrupt/ ipi which tries to
	 * acquire a token.
	 *
	 * We must also be absolutely sure that the compiler does not
	 * reorder the clearing of t_ref and the adjustment of td_toks_stop,
	 * or reorder the adjustment of td_toks_stop against the conditional.
	 *
	 * NOTE: The mplock is a token also so sequencing is a bit complex.
	 */
	if (tok->t_ref == ref) {
		tok->t_ref = NULL;
		_lwkt_reltoken_mask(tok);
	}
	cpu_sfence();
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
		cpu_ccfence();
		td->td_toks_stop = ref;
		cpu_ccfence();
		rel_mplock();
	} else {
		cpu_ccfence();
		td->td_toks_stop = ref;
		cpu_ccfence();
	}
	KKASSERT(tok->t_ref != ref);
}
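/*
 * Reverse-order illustration (hypothetical tokens 'a' and 'b'): releases
 * pop the last tokref off the thread's array, so this sequence is legal:
 *
 *	lwkt_gettoken(&a);
 *	lwkt_gettoken(&b);
 *	lwkt_reltoken(&b);
 *	lwkt_reltoken(&a);
 *
 * whereas releasing 'a' before 'b' would trip the KKASSERT above.
 */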
void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}
/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);

	lwkt_reltoken(tok);
}
/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return(count);
}
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], 1, "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}
/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
	tok->t_collisions = 0;
	tok->t_collmask = 0;
	tok->t_desc = desc;
}
void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}
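/*
 * Initialization sketch (hypothetical softc structure, illustrative
 * only): a subsystem typically embeds a token in its own structure and
 * initializes it once at attach time:
 *
 *	struct mydev_softc {
 *		struct lwkt_token sc_token;
 *		...
 *	};
 *
 *	lwkt_token_init(&sc->sc_token, 1, "mydev");	// MPSAFE token
 */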
#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
#endif