/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
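
/*
 * Example usage (sketch; 'my_token' and 'my_count' are hypothetical):
 *
 *      static struct lwkt_token my_token =
 *              LWKT_TOKEN_INITIALIZER(my_token);
 *      static int my_count;
 *
 *      lwkt_gettoken(&my_token);        - may block
 *      ++my_count;                      - serialized vs other holders
 *      lwkt_reltoken(&my_token);        - reverse order of acquisition
 *
 * Because blocking releases all held tokens until the thread resumes,
 * token-protected state must be revalidated after any call that can block.
 */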

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

extern int lwkt_sched_debug;

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS    4001    /* prime number */
#endif

static lwkt_token       pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING            "REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING        "REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING      "REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS      KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)                                             \
        KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lockup both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_INITIALIZER(vmobj_token);
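
/*
 * Example: serializing a scan of a global list with one of the tokens
 * above (sketch; 'foo' and 'foo_list' are hypothetical):
 *
 *      lwkt_gettoken(&proc_token);
 *      TAILQ_FOREACH(foo, &foo_list, f_entry)
 *              do_something(foo);
 *      lwkt_reltoken(&proc_token);
 */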

static int lwkt_token_spin = 5;
SYSCTL_INT(_lwkt, OID_AUTO, token_spin, CTLFLAG_RW,
    &lwkt_token_spin, 0, "Decontention spin loops");
static int lwkt_token_delay = 0;
SYSCTL_INT(_lwkt, OID_AUTO, token_delay, CTLFLAG_RW,
    &lwkt_token_delay, 0, "Decontention spin delay in ns");

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");

static int _lwkt_getalltokens_sorted(thread_t td);

/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
        KKASSERT(mp_token.t_ref == NULL);
        if (lwkt_trytoken(&mp_token) == FALSE)
                panic("cpu_get_initial_mplock");
}

/*
 * Return a pool token given an address.  Use a prime number to reduce
 * overlaps.
 */
static lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
        u_int i;

        i = (u_int)(uintptr_t)ptr % LWKT_NUM_POOL_TOKENS;
        return(&pool_tokens[i]);
}
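
/*
 * Example: two addresses share a pool token only when they are congruent
 * modulo the prime 4001, e.g. (void *)5 and (void *)4006 both map to
 * pool_tokens[5].  Unrelated structures may therefore share a token;
 * that is harmless for correctness and merely adds contention.
 */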

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 */
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td)
{
        ref->tr_tok = tok;
        ref->tr_owner = td;
}

/*
 * See kern/kern_spinlock.c for the discussion on cache-friendly contention
 * resolution.  We currently do not use cpu_lfence() (expensive!!) and, more
 * importantly, we do a read-test of t_ref before attempting an atomic op,
 * which greatly reduces hw cache bus contention.
 */
#ifndef SMP

static __inline
int
_lwkt_trytoken_spin(lwkt_token_t tok, lwkt_tokref_t ref)
{
        int n;

        for (n = 0; n < lwkt_token_spin; ++n) {
                if (tok->t_ref == NULL &&
                    atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
                        return(TRUE);
                }
                if (lwkt_token_delay) {
                        tsc_delay(lwkt_token_delay);
                } else {
                        cpu_pause();
                }
        }
        return(FALSE);
}

static __inline
void
_lwkt_reltoken_spin(lwkt_token_t tok)
{
        tok->t_ref = NULL;
}

#else /* SMP */

/*
 * Helper function used by lwkt_getalltokens[_sorted]().
 *
 * Our attempt to acquire the token has failed.  To reduce cache coherency
 * bandwidth we set our cpu bit in t_collmask then wait for a reasonable
 * period of time for a hand-off from the current token owner.
 */
static __inline
int
_lwkt_trytoken_spin(lwkt_token_t tok, lwkt_tokref_t ref)
{
        globaldata_t gd = mycpu;
        cpumask_t mask;
        int n;

        /*
         * Add our cpu to the collision mask and wait for the token to be
         * released by the holder.
         */
        crit_enter();
        atomic_set_cpumask(&tok->t_collmask, gd->gd_cpumask);
        for (n = 0; n < lwkt_token_spin; ++n) {
                /*
                 * Token was released before we set our collision bit.
                 */
                if (tok->t_ref == NULL &&
                    atomic_cmpset_ptr(&tok->t_ref, NULL, ref)) {
                        KKASSERT((tok->t_collmask & gd->gd_cpumask) != 0);
                        atomic_clear_cpumask(&tok->t_collmask, gd->gd_cpumask);
                        crit_exit();
                        return(TRUE);
                }

                /*
                 * Token was handed-off to us.
                 */
                if (tok->t_ref == &gd->gd_handoff) {
                        KKASSERT((tok->t_collmask & gd->gd_cpumask) == 0);
                        tok->t_ref = ref;
                        crit_exit();
                        return(TRUE);
                }
                if (lwkt_token_delay)
                        tsc_delay(lwkt_token_delay);
                else
                        cpu_pause();
        }

        /*
         * We failed, attempt to clear our bit in the cpumask.  We may race
         * someone handing-off to us.  If someone other than us cleared our
         * cpu bit a handoff is incoming and we must wait for it.
         */
        for (;;) {
                mask = tok->t_collmask;
                cpu_ccfence();
                if (mask & gd->gd_cpumask) {
                        if (atomic_cmpset_cpumask(&tok->t_collmask,
                                                  mask,
                                                  mask & ~gd->gd_cpumask)) {
                                crit_exit();
                                return(FALSE);
                        }
                        continue;
                }
                if (tok->t_ref != &gd->gd_handoff) {
                        cpu_pause();
                        continue;
                }
                tok->t_ref = ref;
                crit_exit();
                return(TRUE);
        }
}

/*
 * Release token with hand-off
 */
static __inline
void
_lwkt_reltoken_spin(lwkt_token_t tok)
{
        globaldata_t xgd;
        cpumask_t sidemask;
        cpumask_t mask;
        int cpuid;

        if (tok->t_collmask == 0) {
                tok->t_ref = NULL;
                return;
        }

        crit_enter();
        sidemask = ~(mycpu->gd_cpumask - 1);    /* high bits >= xcpu */
        for (;;) {
                mask = tok->t_collmask;
                cpu_ccfence();
                if (mask == 0) {
                        tok->t_ref = NULL;
                        break;
                }
                if (mask & sidemask)
                        cpuid = BSFCPUMASK(mask & sidemask);
                else
                        cpuid = BSFCPUMASK(mask);
                xgd = globaldata_find(cpuid);
                if (atomic_cmpset_cpumask(&tok->t_collmask, mask,
                                          mask & ~CPUMASK(cpuid))) {
                        tok->t_ref = &xgd->gd_handoff;
                        break;
                }
        }
        crit_exit();
}

#endif /* SMP */
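
/*
 * Illustrative hand-off sequence for the SMP path above (cpu A holds
 * the token, cpu B wants it):
 *
 * 1. cpu B fails its initial acquisition attempts and sets its bit in
 *    tok->t_collmask, then keeps spinning.
 * 2. cpu A releases: _lwkt_reltoken_spin() picks a contender from
 *    t_collmask (preferring cpus at or above its own bit, via sidemask),
 *    clears that cpu's bit and points t_ref at that cpu's gd_handoff.
 * 3. cpu B observes tok->t_ref == &gd->gd_handoff and claims the token
 *    by replacing t_ref with its own tokref.
 */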

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * If spinning is non-zero this function acquires the tokens in a particular
 * order to deal with potential deadlocks.  We simply use address order for
 * the sort.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td, int spinning)
{
        lwkt_tokref_t scan;
        lwkt_tokref_t ref;
        lwkt_token_t tok;

        if (spinning)
                return(_lwkt_getalltokens_sorted(td));

        /*
         * Acquire tokens in forward order, assign or validate tok->t_ref.
         */
        for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
                tok = scan->tr_tok;
                for (;;) {
                        /*
                         * Try to acquire the token if we do not already have
                         * it.
                         *
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        ref = tok->t_ref;
                        if (ref == NULL) {
                                if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
                                        break;
                                continue;
                        }

                        /*
                         * Someone holds the token.
                         *
                         * Test if ref is already recursively held by this
                         * thread.  We cannot safely dereference tok->t_ref
                         * (it might belong to another thread and is thus
                         * unstable), but we don't have to.  We can simply
                         * range-check it.
                         */
                        if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
                                break;

                        /*
                         * Try hard to acquire this token before giving up
                         * and releasing the whole lot.
                         */
                        if (_lwkt_trytoken_spin(tok, scan))
                                break;
                        if (lwkt_sched_debug > 0) {
                                --lwkt_sched_debug;
                                kprintf("toka %p %s %s\n",
                                        tok, tok->t_desc, td->td_comm);
                        }

                        /*
                         * Otherwise we failed to acquire all the tokens.
                         * Release whatever we did get.
                         */
                        td->td_wmesg = tok->t_desc;
                        atomic_add_long(&tok->t_collisions, 1);
                        lwkt_relalltokens(td);
                        return(FALSE);
                }
        }
        return(TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens which
 * are held deeper in the stack, or for the case where a lwkt_getalltokens()
 * failed.
 *
 * Tokens are released in reverse order to reduce chasing race failures.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
        lwkt_tokref_t scan;
        lwkt_token_t tok;

        for (scan = td->td_toks_stop - 1; scan >= &td->td_toks_base; --scan) {
        /*for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {*/
                tok = scan->tr_tok;
                if (tok->t_ref == scan)
                        _lwkt_reltoken_spin(tok);
        }
}

/*
 * This is the decontention version of lwkt_getalltokens().  The tokens are
 * acquired in address-sorted order to deal with any deadlocks.  Ultimately
 * token failures will spin into the scheduler and get here.
 *
 * In addition, to reduce hardware cache coherency contention monitor/mwait
 * is interlocked with gd->gd_reqflags and RQF_SPINNING.  Other cores which
 * release a contended token will clear RQF_SPINNING and cause the mwait
 * to resume.  Any interrupt will also generally set RQF_* flags and cause
 * mwait to resume (or be a NOP in the first place).
 *
 * This code is required to set up RQF_SPINNING in case of failure.  The
 * caller may call monitor/mwait on gd->gd_reqflags on failure.  We do NOT
 * want to call mwait here, and doubly so while we are holding tokens.
 *
 * Called from critical section
 */
static
int
_lwkt_getalltokens_sorted(thread_t td)
{
        /*globaldata_t gd = td->td_gd;*/
        lwkt_tokref_t sort_array[LWKT_MAXTOKENS];
        lwkt_tokref_t scan;
        lwkt_tokref_t ref;
        lwkt_token_t tok;
        int i;
        int j;
        int n;

        /*
         * Sort the token array.  Yah yah, I know this isn't fun.
         *
         * NOTE: Recursively acquired tokens are ordered the same as in the
         *       td_toks_array so we can always get the earliest one first.
         */
        i = 0;
        scan = &td->td_toks_base;
        while (scan < td->td_toks_stop) {
                for (j = 0; j < i; ++j) {
                        if (scan->tr_tok < sort_array[j]->tr_tok)
                                break;
                }
                if (j != i) {
                        bcopy(sort_array + j, sort_array + j + 1,
                              (i - j) * sizeof(lwkt_tokref_t));
                }
                sort_array[j] = scan;
                ++i;
                ++scan;
        }
        n = i;

        /*
         * Acquire tokens in forward order, assign or validate tok->t_ref.
         */
        for (i = 0; i < n; ++i) {
                scan = sort_array[i];
                tok = scan->tr_tok;
                for (;;) {
                        /*
                         * Try to acquire the token if we do not already have
                         * it.
                         *
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        ref = tok->t_ref;
                        if (ref == NULL) {
                                if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
                                        break;
                                continue;
                        }

                        /*
                         * Someone holds the token.
                         *
                         * Test if ref is already recursively held by this
                         * thread.  We cannot safely dereference tok->t_ref
                         * (it might belong to another thread and is thus
                         * unstable), but we don't have to.  We can simply
                         * range-check it.
                         */
                        if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
                                break;

                        /*
                         * Try hard to acquire this token before giving up
                         * and releasing the whole lot.
                         */
                        if (_lwkt_trytoken_spin(tok, scan))
                                break;
                        if (lwkt_sched_debug > 0) {
                                --lwkt_sched_debug;
                                kprintf("tokb %p %s %s\n",
                                        tok, tok->t_desc, td->td_comm);
                        }

                        /*
                         * Tokens are released in reverse order to reduce
                         * chasing race failures.
                         */
                        td->td_wmesg = tok->t_desc;
                        atomic_add_long(&tok->t_collisions, 1);

                        for (j = i - 1; j >= 0; --j) {
                        /*for (j = 0; j < i; ++j) {*/
                                scan = sort_array[j];
                                tok = scan->tr_tok;
                                if (tok->t_ref == scan)
                                        _lwkt_reltoken_spin(tok);
                        }
                        return(FALSE);
                }
        }

        /*
         * We were successful, there is no need for another core to signal
         * us.
         */
#if 0
        atomic_clear_int(&gd->gd_reqflags, RQF_SPINNING);
#endif
        return(TRUE);
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
        lwkt_token_t tok;
        lwkt_tokref_t ref;

        /*
         * Make sure the compiler does not reorder prior instructions
         * beyond this demark.
         */
        cpu_ccfence();

        /*
         * Attempt to gain ownership
         */
        tok = nref->tr_tok;
        for (;;) {
                /*
                 * Try to acquire the token if we do not already have
                 * it.  This is not allowed if we are in a hard code
                 * section (because it 'might' have blocked).
                 */
                ref = tok->t_ref;
                if (ref == NULL) {
                        KASSERT((blocking == 0 ||
                                td->td_gd->gd_intr_nesting_level == 0 ||
                                panic_cpu_gd == mycpu),
                                ("Attempt to acquire token %p not already "
                                 "held in hard code section", tok));

                        /*
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
                                return(TRUE);
                        continue;
                }

                /*
                 * Someone holds the token.
                 *
                 * Test if ref is already recursively held by this
                 * thread.  We cannot safely dereference tok->t_ref
                 * (it might belong to another thread and is thus
                 * unstable), but we don't have to.  We can simply
                 * range-check it.
                 *
                 * It is ok to acquire a token that is already held
                 * by the current thread when in a hard code section.
                 */
                if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
                        return(TRUE);

                /*
                 * Spin generously.  This is preferable to just switching
                 * away unconditionally.
                 */
                if (_lwkt_trytoken_spin(tok, nref))
                        return(TRUE);

                /*
                 * Otherwise we failed, and it is not ok to attempt to
                 * acquire a token in a hard code section.
                 */
                KASSERT((blocking == 0 ||
                        td->td_gd->gd_intr_nesting_level == 0),
                        ("Attempt to acquire token %p not already "
                         "held in hard code section", tok));

                return(FALSE);
        }
}

/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        ++td->td_toks_stop;
        cpu_ccfence();
        _lwkt_tokref_init(ref, tok, td);

        if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
                /*
                 * Give up running if we can't acquire the token right now.
                 *
                 * Since the tokref is already active the scheduler now
                 * takes care of acquisition, so we need only call
                 * lwkt_switch().
                 *
                 * Since we failed this was not a recursive token so upon
                 * return tr_tok->t_ref should be assigned to this specific
                 * ref.
                 */
                td->td_wmesg = tok->t_desc;
                atomic_add_long(&tok->t_collisions, 1);
                logtoken(fail, ref);
                lwkt_switch();
                logtoken(succ, ref);
                KKASSERT(tok->t_ref == ref);
        }
}
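
/*
 * Example: revalidation after blocking while holding a token (sketch;
 * 'foo' and 'foo_list' are hypothetical).  Any blocking call releases
 * all held tokens until the thread resumes:
 *
 *      lwkt_gettoken(&vnode_token);
 *      foo = TAILQ_FIRST(&foo_list);
 *      tsleep(foo, 0, "example", hz);   - token released while asleep
 *      foo = TAILQ_FIRST(&foo_list);    - re-fetch, the list may have
 *                                         changed while we slept
 *      lwkt_reltoken(&vnode_token);
 */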

void
lwkt_gettoken_hard(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        ++td->td_toks_stop;
        cpu_ccfence();
        _lwkt_tokref_init(ref, tok, td);

        if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
                /*
                 * Give up running if we can't acquire the token right now.
                 *
                 * Since the tokref is already active the scheduler now
                 * takes care of acquisition, so we need only call
                 * lwkt_switch().
                 *
                 * Since we failed this was not a recursive token so upon
                 * return tr_tok->t_ref should be assigned to this specific
                 * ref.
                 */
                td->td_wmesg = tok->t_desc;
                atomic_add_long(&tok->t_collisions, 1);
                logtoken(fail, ref);
                lwkt_switch();
                logtoken(succ, ref);
                KKASSERT(tok->t_ref == ref);
        }
        crit_enter_hard_gd(td->td_gd);
}
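
/*
 * Example: lwkt_gettoken_hard() leaves the cpu in a hard critical
 * section and must be paired with lwkt_reltoken_hard() (sketch):
 *
 *      lwkt_gettoken_hard(&tty_token);
 *      ...                              - hard code section
 *      lwkt_reltoken_hard(&tty_token);
 */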

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;
        lwkt_token_t tok;

        tok = _lwkt_token_pool_lookup(ptr);
        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        ++td->td_toks_stop;
        cpu_ccfence();
        _lwkt_tokref_init(ref, tok, td);

        if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
                /*
                 * Give up running if we can't acquire the token right now.
                 *
                 * Since the tokref is already active the scheduler now
                 * takes care of acquisition, so we need only call
                 * lwkt_switch().
                 *
                 * Since we failed this was not a recursive token so upon
                 * return tr_tok->t_ref should be assigned to this specific
                 * ref.
                 */
                td->td_wmesg = tok->t_desc;
                atomic_add_long(&tok->t_collisions, 1);
                logtoken(fail, ref);
                lwkt_switch();
                logtoken(succ, ref);
                KKASSERT(tok->t_ref == ref);
        }
        return(tok);
}

/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        ++td->td_toks_stop;
        cpu_ccfence();
        _lwkt_tokref_init(ref, tok, td);

        if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
                /*
                 * Cleanup, deactivate the failed token.
                 */
                cpu_ccfence();
                --td->td_toks_stop;
                return(FALSE);
        }
        return(TRUE);
}
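
/*
 * Example: non-blocking acquisition with a fallback path (sketch):
 *
 *      if (lwkt_trytoken(&dev_token)) {
 *              ...                      - token held
 *              lwkt_reltoken(&dev_token);
 *      } else {
 *              ...                      - contended, try again later
 *      }
 */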

/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *           asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        /*
         * Remove ref from thread token list and assert that it matches
         * the token passed in.  Tokens must be released in reverse order.
         */
        ref = td->td_toks_stop - 1;
        KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

        /*
         * Only clear the token if it matches ref.  If ref was a recursively
         * acquired token it may not match.  Then adjust td_toks_stop.
         *
         * Some comparisons must be run prior to adjusting td_toks_stop
         * to avoid racing against a fast interrupt/ ipi which tries to
         * acquire a token.
         *
         * We must also be absolutely sure that the compiler does not
         * reorder the clearing of t_ref and the adjustment of td_toks_stop,
         * or reorder the adjustment of td_toks_stop against the conditional.
         *
         * NOTE: The mplock is a token also so sequencing is a bit complex.
         */
        if (tok->t_ref == ref)
                _lwkt_reltoken_spin(tok);
        cpu_sfence();
        cpu_ccfence();
        td->td_toks_stop = ref;
        cpu_ccfence();
        KKASSERT(tok->t_ref != ref);
}

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
        lwkt_reltoken(tok);
        crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
        lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);

        lwkt_reltoken(tok);
}
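
/*
 * Example of the faster form described above (sketch; 'ptr' is any
 * stable address):
 *
 *      lwkt_token_t tok;
 *
 *      tok = lwkt_getpooltoken(ptr);    - remember the token
 *      ...
 *      lwkt_reltoken(tok);              - avoids a second hash lookup
 */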

/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
        lwkt_tokref_t scan;
        int count = 0;

        for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
                if (scan->tr_tok == tok)
                        ++count;
        }
        return(count);
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
        int i;

        for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
                lwkt_token_init(&pool_tokens[i], "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
        return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.
 */
void
lwkt_token_init(lwkt_token_t tok, const char *desc)
{
        tok->t_ref = NULL;
        tok->t_collisions = 0;
        tok->t_collmask = 0;
        tok->t_desc = desc;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
        /* empty */
}
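
/*
 * Example: run-time initialization for a token embedded in a dynamically
 * allocated structure (sketch; 'softc' and 'sc_token' are hypothetical):
 *
 *      lwkt_token_init(&softc->sc_token, "mydev");
 *      ...
 *      lwkt_token_uninit(&softc->sc_token);
 */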

/*
 * Exchange the two most recent tokens on the tokref stack.  This allows
 * you to release a token out of order.
 *
 * We have to be careful about the case where the top two tokens are
 * the same token.  In this case tok->t_ref will point to the deeper
 * ref and must remain pointing to the deeper ref.  If we were to swap
 * it the first release would clear the token even though a second
 * ref is still present.
 */
void
lwkt_token_swap(void)
{
        lwkt_tokref_t ref1, ref2;
        lwkt_token_t tok1, tok2;
        thread_t td = curthread;

        crit_enter();

        ref1 = td->td_toks_stop - 1;
        ref2 = td->td_toks_stop - 2;
        KKASSERT(ref1 > &td->td_toks_base);
        KKASSERT(ref2 > &td->td_toks_base);

        tok1 = ref1->tr_tok;
        tok2 = ref2->tr_tok;
        if (tok1 != tok2) {
                ref1->tr_tok = tok2;
                ref2->tr_tok = tok1;
                if (tok1->t_ref == ref1)
                        tok1->t_ref = ref2;
                if (tok2->t_ref == ref2)
                        tok2->t_ref = ref1;
        }

        crit_exit();
}
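
/*
 * Example: out-of-order release using the swap (sketch; 'tok_a' and
 * 'tok_b' are hypothetical):
 *
 *      lwkt_gettoken(&tok_a);
 *      lwkt_gettoken(&tok_b);
 *      ...
 *      lwkt_token_swap();               - tok_a is now on top
 *      lwkt_reltoken(&tok_a);           - release tok_a out of order
 *      ...
 *      lwkt_reltoken(&tok_b);
 */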

int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
        lwkt_token_t tok = ref->tr_tok;

        KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
                 tok->t_count > 0);

        /* Token is not stale */
        if (tok->t_lastowner == tok->t_owner)
                return (FALSE);

        /*
         * The token is stale.  Reset to not stale so that the next call to
         * lwkt_token_is_stale will return "not stale" unless the token
         * was acquired in-between by another thread.
         */
        tok->t_lastowner = tok->t_owner;
        return (TRUE);
}