/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks, all of its tokens are released, then
 * reacquired when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However, the
 * caller must be sure to release such tokens in reverse order.
 */

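/*
 * Example usage (illustrative sketch only, not part of this file's
 * logic): tokens serialize the holder only while it runs, and nested
 * acquisitions must be released in reverse order, e.g.:
 *
 *	lwkt_gettoken(&vm_token);
 *	lwkt_gettoken(&vmobj_token);
 *	...			(blocking here releases both tokens;
 *				 they are reacquired when we resume)
 *	lwkt_reltoken(&vmobj_token);
 *	lwkt_reltoken(&vm_token);
 */
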
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

extern int lwkt_sched_debug;

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	4001	/* prime number */
#endif

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];
struct spinlock		tok_debug_spin = SPINLOCK_INITIALIZER(&tok_debug_spin);

#define TOKEN_STRING	"REF=%p TOK=%p TD=%p"
#define TOKEN_ARGS	lwkt_tokref_t ref, lwkt_token_t tok, struct thread *td
#define CONTENDED_STRING	TOKEN_STRING " (contention started)"
#define UNCONTENDED_STRING	TOKEN_STRING " (contention stopped)"
#if !defined(KTR_TOKENS)
#define	KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, TOKEN_ARGS);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, TOKEN_ARGS);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_INITIALIZER(vmobj_token);

static int lwkt_token_spin = 5;
SYSCTL_INT(_lwkt, OID_AUTO, token_spin, CTLFLAG_RW,
    &lwkt_token_spin, 0, "Decontention spin loops");
static int lwkt_token_delay = 0;
SYSCTL_INT(_lwkt, OID_AUTO, token_delay, CTLFLAG_RW,
    &lwkt_token_delay, 0, "Decontention spin delay in ns");

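/*
 * Tuning sketch (illustrative, not part of this file): both knobs are
 * plain read-write sysctls, so decontention behavior can be adjusted
 * from userland at run time, e.g.:
 *
 *	sysctl lwkt.token_spin=10	# spin attempts before rescheduling
 *	sysctl lwkt.token_delay=100	# ns pause between attempts
 */
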
/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");

int tokens_debug_output;
SYSCTL_INT(_lwkt, OID_AUTO, tokens_debug_output, CTLFLAG_RW,
    &tokens_debug_output, 0, "Generate stack trace N times");

#ifdef DEBUG_LOCKS_LATENCY

static long tokens_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, tokens_add_latency, CTLFLAG_RW,
	    &tokens_add_latency, 0,
	    "Add spinlock latency");

#endif

static int _lwkt_getalltokens_sorted(thread_t td);

/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}

/*
 * Return a pool token given an address.  Use a prime number to reduce
 * overlaps.
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	u_int i;

	i = (u_int)(uintptr_t)ptr % LWKT_NUM_POOL_TOKENS;
	return(&pool_tokens[i]);
}

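/*
 * Worked example (illustrative): with the default table size of 4001
 * the pointer is truncated to a u_int and reduced modulo 4001, so two
 * unrelated addresses rarely map to the same pool token, and the prime
 * modulus avoids the aliasing a power-of-two table would suffer from
 * aligned structure addresses (whose low bits are all zero).
 */
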
/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 */
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td, long excl)
{
	ref->tr_tok = tok;
	ref->tr_count = excl;
	ref->tr_owner = td;
}

/*
 * Attempt to acquire a shared or exclusive token.  Returns TRUE on success,
 * FALSE on failure.
 *
 * If TOK_EXCLUSIVE is set in mode we are attempting to get an exclusive
 * token, otherwise we are attempting to get a shared token.
 *
 * If TOK_EXCLREQ is set in mode this is a blocking operation, otherwise
 * it is a non-blocking operation (for both exclusive and shared
 * acquisitions).
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
{
	lwkt_token_t tok;
	lwkt_tokref_t oref;
	long count;

	tok = ref->tr_tok;
	KASSERT(((mode & TOK_EXCLREQ) == 0 ||	/* non blocking */
		td->td_gd->gd_intr_nesting_level == 0 ||
		panic_cpu_gd == mycpu),
		("Attempt to acquire token %p not already "
		"held in hard code section", tok));

	if (mode & TOK_EXCLUSIVE) {
		/*
		 * Attempt to get an exclusive token.
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & ~TOK_EXCLREQ) == 0) {
				/*
				 * It is possible to get the exclusive bit.
				 * We must clear TOK_EXCLREQ on successful
				 * acquisition.
				 */
				if (atomic_cmpset_long(&tok->t_count, count,
						       (count & ~TOK_EXCLREQ) |
						       TOK_EXCLUSIVE)) {
					KKASSERT(tok->t_ref == NULL);
					tok->t_ref = ref;
					return TRUE;
				}
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * Our thread already holds the exclusive
				 * bit, we treat this tokref as a shared
				 * token (sorta) to make the token release
				 * code easier.
				 *
				 * NOTE: oref cannot race above if it
				 *	 happens to be ours, so we're good.
				 *	 But we must still have a stable
				 *	 variable for both parts of the
				 *	 comparison.
				 *
				 * NOTE: Since we already have an exclusive
				 *	 lock and don't need to check EXCLREQ
				 *	 we can just use an atomic_add here.
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				ref->tr_count &= ~TOK_EXCLUSIVE;
				return TRUE;
			} else if ((mode & TOK_EXCLREQ) &&
				   (count & TOK_EXCLREQ) == 0) {
				/*
				 * Unable to get the exclusive bit but being
				 * asked to set the exclusive-request bit.
				 * Since we are going to retry anyway just
				 * set the bit unconditionally.
				 */
				atomic_set_long(&tok->t_count, TOK_EXCLREQ);
				return FALSE;
			} else {
				/*
				 * Unable to get the exclusive bit and not
				 * being asked to set the exclusive-request
				 * bit (aka lwkt_trytoken()), or EXCLREQ was
				 * already set.
				 */
				return FALSE;
			}
			/* retry */
		}
	} else {
		/*
		 * Attempt to get a shared token.  Note that TOK_EXCLREQ
		 * for shared tokens simply means the caller intends to
		 * block.  We never actually set the bit in tok->t_count.
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & (TOK_EXCLUSIVE/*|TOK_EXCLREQ*/)) == 0) {
				/* XXX EXCLREQ should work */
				/*
				 * It is possible to get the token shared.
				 */
				if (atomic_cmpset_long(&tok->t_count, count,
						       count + TOK_INCR)) {
					return TRUE;
				}
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * We own the exclusive bit on the token so
				 * we can in fact also get it shared.
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				return TRUE;
			} else {
				/*
				 * We failed to get the token shared.
				 */
				return FALSE;
			}
			/* retry */
		}
	}
}

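/*
 * Layout sketch of tok->t_count as used above (values assumed to match
 * the TOK_* definitions in sys/thread.h; shown for orientation only):
 *
 *	bit 0	TOK_EXCLUSIVE	token is held exclusively
 *	bit 1	TOK_EXCLREQ	an exclusive waiter is pending
 *	bits 2+	shared-hold count, incremented in units of TOK_INCR
 *
 * So (count & ~TOK_EXCLREQ) == 0 above means "no exclusive holder and no
 * shared holders", which is exactly the state in which the exclusive bit
 * may be taken regardless of any pending-request flag.
 */
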
static __inline
int
_lwkt_trytokref_spin(lwkt_tokref_t ref, thread_t td, long mode)
{
	int spin;

	if (_lwkt_trytokref(ref, td, mode)) {
#ifdef DEBUG_LOCKS_LATENCY
		long j;
		for (j = tokens_add_latency; j > 0; --j)
			cpu_ccfence();
#endif
		return TRUE;
	}
	for (spin = lwkt_token_spin; spin > 0; --spin) {
		if (lwkt_token_delay)
			tsc_delay(lwkt_token_delay);
		else
			cpu_pause();
		if (_lwkt_trytokref(ref, td, mode)) {
#ifdef DEBUG_LOCKS_LATENCY
			long j;
			for (j = tokens_add_latency; j > 0; --j)
				cpu_ccfence();
#endif
			return TRUE;
		}
	}
	return FALSE;
}

/*
 * Release a token that we hold.
 */
static __inline
void
_lwkt_reltokref(lwkt_tokref_t ref, thread_t td)
{
	lwkt_token_t tok;
	long count;

	tok = ref->tr_tok;
	for (;;) {
		count = tok->t_count;
		cpu_ccfence();
		if (tok->t_ref == ref) {
			/*
			 * We are an exclusive holder.  We must clear
			 * tok->t_ref before we clear the TOK_EXCLUSIVE
			 * bit.  If we are unable to clear the bit we must
			 * restore tok->t_ref.
			 */
			KKASSERT(count & TOK_EXCLUSIVE);
			tok->t_ref = NULL;
			if (atomic_cmpset_long(&tok->t_count, count,
					       count & ~TOK_EXCLUSIVE)) {
				return;
			}
			tok->t_ref = ref;
			/* retry */
		} else {
			/*
			 * We are a shared holder.
			 */
			KKASSERT(count & TOK_COUNTMASK);
			if (atomic_cmpset_long(&tok->t_count, count,
					       count - TOK_INCR)) {
				return;
			}
			/* retry */
		}
	}
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to re-acquire all
 * tokens that the thread had to release when it switched away.
 *
 * If spinning is non-zero this function acquires the tokens in a particular
 * order to deal with potential deadlocks.  We simply use address order for
 * the case.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td, int spinning)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	if (spinning)
		return(_lwkt_getalltokens_sorted(td));

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Only try really hard on the last token.
			 */
			if (scan == td->td_toks_stop - 1) {
				if (_lwkt_trytokref_spin(scan, td,
							 scan->tr_count))
					break;
			} else {
				if (_lwkt_trytokref(scan, td, scan->tr_count))
					break;
			}

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Release whatever we did get.
			 */
			KASSERT(tok->t_desc,
				("token %p is not initialized", tok));
			strncpy(td->td_gd->gd_cnt.v_token_name,
				tok->t_desc,
				sizeof(td->td_gd->gd_cnt.v_token_name) - 1);

			if (lwkt_sched_debug > 0) {
				--lwkt_sched_debug;
				kprintf("toka %p %s %s\n",
					tok, tok->t_desc, td->td_comm);
			}
			td->td_wmesg = tok->t_desc;
			++tok->t_collisions;
			while (--scan >= &td->td_toks_base)
				_lwkt_reltokref(scan, td);
			return(FALSE);
		}
	}
	return(TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens which
 * are held deeper in the stack, or for the case where a lwkt_getalltokens()
 * failed.
 *
 * Tokens are released in reverse order to reduce chasing race failures.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;

	/*
	 * Weird order is to try to avoid a panic loop.
	 */
	if (td->td_toks_have) {
		scan = td->td_toks_have;
		td->td_toks_have = NULL;
	} else {
		scan = td->td_toks_stop;
	}
	while (--scan >= &td->td_toks_base)
		_lwkt_reltokref(scan, td);
}

/*
 * This is the decontention version of lwkt_getalltokens().  The tokens are
 * acquired in address-sorted order to deal with any deadlocks.  Ultimately
 * token failures will spin into the scheduler and get here.
 *
 * Called from a critical section.
 */
static
int
_lwkt_getalltokens_sorted(thread_t td)
{
	lwkt_tokref_t sort_array[LWKT_MAXTOKENS];
	lwkt_tokref_t scan;
	lwkt_token_t tok;
	int i;
	int j;
	int n;

	/*
	 * Sort the token array.  Yah yah, I know this isn't fun.
	 *
	 * NOTE: Recursively acquired tokens are ordered the same as in the
	 *	 td_toks_array so we can always get the earliest one first.
	 */
	i = 0;
	scan = &td->td_toks_base;
	while (scan < td->td_toks_stop) {
		for (j = 0; j < i; ++j) {
			if (scan->tr_tok < sort_array[j]->tr_tok)
				break;
		}
		if (j != i) {
			bcopy(sort_array + j, sort_array + j + 1,
			      (i - j) * sizeof(lwkt_tokref_t));
		}
		sort_array[j] = scan;
		++scan;
		++i;
	}
	n = i;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (i = 0; i < n; ++i) {
		scan = sort_array[i];
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Only try really hard on the last token.
			 */
			if (scan == td->td_toks_stop - 1) {
				if (_lwkt_trytokref_spin(scan, td,
							 scan->tr_count))
					break;
			} else {
				if (_lwkt_trytokref(scan, td, scan->tr_count))
					break;
			}

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Release whatever we did get.
			 */
			if (lwkt_sched_debug > 0) {
				--lwkt_sched_debug;
				kprintf("tokb %p %s %s\n",
					tok, tok->t_desc, td->td_comm);
			}
			td->td_wmesg = tok->t_desc;
			++tok->t_collisions;
			while (--i >= 0) {
				scan = sort_array[i];
				_lwkt_reltokref(scan, td);
			}
			return(FALSE);
		}
	}

	/*
	 * We were successful, there is no need for another core to signal
	 * us.
	 */
	return(TRUE);
}

/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking an exclusive token after holding it shared will
	 * livelock.  Scan for that case and assert.
	 */
	lwkt_tokref_t tk;
	int found = 0;

	for (tk = &td->td_toks_base; tk < ref; tk++) {
		if (tk->tr_tok != tok)
			continue;

		found++;
		if (tk->tr_count & TOK_EXCLUSIVE)
			goto good;
	}
	/* We found only shared instances of this token if found >0 here */
	KASSERT((found == 0), ("Token %p s/x livelock", tok));
good:
#endif

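	/*
	 * Illustration of the livelock asserted against above (sketch,
	 * not executable): if a thread does
	 *
	 *	lwkt_gettoken_shared(&t);	(holds t shared)
	 *	lwkt_gettoken(&t);		(wants t exclusive)
	 *
	 * the exclusive attempt can never succeed because the thread's
	 * own shared hold keeps the count nonzero, and the shared hold
	 * can never be released because the thread is stuck acquiring.
	 * Hence the assertion rather than a silent hang.
	 */
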
	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLUSIVE|TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Excl Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	lwkt_switch();
	logtoken(succ, ref);
	KKASSERT(tok->t_ref == ref);
}

/*
 * Similar to gettoken but we acquire a shared token instead of an exclusive
 * one.
 */
void
lwkt_gettoken_shared(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking a pool token in shared mode is a bad idea; other
	 * addresses deeper in the call stack may hash to the same pool
	 * token and you may end up with an exclusive-shared livelock.
	 * Warn in this condition.
	 */
	if ((tok >= &pool_tokens[0]) &&
	    (tok < &pool_tokens[LWKT_NUM_POOL_TOKENS]))
		kprintf("Warning! Taking pool token %p in shared mode\n", tok);
#endif

	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Shar Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	lwkt_switch();
	logtoken(succ, ref);
}

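/*
 * Usage sketch (illustrative): shared mode suits read-mostly paths where
 * many threads may traverse a structure concurrently while mutators take
 * the token exclusively:
 *
 *	lwkt_gettoken_shared(&vnode_token);
 *	...read-only traversal...
 *	lwkt_reltoken(&vnode_token);
 *
 * Note that upgrading shared to exclusive on the same token is exactly
 * the s/x livelock asserted against in lwkt_gettoken().
 */
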
/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 *
 * We setup the tokref in case we actually get the token (if we switch later
 * it becomes mandatory so we set TOK_EXCLREQ), but we call trytokref without
 * TOK_EXCLREQ in case we fail.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);

	if (_lwkt_trytokref(ref, td, TOK_EXCLUSIVE))
		return TRUE;

	/*
	 * Failed, unpend the request.
	 */
	--td->td_toks_stop;
	return FALSE;
}

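/*
 * Usage sketch (illustrative): the non-blocking form supports
 * opportunistic locking with a deferral fallback, e.g.:
 *
 *	if (lwkt_trytoken(&proc_token)) {
 *		...fast path...
 *		lwkt_reltoken(&proc_token);
 *	} else {
 *		...queue the work or take the blocking path...
 *	}
 */
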
void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	lwkt_gettoken(tok);
	crit_enter_hard();
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	lwkt_token_t tok;

	tok = _lwkt_token_pool_lookup(ptr);
	lwkt_gettoken(tok);
	return(tok);
}

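/*
 * Usage sketch (illustrative): pool tokens serialize on an address
 * without requiring the structure to embed its own token:
 *
 *	lwkt_getpooltoken(p);		(p is any stable pointer)
 *	...mutate *p...
 *	lwkt_relpooltoken(p);
 *
 * Distinct addresses may hash to the same pool token, which is harmless
 * for correctness but is why shared-mode pool acquisition is warned
 * about in lwkt_gettoken_shared().
 */
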
/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);
	_lwkt_reltokref(ref, td);
	cpu_sfence();
	td->td_toks_stop = ref;
}

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the ident.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);

	lwkt_reltoken(tok);
}

/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return(count);
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.
 */
void
lwkt_token_init(lwkt_token_t tok, const char *desc)
{
	tok->t_count = 0;
	tok->t_ref = NULL;
	tok->t_collisions = 0;
	tok->t_desc = desc;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}

/*
 * Exchange the two most recent tokens on the tokref stack.  This allows
 * you to release a token out of order.
 *
 * We have to be careful about the case where the top two tokens are
 * the same token.  In this case tok->t_ref will point to the deeper
 * ref and must remain pointing to the deeper ref.  If we were to swap
 * it the first release would clear the token even though a second
 * ref is still present.
 *
 * Only exclusively held tokens contain a reference to the tokref which
 * has to be flipped along with the swap.
 */
void
lwkt_token_swap(void)
{
	lwkt_tokref_t ref1, ref2;
	lwkt_token_t tok1, tok2;
	long count1, count2;
	thread_t td = curthread;

	crit_enter();

	ref1 = td->td_toks_stop - 1;
	ref2 = td->td_toks_stop - 2;
	KKASSERT(ref1 >= &td->td_toks_base);
	KKASSERT(ref2 >= &td->td_toks_base);

	tok1 = ref1->tr_tok;
	tok2 = ref2->tr_tok;
	count1 = ref1->tr_count;
	count2 = ref2->tr_count;

	if (tok1 != tok2) {
		ref1->tr_tok = tok2;
		ref1->tr_count = count2;
		ref2->tr_tok = tok1;
		ref2->tr_count = count1;
		if (tok1->t_ref == ref1)
			tok1->t_ref = ref2;
		if (tok2->t_ref == ref2)
			tok2->t_ref = ref1;
	}

	crit_exit();
}

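/*
 * Usage sketch (illustrative): since lwkt_reltoken() asserts strict LIFO
 * order, releasing the deeper of two tokens first requires a swap:
 *
 *	lwkt_gettoken(&tokA);
 *	lwkt_gettoken(&tokB);
 *	...
 *	lwkt_token_swap();		(top of stack is now tokA)
 *	lwkt_reltoken(&tokA);		(tokB remains held)
 */
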
#ifdef DDB
DB_SHOW_COMMAND(tokens, db_tok_all)
{
	struct lwkt_token *tok, **ptr;
	struct lwkt_token *toklist[16] = {
		&mp_token,
		&pmap_token,
		&dev_token,
		&vm_token,
		&vmspace_token,
		&kvm_token,
		&proc_token,
		&tty_token,
		&vnode_token,
		&vmobj_token,
		NULL
	};

	ptr = toklist;
	for (tok = *ptr; tok; tok = *(++ptr)) {
		db_printf("tok=%p tr_owner=%p t_collisions=%ld t_desc=%s\n",
		    tok, (tok->t_ref ? tok->t_ref->tr_owner : NULL),
		    tok->t_collisions, tok->t_desc);
	}
}
#endif /* DDB */