kernel - Add description to static token initializers
[dragonfly.git] / sys/kern/lwkt_token.c
/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
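
/*
 * Illustrative usage sketch: a typical caller brackets its critical path
 * with lwkt_gettoken() and lwkt_reltoken().  The token and function names
 * here are hypothetical.
 *
 *	struct lwkt_token foo_token;	initialized via lwkt_token_init()
 *
 *	void
 *	foo_operate(void)
 *	{
 *		lwkt_gettoken(&foo_token);
 *		...			serialized while running; if we
 *					block, the token is dropped and
 *					reacquired before we resume
 *		lwkt_reltoken(&foo_token);
 *	}
 */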
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int	token_debug = 0;
#endif

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING		"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time; the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER(vnode_token);

SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe,
	   CTLFLAG_RW, &pmap_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe,
	   CTLFLAG_RW, &dev_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe,
	   CTLFLAG_RW, &vm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe,
	   CTLFLAG_RW, &vmspace_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe,
	   CTLFLAG_RW, &kvm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe,
	   CTLFLAG_RW, &proc_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe,
	   CTLFLAG_RW, &tty_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe,
	   CTLFLAG_RW, &vnode_token.t_flags, 0, "");

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens, in addition to a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions,
	    CTLFLAG_RW, &pmap_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions,
	    CTLFLAG_RW, &dev_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions,
	    CTLFLAG_RW, &vm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions,
	    CTLFLAG_RW, &vmspace_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions,
	    CTLFLAG_RW, &kvm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions,
	    CTLFLAG_RW, &proc_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions,
	    CTLFLAG_RW, &tty_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions,
	    CTLFLAG_RW, &vnode_token.t_collisions, 0, "");

/*
 * Return a pool token given an address.
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
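
/*
 * Illustrative arithmetic sketch: the lookup XORs two right-shifted copies
 * of the address so that both adjacent structures and structures on
 * different pages spread across the pool.  For a hypothetical pointer
 * value 0x12345678:
 *
 *	(0x12345678 >> 2)		= 0x048D159E
 *	(0x12345678 >> 12)		= 0x00012345
 *	XOR				= 0x048C36DB
 *	& LWKT_MASK_POOL_TOKENS (0x3FF)	= 0x2DB -> pool_tokens[731]
 */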

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the MP lock.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the
 * thread is already holding the MP lock.
 */
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = tok->t_flags;
#ifdef SMP
	if (td->td_mpcount)
#endif
		ref->tr_flags |= LWKT_TOKEN_MPSAFE;
}
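
/*
 * Illustrative note: each thread carries a small fixed array of tokrefs.
 * td_toks_stop points at the next free slot, so acquisition pushes and
 * release pops like a stack:
 *
 *	td_toks_base[0]		held (outermost acquisition)
 *	td_toks_base[1]		held
 *	td_toks_stop	->	next free slot
 *	...
 *	td_toks_end		hard limit, KKASSERT'd on acquisition
 */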

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu; return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens() is called by the LWKT scheduler to reacquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.  We also do not do any
 * logging here; the logging done by lwkt_gettoken() is plenty good
 * enough to get a feel for it.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td, const char **msgp, const void **addrp)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
					break;
				continue;
			}

			/*
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.
			 */
			*msgp = tok->t_desc;
			*addrp = scan->tr_stallpc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens,
 * note that t_ref may not match the scan for recursively held tokens or
 * for the case where lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan)
			tok->t_ref = NULL;
	}
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	KKASSERT(td->td_gd->gd_intr_nesting_level == 0);

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership.
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed.
		 */
		return(FALSE);
	}
}

/*
 * Acquire a serializing token.  This routine does not block.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td)
{
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
		if (try_mplock() == 0)
			return (FALSE);
	}
	if (_lwkt_trytokref2(ref, td) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		--td->td_toks_stop;
		if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
			rel_mplock();
		return (FALSE);
	}
	return (TRUE);
}

/*
 * Acquire a serializing token.  This routine can block.
 */
static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref, thread_t td, const void **stkframe)
{
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();
	if (_lwkt_trytokref2(ref, td) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		ref->tr_stallpc = stkframe[-1];
		atomic_add_long(&ref->tr_tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(ref->tr_tok->t_ref == ref);
	}
}

void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	_lwkt_tokref_init(ref, tok, td);
	++td->td_toks_stop;
	_lwkt_gettokref(ref, td, (const void **)&tok);
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	tok = _lwkt_token_pool_lookup(ptr);
	_lwkt_tokref_init(ref, tok, td);
	++td->td_toks_stop;
	_lwkt_gettokref(ref, td, (const void **)&ptr);
	return(tok);
}
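
/*
 * Illustrative usage sketch: pool tokens serialize access to a structure
 * by address without embedding a token in the structure itself.  The
 * structure and field names are hypothetical.
 *
 *	struct foo *fp = ...;
 *	lwkt_token_t tok;
 *
 *	tok = lwkt_getpooltoken(fp);	hashes fp to one of the pool
 *					tokens and acquires it
 *	fp->refs++;
 *	lwkt_reltoken(tok);
 */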

int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	_lwkt_tokref_init(ref, tok, td);
	++td->td_toks_stop;
	return(_lwkt_trytokref(ref, td));
}
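
/*
 * Illustrative usage sketch: a non-blocking attempt with a fallback.  The
 * token name is hypothetical.
 *
 *	if (lwkt_trytoken(&foo_token)) {
 *		...		fast path, token held
 *		lwkt_reltoken(&foo_token);
 *	} else {
 *		...		defer or requeue the work instead of
 *				blocking here
 *	}
 */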

/*
 * Release a serializing token.
 *
 * WARNING! All tokens must be released in reverse order.  This will be
 *	    asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from the thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);
	td->td_toks_stop = ref;

	/*
	 * If the token was not MPSAFE release the MP lock.
	 */
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
		rel_mplock();

	/*
	 * Make sure the compiler does not reorder the clearing of
	 * tok->t_ref.
	 */
	cpu_ccfence();

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.
	 */
	if (tok->t_ref == ref)
		tok->t_ref = NULL;
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], 1, "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
	tok->t_collisions = 0;
	tok->t_desc = desc;
}
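
/*
 * Illustrative usage sketch: initializing a token with a description so
 * that stall reports from lwkt_getalltokens() can name it.  The structure,
 * field names, and malloc type are hypothetical.
 *
 *	struct foo *fp = kmalloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
 *
 *	lwkt_token_init(&fp->f_token, 1, "footk");	mpsafe, named "footk"
 *	...
 *	lwkt_token_uninit(&fp->f_token);		before freeing fp
 */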

void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale() will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
#endif