kernel - MPSAFE work - add vmspace_token
[dragonfly.git] / sys / kern / lwkt_token.c

/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks, all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However, the
 * caller must be sure to release such tokens in reverse order.
 */
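
/*
 * Editorial usage sketch (not part of the original commit): the canonical
 * pattern is a paired lwkt_gettoken()/lwkt_reltoken() bracketing the
 * serialized code.  Because all tokens are released while the holder is
 * blocked, any state guarded by the token must be revalidated after a
 * blocking call made while holding it.  vm_token is one of the global
 * tokens declared later in this file.
 */
#if 0
static void
example_token_usage(void)
{
        lwkt_gettoken(&vm_token);       /* may block during acquisition */
        /*
         * ... access structures guarded by vm_token; if we block here
         * the token is temporarily lost and then reacquired before we
         * resume, so cached pointers must be revalidated ...
         */
        lwkt_reltoken(&vm_token);       /* release in reverse order */
}
#endif
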
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS    1024    /* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS   (LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING            "REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING        "REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING      "REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS      KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)                                             \
        KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time; the MP state is copied to the tokref when the token is
 * acquired and will not race against sysctl changes.
 */
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER;
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER;

SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe,
           CTLFLAG_RW, &pmap_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe,
           CTLFLAG_RW, &dev_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe,
           CTLFLAG_RW, &vm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe,
           CTLFLAG_RW, &vmspace_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe,
           CTLFLAG_RW, &kvm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe,
           CTLFLAG_RW, &proc_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe,
           CTLFLAG_RW, &tty_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe,
           CTLFLAG_RW, &vnode_token.t_flags, 0, "");

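/*
 * Editorial note (hedged sketch): because each token's t_flags word is
 * exported read/write above, the MPSAFE behavior of a global token can be
 * toggled from userland at runtime, e.g. (assuming LWKT_TOKEN_MPSAFE is
 * the 0x0001 bit):
 *
 *      sysctl lwkt.vm_mpsafe=1
 *
 * As the comment above notes, the MP state is copied into the tokref at
 * acquisition time, so flipping the sysctl does not race against tokens
 * that are already held.
 */
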
/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens, in addition to being bumped on a normal
 * lwkt_gettoken() stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions,
            CTLFLAG_RW, &pmap_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions,
            CTLFLAG_RW, &dev_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions,
            CTLFLAG_RW, &vm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions,
            CTLFLAG_RW, &vmspace_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions,
            CTLFLAG_RW, &kvm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions,
            CTLFLAG_RW, &proc_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions,
            CTLFLAG_RW, &tty_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions,
            CTLFLAG_RW, &vnode_token.t_collisions, 0, "");

/*
 * Return a pool token given an address.  The address is hashed (two
 * shifted copies xor'd together) to index the pool_tokens[] array.
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
        int i;

        i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
        return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the MP lock.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the
 * thread is already holding the MP lock.
 */
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td)
{
        ref->tr_tok = tok;
        ref->tr_owner = td;
        ref->tr_flags = tok->t_flags;
#ifdef SMP
        if (td->td_mpcount)
#endif
                ref->tr_flags |= LWKT_TOKEN_MPSAFE;
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu; return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens() is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.  We also do not do any
 * logging here; the logging done by lwkt_gettoken() is plenty good
 * enough to get a feel for it.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
        lwkt_tokref_t scan;
        lwkt_tokref_t ref;
        lwkt_token_t tok;

        /*
         * Acquire tokens in forward order, assign or validate tok->t_ref.
         */
        for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
                tok = scan->tr_tok;
                for (;;) {
                        /*
                         * Try to acquire the token if we do not already have
                         * it.
                         *
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        ref = tok->t_ref;
                        if (ref == NULL) {
                                if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
                                        break;
                                continue;
                        }

                        /*
                         * Test if ref is already recursively held by this
                         * thread.  We cannot safely dereference tok->t_ref
                         * (it might belong to another thread and is thus
                         * unstable), but we don't have to.  We can simply
                         * range-check it.
                         */
                        if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
                                break;

                        /*
                         * Otherwise we failed to acquire all the tokens.
                         * Undo and return.
                         */
                        atomic_add_long(&tok->t_collisions, 1);
                        lwkt_relalltokens(td);
                        return(FALSE);
                }
        }
        return (TRUE);
}

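/*
 * Editorial sketch of the scheduler interplay described above (simplified,
 * not copied from the actual lwkt_switch() code): a sleeping thread's
 * tokens were released via lwkt_relalltokens(), and the scheduler may only
 * resume the thread once lwkt_getalltokens() succeeds, skipping it and
 * trying another runnable thread otherwise.
 */
#if 0
        /* in the scheduler's selection loop, within a critical section */
        if (ntd->td_toks_stop != &ntd->td_toks_base &&
            lwkt_getalltokens(ntd) == FALSE) {
                continue;       /* tokens unavailable; try another thread */
        }
        /* ntd may now run; its token state has been restored */
#endif
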
/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
        lwkt_tokref_t scan;
        lwkt_token_t tok;

        for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
                tok = scan->tr_tok;
                if (tok->t_ref == scan)
                        tok->t_ref = NULL;
        }
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td)
{
        lwkt_token_t tok;
        lwkt_tokref_t ref;

        KKASSERT(td->td_gd->gd_intr_nesting_level == 0);

        /*
         * Make sure the compiler does not reorder prior instructions
         * beyond this demark.
         */
        cpu_ccfence();

        /*
         * Attempt to gain ownership
         */
        tok = nref->tr_tok;
        for (;;) {
                /*
                 * Try to acquire the token if we do not already have
                 * it.
                 */
                ref = tok->t_ref;
                if (ref == NULL) {
                        /*
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
                                return (TRUE);
                        continue;
                }

                /*
                 * Test if ref is already recursively held by this
                 * thread.  We cannot safely dereference tok->t_ref
                 * (it might belong to another thread and is thus
                 * unstable), but we don't have to.  We can simply
                 * range-check it.
                 */
                if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
                        return(TRUE);

                /*
                 * Otherwise we failed.
                 */
                return(FALSE);
        }
}

/*
 * Acquire a serializing token.  This routine does not block.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td)
{
        if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
                if (try_mplock() == 0)
                        return (FALSE);
        }
        if (_lwkt_trytokref2(ref, td) == FALSE) {
                /*
                 * Cleanup, deactivate the failed token.
                 */
                --td->td_toks_stop;
                if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
                        rel_mplock();
                return (FALSE);
        }
        return (TRUE);
}

/*
 * Acquire a serializing token.  This routine can block.
 */
static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref, thread_t td)
{
        if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
                get_mplock();
        if (_lwkt_trytokref2(ref, td) == FALSE) {
                /*
                 * Give up running if we can't acquire the token right now.
                 *
                 * Since the tokref is already active the scheduler now
                 * takes care of acquisition, so we need only call
                 * lwkt_yield().
                 *
                 * Since we failed this was not a recursive token so upon
                 * return tr_tok->t_ref should be assigned to this specific
                 * ref.
                 */
                atomic_add_long(&ref->tr_tok->t_collisions, 1);
                logtoken(fail, ref);
                lwkt_yield();
                logtoken(succ, ref);
                KKASSERT(ref->tr_tok->t_ref == ref);
        }
}

void
lwkt_gettoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        _lwkt_tokref_init(ref, tok, td);
        ++td->td_toks_stop;
        _lwkt_gettokref(ref, td);
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
        thread_t td = curthread;
        lwkt_token_t tok;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        tok = _lwkt_token_pool_lookup(ptr);
        _lwkt_tokref_init(ref, tok, td);
        ++td->td_toks_stop;
        _lwkt_gettokref(ref, td);
        return(tok);
}

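/*
 * Editorial usage sketch: pool tokens serialize access to a structure by
 * address without embedding a token in the structure itself, and because
 * the pool storage is type-stable the token may be safely looked up even
 * as structures come and go.  The 'obj' pointer here is purely
 * hypothetical.
 */
#if 0
static void
example_pool_token_usage(void *obj)
{
        lwkt_token_t tok;

        tok = lwkt_getpooltoken(obj);   /* token hashed from obj's address */
        /* ... serialized operations on obj ... */
        lwkt_reltoken(tok);             /* release the same token */
}
#endif
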
int
lwkt_trytoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        ref = td->td_toks_stop;
        KKASSERT(ref < &td->td_toks_end);
        _lwkt_tokref_init(ref, tok, td);
        ++td->td_toks_stop;
        return(_lwkt_trytokref(ref, td));
}

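/*
 * Editorial usage sketch: on failure lwkt_trytoken() backs the tokref out
 * of the thread's token array itself (see _lwkt_trytokref() above), so the
 * caller must not call lwkt_reltoken() on the failure path.
 */
#if 0
        if (lwkt_trytoken(&tty_token)) {
                /* ... non-blocking fast path, token held ... */
                lwkt_reltoken(&tty_token);
        } else {
                /* contended; defer the work or use blocking lwkt_gettoken() */
        }
#endif
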
/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will
 *           be asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
        thread_t td = curthread;
        lwkt_tokref_t ref;

        /*
         * Remove ref from thread token list and assert that it matches
         * the token passed in.  Tokens must be released in reverse order.
         */
        ref = td->td_toks_stop - 1;
        KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);
        td->td_toks_stop = ref;

        /*
         * If the token was not MPSAFE release the MP lock.
         */
        if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
                rel_mplock();

        /*
         * Make sure the compiler does not reorder the clearing of
         * tok->t_ref.
         */
        cpu_ccfence();

        /*
         * Only clear the token if it matches ref.  If ref was a recursively
         * acquired token it may not match.
         */
        if (tok->t_ref == ref)
                tok->t_ref = NULL;
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
        int i;

        for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
                lwkt_token_init(&pool_tokens[i], 1);
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
        return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  If mpsafe is non-zero the token is flagged MPSAFE
 * and acquiring it will not also acquire the MP lock.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe)
{
        tok->t_ref = NULL;
        tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
        /* empty */
}

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
        lwkt_token_t tok = ref->tr_tok;

        KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
                 tok->t_count > 0);

        /* Token is not stale */
        if (tok->t_lastowner == tok->t_owner)
                return (FALSE);

        /*
         * The token is stale.  Reset to not stale so that the next call to
         * lwkt_token_is_stale() will return "not stale" unless the token
         * was acquired in-between by another thread.
         */
        tok->t_lastowner = tok->t_owner;
        return (TRUE);
}
#endif