kernel - Refactor the lwkt_token code, making it faster
[dragonfly.git] / sys / kern / lwkt_token.c
/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
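/*
 * Example usage (a hedged sketch only; "my_token" and the surrounding
 * function are hypothetical and not part of this file).  A caller keeps
 * a tokref on its own stack, acquires the token around the serialized
 * work, and releases recursively held tokens in reverse order:
 *
 *      struct lwkt_token my_token;     // set up once via lwkt_token_init()
 *
 *      void
 *      my_serialized_work(void)
 *      {
 *              struct lwkt_tokref tref;
 *
 *              lwkt_gettoken(&tref, &my_token);
 *              // ...may block; the token is released while we sleep and
 *              // reacquired by the scheduler before we run again...
 *              lwkt_reltoken(&tref);
 *      }
 */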
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS    1024    /* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS   (LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING        "REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING    "REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING  "REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS  KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)                                             \
        KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
        int i;

        i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
        return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
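/*
 * Illustrative note (added commentary, not functional code): the pool
 * index mixes two shifted copies of the address and masks the result
 * into the power-of-2 pool.  For example, assuming a made-up pointer
 * value of 0x1234, the index is (0x1234 >> 2) ^ (0x1234 >> 12), i.e.
 * 0x48d ^ 0x1 = 0x48c, masked by LWKT_MASK_POOL_TOKENS (0x3ff) to
 * slot 0x8c.  Distinct structures may hash to the same pool token;
 * that only costs extra serialization, never correctness.
 */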
/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
        lwkt_tokref_t scan1;
        lwkt_tokref_t scan2;
        lwkt_tokref_t ref;
        lwkt_token_t tok;

        for (scan1 = td->td_toks; scan1; scan1 = scan1->tr_next) {
                tok = scan1->tr_tok;
                for (;;) {
                        /*
                         * Try to acquire the token if we do not already have
                         * it.
                         *
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        ref = tok->t_ref;
                        if (ref == scan1)
                                break;
                        if (ref == NULL) {
                                if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan1))
                                        break;
                                continue;
                        }

                        /*
                         * If acquisition fails the token might be held
                         * recursively by another ref owned by the same
                         * thread.
                         *
                         * NOTE! We cannot just dereference 'ref' to test
                         *       the tr_owner as its storage will be
                         *       unstable if it belongs to another thread.
                         *
                         * NOTE! Since tokens are inserted at the head
                         *       of the list we must migrate such tokens
                         *       so the actual lock is not cleared until
                         *       the last release.
                         */
                        scan2 = td->td_toks;
                        for (;;) {
                                if (scan2 == scan1)
                                        return(FALSE);
                                if (scan2 == ref) {
                                        tok->t_ref = scan1;
                                        break;
                                }
                                scan2 = scan2->tr_next;
                        }
                        break;
                }
        }
        return (TRUE);
}
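/*
 * Hedged sketch of the expected caller (the real call sites live in the
 * LWKT scheduler/switch code; the snippet below is illustrative only and
 * the variable names are made up):
 *
 *      crit_enter();
 *      if (ntd->td_toks == NULL || lwkt_getalltokens(ntd)) {
 *              // every token ntd held before blocking is held again,
 *              // so it is safe to resume ntd
 *      } else {
 *              // some token is owned by another thread's ref; leave
 *              // ntd blocked and pick a different thread to run
 *      }
 *      crit_exit();
 */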
/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
        lwkt_tokref_t scan1;
        lwkt_token_t tok;

        for (scan1 = td->td_toks; scan1; scan1 = scan1->tr_next) {
                tok = scan1->tr_tok;
                if (tok->t_ref == scan1)
                        tok->t_ref = NULL;
        }
}

/*
 * Token acquisition helper function.  Note that get/trytokref do not
 * reset t_lastowner if the token is already held.  Only lwkt_token_is_stale()
 * is allowed to do that.
 *
 * NOTE: On failure, this function does not remove the token from the
 *       thread's token list; the caller has to do that itself:
 *
 *       td->td_toks = ref->tr_next;
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td)
{
        lwkt_tokref_t ref;
        lwkt_tokref_t scan2;
        lwkt_token_t tok;

        KKASSERT(td->td_gd->gd_intr_nesting_level == 0);

        /*
         * Link the tokref into curthread's list.  Make sure the
         * cpu does not reorder these instructions!
         */
        nref->tr_next = td->td_toks;
        cpu_ccfence();
        td->td_toks = nref;
        cpu_ccfence();

        /*
         * Attempt to gain ownership
         */
        tok = nref->tr_tok;
        for (;;) {
                /*
                 * Try to acquire the token if we do not already have
                 * it.
                 */
                ref = tok->t_ref;
                if (ref == nref)
                        return (TRUE);
                if (ref == NULL) {
                        /*
                         * NOTE: If atomic_cmpset_ptr() fails we have to
                         *       loop and try again.  It just means we
                         *       lost a cpu race.
                         */
                        if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
                                return (TRUE);
                        continue;
                }

                /*
                 * If acquisition fails the token might be held
                 * recursively by another ref owned by the same
                 * thread.
                 *
                 * NOTE! We cannot just dereference 'ref' to test
                 *       the tr_owner as its storage will be
                 *       unstable if it belongs to another thread.
                 *
                 * NOTE! We do not migrate t_ref to nref here as we
                 *       want the recursion unwinding in reverse order
                 *       to NOT release the token until the last
                 *       recursive ref is released.
                 */
                for (scan2 = nref->tr_next; scan2; scan2 = scan2->tr_next) {
                        if (scan2 == ref)
                                return(TRUE);
                }
                return(FALSE);
        }
}

/*
 * Acquire a serializing token.  This routine does not block.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td)
{
        if (_lwkt_trytokref2(ref, td) == FALSE) {
                /*
                 * Cleanup.  Remove the token from the thread's list.
                 */
                td->td_toks = ref->tr_next;
                return (FALSE);
        }
        return (TRUE);
}

/*
 * Acquire a serializing token.  This routine can block.
 */
static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref, thread_t td)
{
        if (_lwkt_trytokref2(ref, td) == FALSE) {
                /*
                 * Give up running if we can't acquire the token right now.
                 * But since we have already linked the tokref into the
                 * thread's list (_lwkt_trytokref2), the scheduler will
                 * acquire the token for us (by calling lwkt_getalltokens)
                 * before resuming execution.  As such, when we return from
                 * lwkt_yield(), the token is acquired.
                 *
                 * Since we failed this is not a recursive token so upon
                 * return tr_tok->t_ref should be assigned to this specific
                 * ref.
                 */
                logtoken(fail, ref);
                lwkt_yield();
                logtoken(succ, ref);
#if 0
                if (ref->tr_tok->t_ref != ref) {
                        lwkt_tokref_t scan;
                        kprintf("gettokref %p failed, held by tok %p ref %p\n",
                                ref, ref->tr_tok, ref->tr_tok->t_ref);
                        for (scan = td->td_toks; scan; scan = scan->tr_next) {
                                kprintf("    %p\n", scan);
                        }
                }
#endif
                KKASSERT(ref->tr_tok->t_ref == ref);
        }
}
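/*
 * Added summary comment (descriptive only): the blocking path above works
 * in three steps -- (1) _lwkt_trytokref2() links the tokref into
 * td->td_toks and attempts an atomic_cmpset_ptr() on tok->t_ref, (2) on
 * failure we lwkt_yield() with the tokref still linked, and (3) the
 * scheduler refuses to resume the thread until lwkt_getalltokens()
 * succeeds, so the KKASSERT() above holds on return.
 */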
void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
        thread_t td = curthread;

        lwkt_tokref_init(ref, tok, td);
        _lwkt_gettokref(ref, td);
}

void
lwkt_getpooltoken(lwkt_tokref_t ref, void *ptr)
{
        thread_t td = curthread;

        lwkt_tokref_init(ref, _lwkt_token_pool_lookup(ptr), td);
        _lwkt_gettokref(ref, td);
}

void
lwkt_gettokref(lwkt_tokref_t ref)
{
        _lwkt_gettokref(ref, ref->tr_owner);
}

int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
        thread_t td = curthread;

        lwkt_tokref_init(ref, tok, td);
        return(_lwkt_trytokref(ref, td));
}

int
lwkt_trytokref(lwkt_tokref_t ref)
{
        return(_lwkt_trytokref(ref, ref->tr_owner));
}

/*
 * Release a serializing token.
 *
 * WARNING! Any recursive tokens must be released in reverse order.
 */
void
lwkt_reltoken(lwkt_tokref_t ref)
{
        struct lwkt_tokref **scanp;
        lwkt_token_t tok;
        thread_t td;

        tok = ref->tr_tok;

        /*
         * Remove the ref from the thread's token list.
         *
         * NOTE: td == curthread
         */
        td = ref->tr_owner;
        for (scanp = &td->td_toks; *scanp != ref; scanp = &((*scanp)->tr_next))
                ;
        *scanp = ref->tr_next;
        cpu_ccfence();

        /*
         * Only clear the token if it matches ref.  If ref was a recursively
         * acquired token it may not match.
         */
        if (tok->t_ref == ref)
                tok->t_ref = NULL;
}
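/*
 * Hedged example of the reverse-order rule above ("my_token" and the
 * tokref names are hypothetical, not code from this file): if the same
 * token is acquired twice by one thread, the inner reference must be
 * released first so that tok->t_ref is only cleared by the outermost
 * release.
 *
 *      struct lwkt_tokref outer, inner;
 *
 *      lwkt_gettoken(&outer, &my_token);   // tok->t_ref = &outer
 *      lwkt_gettoken(&inner, &my_token);   // recursive, t_ref unchanged
 *      ...
 *      lwkt_reltoken(&inner);              // t_ref != &inner, still held
 *      lwkt_reltoken(&outer);              // t_ref cleared, token released
 */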
/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
        int i;

        for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
                lwkt_token_init(&pool_tokens[i]);
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
        return (_lwkt_token_pool_lookup(ptr));
}
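/*
 * Hedged usage sketch (the object name is made up): pool tokens let a
 * caller serialize on the address of an object without embedding a token
 * in it, and because the pool storage is type-stable the token cannot
 * vanish even if the object itself is freed by another subsystem.
 *
 *      struct lwkt_tokref tref;
 *
 *      lwkt_getpooltoken(&tref, obj);
 *      // ...examine or tear down the object...
 *      lwkt_reltoken(&tref);
 */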
/*
 * Initialize a token.  The token starts out unowned (t_ref NULL).
 */
void
lwkt_token_init(lwkt_token_t tok)
{
        tok->t_ref = NULL;
}

void
lwkt_token_uninit(lwkt_token_t tok)
{
        /* empty */
}

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
        lwkt_token_t tok = ref->tr_tok;

        KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
                 tok->t_count > 0);

        /* Token is not stale */
        if (tok->t_lastowner == tok->t_owner)
                return (FALSE);

        /*
         * The token is stale.  Reset to not stale so that the next call to
         * lwkt_token_is_stale will return "not stale" unless the token
         * was acquired in-between by another thread.
         */
        tok->t_lastowner = tok->t_owner;
        return (TRUE);
}
#endif