2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.6 2004/03/08 03:03:54 dillon Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/rtprio.h>
36 #include <sys/queue.h>
37 #include <sys/thread2.h>
38 #include <sys/sysctl.h>
39 #include <sys/kthread.h>
40 #include <machine/cpu.h>
45 #include <vm/vm_param.h>
46 #include <vm/vm_kern.h>
47 #include <vm/vm_object.h>
48 #include <vm/vm_page.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_pager.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_zone.h>
54 #include <machine/stdarg.h>
55 #include <machine/ipl.h>
56 #include <machine/smp.h>
58 #define THREAD_STACK (UPAGES * PAGE_SIZE)
62 #include <sys/stdint.h>
63 #include <libcaps/thread.h>
64 #include <sys/thread.h>
65 #include <sys/msgport.h>
66 #include <sys/errno.h>
67 #include <libcaps/globaldata.h>
68 #include <sys/thread2.h>
69 #include <sys/msgport2.h>
73 #include <machine/cpufunc.h>
74 #include <machine/lock.h>
/*
 * Build-time selection of how the scheduler waits for remote tokens:
 * spin (busy-wait) or yield. Exactly one should be defined; the
 * consumers below #error out if neither is.
 */
78 #define MAKE_TOKENS_SPIN
79 /* #define MAKE_TOKENS_YIELD */
/* Size of the global pool-token hash table; must stay a power of 2 so the mask below works. */
81 #ifndef LWKT_NUM_POOL_TOKENS
82 #define LWKT_NUM_POOL_TOKENS 1024 /* power of 2 */
84 #define LWKT_MASK_POOL_TOKENS (LWKT_NUM_POOL_TOKENS - 1)
/* Debug knob, exported read-write via the sysctl declared below. */
87 static int token_debug = 0;
/* Statically allocated pool tokens; hashed into by lwkt_token_pool_get(). */
90 static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];
95 SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
103 * Determine if we own all the tokens in the token reference list.
104 * Return 1 on success, 0 on failure.
106 * As a side effect, queue requests for tokens we want which are owned
107 * by other cpus. The magic number is used to communicate when the
108 * target cpu has processed the request. Note, however, that the
109 * target cpu may not be able to assign the token to us which is why
110 * the scheduler must spin.
113 lwkt_chktokens(thread_t td)
115 globaldata_t gd = td->td_gd; /* mycpu */
/* Walk every token reference held by 'td'. */
121 for (refs = td->td_toks; refs; refs = refs->tr_next) {
/* Token owned by a remote cpu ('dgd'): we do not own it yet. */
123 if ((dgd = tok->t_cpu) != gd) {
128 * Queue a request to the target cpu, exit the loop early if
129 * we are unable to queue the IPI message. The magic number
130 * flags whether we have a pending ipi request queued or not.
/* MAGIC1 means no request is in flight for this ref; flip to MAGIC2 before sending. */
132 if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
133 refs->tr_magic = LWKT_TOKREF_MAGIC2; /* MP synched slowreq*/
135 tok->t_reqcpu = gd; /* MP unsynchronized 'fast' req */
/* Passive IPI send failed: revert the magic so the request can be retried. */
136 if (lwkt_send_ipiq_passive(dgd, lwkt_reqtoken_remote, refs)) {
138 refs->tr_magic = LWKT_TOKREF_MAGIC1;
150 * Check if we already own the token. Return 1 on success, 0 on failure.
153 lwkt_havetoken(lwkt_token_t tok)
155 globaldata_t gd = mycpu;
156 thread_t td = gd->gd_curthread;
/* Linear scan of the current thread's token reference list for 'tok'. */
159 for (ref = td->td_toks; ref; ref = ref->tr_next) {
160 if (ref->tr_tok == tok)
/*
 * Like lwkt_havetoken() but matches a specific token *reference*
 * rather than the token it points at.
 */
167 lwkt_havetokref(lwkt_tokref_t xref)
169 globaldata_t gd = mycpu;
170 thread_t td = gd->gd_curthread;
/* Scan the current thread's reference list; comparison against 'xref' is in a gap of this excerpt. */
173 for (ref = td->td_toks; ref; ref = ref->tr_next) {
183 * Returns 1 if it is ok to give a token away, 0 if it is not.
186 lwkt_oktogiveaway_token(lwkt_token_t tok)
188 globaldata_t gd = mycpu;
/*
 * Check the current thread AND every thread it has preempted: a token
 * held anywhere in the preemption chain must not be given away.
 */
192 for (td = gd->gd_curthread; td; td = td->td_preempted) {
193 for (ref = td->td_toks; ref; ref = ref->tr_next) {
194 if (ref->tr_tok == tok)
204 * Acquire a serializing token
209 _lwkt_gettokref(lwkt_tokref_t ref)
215 gd = mycpu; /* our cpu */
216 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
217 td = gd->gd_curthread; /* our thread */
220 * Link the request into our thread's list. This interlocks against
221 * remote requests from other cpus and prevents the token from being
222 * given away if our cpu already owns it. This also allows us to
223 * avoid using a critical section.
225 ref->tr_next = td->td_toks;
226 cpu_mb1(); /* order memory / we can be interrupted */
230 * If our cpu does not own the token then let the scheduler deal with
231 * it. We are guaranteed to own the tokens on our thread's token
232 * list when we are switched back in.
234 * Otherwise make sure the token is not held by a thread we are
235 * preempting. If it is, let the scheduler deal with it.
239 if (tok->t_cpu != gd) {
241 * Temporarily operate on tokens synchronously. We have to fix
242 * a number of interlocks and especially the softupdates code to
243 * be able to properly yield. ZZZ
245 #if defined(MAKE_TOKENS_SPIN)
/* Spin until every token on our list is owned, draining incoming remote requests as we wait. */
248 while (lwkt_chktokens(td) == 0) {
250 lwkt_drain_token_requests();
253 printf("CHKTOKEN loop %d\n", gd->gd_cpuid);
261 #elif defined(MAKE_TOKENS_YIELD)
264 #error MAKE_TOKENS_XXX ?
266 KKASSERT(tok->t_cpu == gd);
267 } else /* NOTE CONDITIONAL */
/* Our cpu owns the token; still must steal it from any thread we preempted that holds it. */
269 if (td->td_preempted) {
270 while ((td = td->td_preempted) != NULL) {
272 for (scan = td->td_toks; scan; scan = scan->tr_next) {
273 if (scan->tr_tok == tok) {
275 KKASSERT(tok->t_cpu == gd);
282 /* 'td' variable no longer valid due to preempt loop above */
287 * Attempt to acquire a serializing token
291 _lwkt_trytokref(lwkt_tokref_t ref)
297 gd = mycpu; /* our cpu */
298 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
299 td = gd->gd_curthread; /* our thread */
302 * Link the request into our thread's list. This interlocks against
303 * remote requests from other cpus and prevents the token from being
304 * given away if our cpu already owns it. This also allows us to
305 * avoid using a critical section.
307 ref->tr_next = td->td_toks;
308 cpu_mb1(); /* order memory / we can be interrupted */
312 * If our cpu does not own the token then stop now.
314 * Otherwise make sure the token is not held by a thread we are
315 * preempting. If it is, stop.
/* Unlike _lwkt_gettokref(), failure here unlinks the ref and gives up instead of spinning. */
319 if (tok->t_cpu != gd) {
320 td->td_toks = ref->tr_next; /* remove ref */
322 } else /* NOTE CONDITIONAL */
324 if (td->td_preempted) {
325 while ((td = td->td_preempted) != NULL) {
327 for (scan = td->td_toks; scan; scan = scan->tr_next) {
/* Token held by a preempted thread: restore 'td', unlink our ref, and fail. */
328 if (scan->tr_tok == tok) {
329 td = gd->gd_curthread; /* our thread */
330 td->td_toks = ref->tr_next; /* remove ref */
336 /* 'td' variable no longer valid */
/* Initialize 'ref' to point at 'tok' and block until the token is acquired. */
341 lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
343 lwkt_tokref_init(ref, tok);
344 _lwkt_gettokref(ref);
/* Acquire a token via a caller-initialized reference. */
348 lwkt_gettokref(lwkt_tokref_t ref)
350 _lwkt_gettokref(ref);
/* Initialize 'ref' for 'tok' and make a non-blocking acquisition attempt. */
354 lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
356 lwkt_tokref_init(ref, tok);
357 return(_lwkt_trytokref(ref));
/* Non-blocking acquisition attempt via a caller-initialized reference. */
361 lwkt_trytokref(lwkt_tokref_t ref)
363 return(_lwkt_trytokref(ref));
367 * Release a serializing token
370 lwkt_reltoken(lwkt_tokref *_ref)
379 * Guard check and stack check (if in the same stack page). We must
380 * also wait for any action pending on remote cpus which we do by
381 * checking the magic number and yielding in a loop.
/*
 * BUGFIX: the page-mask test must use bitwise '&', not logical '&&'.
 * With '&&', (xor && ~PAGE_MASK) collapses to 0/1 and the comparison
 * against 0 only succeeded when ref == &_ref exactly, so the
 * stack-growth sanity KKASSERT below effectively never ran. The
 * intent is: if 'ref' and the local '_ref' argument live in the same
 * stack page, 'ref' must sit above '_ref' on the (downward-growing)
 * stack.
 */
385 if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
386 KKASSERT((char *)ref > (char *)&_ref);
387 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
388 ref->tr_magic == LWKT_TOKREF_MAGIC2);
391 * Locate and unlink the token. Interlock with the token's cpureq
392 * to give the token away before we release it from our thread list,
393 * which allows us to avoid using a critical section.
396 td = gd->gd_curthread;
/* Find '_ref' on the thread's list; the KKASSERT fires if it is not present. */
397 for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
398 KKASSERT(ref != NULL);
401 KKASSERT(tok->t_cpu == gd);
402 tok->t_cpu = tok->t_reqcpu; /* we do not own 'tok' after this */
403 *pref = ref->tr_next; /* note: also removes giveaway interlock */
406 * If we had gotten the token opportunistically and it still happens to
407 * be queued to a target cpu, we have to wait for the target cpu
408 * to finish processing it. This does not happen very often and does
409 * not need to be optimal.
/* MAGIC2 means a remote IPI request is still in flight for this ref; wait it out. */
411 while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
412 #if defined(MAKE_TOKENS_SPIN)
419 #elif defined(MAKE_TOKENS_YIELD)
422 #error MAKE_TOKENS_XXX ?
428 * Pool tokens are used to provide a type-stable serializing token
429 * pointer that does not race against disappearing data structures.
431 * This routine is called in early boot just after we setup the BSP's
432 * globaldata structure.
435 lwkt_token_pool_init(void)
/* Initialize every slot of the static pool-token hash table. */
439 for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
440 lwkt_token_init(&pool_tokens[i]);
/*
 * Hash an arbitrary pointer to one of the static pool tokens.
 * Distinct pointers may share a token (hash collision); that is
 * acceptable, it only coarsens serialization.
 */
444 lwkt_token_pool_get(void *ptraddr)
/* Mix low and mid address bits (>>2 and >>12) so nearby addresses spread across slots. */
448 i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
449 return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
455 * This is the receiving side of a remote IPI requesting a token. If we
456 * cannot immediately hand the token off to another cpu we queue it.
458 * NOTE! we 'own' the ref structure, but we only 'own' the token if
462 lwkt_reqtoken_remote(void *data)
464 lwkt_tokref_t ref = data;
465 globaldata_t gd = mycpu;
466 lwkt_token_t tok = ref->tr_tok;
469 * We do not have to queue the token if we can give it away
470 * immediately. Otherwise we queue it to our globaldata structure.
472 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
473 if (lwkt_oktogiveaway_token(tok)) {
/* Hand ownership to the requesting cpu only if we still hold the token. */
474 if (tok->t_cpu == gd)
475 tok->t_cpu = ref->tr_reqgd;
/* MAGIC1 acknowledges to the requester that processing is complete. */
477 ref->tr_magic = LWKT_TOKREF_MAGIC1;
/* Cannot give it away now: push the ref onto this cpu's pending-request list. */
479 ref->tr_gdreqnext = gd->gd_tokreqbase;
480 gd->gd_tokreqbase = ref;
485 * Must be called from a critical section. Satisfy all remote token
486 * requests that are pending on our globaldata structure. The request
487 * does not have to be satisfied with a successful change of ownership
488 * but we do have to acknowledge that we have completed processing the
489 * request by setting the magic number back to MAGIC1.
491 * NOTE! we 'own' the ref structure, but we only 'own' the token if
495 lwkt_drain_token_requests(void)
497 globaldata_t gd = mycpu;
/* Pop queued requests one at a time until this cpu's list is empty. */
500 while ((ref = gd->gd_tokreqbase) != NULL) {
501 gd->gd_tokreqbase = ref->tr_gdreqnext;
502 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
/* Transfer ownership only if we still hold the token; ack either way. */
503 if (ref->tr_tok->t_cpu == gd)
504 ref->tr_tok->t_cpu = ref->tr_reqgd;
506 ref->tr_magic = LWKT_TOKREF_MAGIC1;
513 * Initialize the owner and release-to cpu to the current cpu
514 * and reset the generation count.
517 lwkt_token_init(lwkt_token_t tok)
/* Both owner (t_cpu) and release-to cpu (t_reqcpu) start as the calling cpu. */
519 tok->t_cpu = tok->t_reqcpu = mycpu;
523 lwkt_token_uninit(lwkt_token_t tok)