2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.14 2005/06/02 21:55:22 dillon Exp $
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
43 #include <sys/rtprio.h>
44 #include <sys/queue.h>
45 #include <sys/thread2.h>
46 #include <sys/sysctl.h>
47 #include <sys/kthread.h>
48 #include <machine/cpu.h>
53 #include <vm/vm_param.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_map.h>
58 #include <vm/vm_pager.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_zone.h>
62 #include <machine/stdarg.h>
63 #include <machine/ipl.h>
64 #include <machine/smp.h>
66 #define THREAD_STACK (UPAGES * PAGE_SIZE)
70 #include <sys/stdint.h>
71 #include <libcaps/thread.h>
72 #include <sys/thread.h>
73 #include <sys/msgport.h>
74 #include <sys/errno.h>
75 #include <libcaps/globaldata.h>
76 #include <machine/cpufunc.h>
77 #include <sys/thread2.h>
78 #include <sys/msgport2.h>
82 #include <machine/lock.h>
83 #include <machine/cpu.h>
/*
 * File-scope configuration and state for the LWKT token subsystem.
 * NOTE(review): this listing embeds original line numbers and is missing
 * interior lines (e.g. the #endif closing the #ifndef below) -- gaps in
 * the embedded numbering mark the dropped lines.
 */
/* Strategy while waiting for tokens owned by other cpus: spin (vs yield). */
87 #define MAKE_TOKENS_SPIN
88 /* #define MAKE_TOKENS_YIELD */
/* Size of the pool-token hash table; mask below requires a power of 2. */
90 #ifndef LWKT_NUM_POOL_TOKENS
91 #define LWKT_NUM_POOL_TOKENS 1024 /* power of 2 */
93 #define LWKT_MASK_POOL_TOKENS (LWKT_NUM_POOL_TOKENS - 1)
/* Debug knob, exported read-write via the sysctl below. */
96 static int token_debug = 0;
/* IPI handler invoked on the cpu that currently owns a requested token. */
100 static void lwkt_reqtoken_remote(void *data);
/* Static table of type-stable tokens handed out by lwkt_token_pool_get(). */
103 static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS];
108 SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
/*
 * lwkt_chktokens(td): verify that cpu 'td->td_gd' owns every token on
 * td's token reference list; for each token owned by another cpu, send
 * that cpu an IPI requesting the token.  tr_magic tracks whether a
 * request IPI is outstanding for a given tokref.
 * NOTE(review): listing is missing interior lines (declarations of
 * refs/tok/dgd, braces, the return paths) -- comments describe only
 * what is visible.
 */
116 * Determine if we own all the tokens in the token reference list.
117 * Return 1 on success, 0 on failure.
119 * As a side effect, queue requests for tokens we want which are owned
120 * by other cpus. The magic number is used to communicate when the
121 * target cpu has processed the request. Note, however, that the
122 * target cpu may not be able to assign the token to us which is why
123 * the scheduler must spin.
126 lwkt_chktokens(thread_t td)
128 globaldata_t gd = td->td_gd; /* mycpu */
/* Walk every token reference held by the thread. */
134 for (refs = td->td_toks; refs; refs = refs->tr_next) {
/* Token owned by a different cpu (dgd): must request it remotely. */
136 if ((dgd = tok->t_cpu) != gd) {
141 * Queue a request to the target cpu, exit the loop early if
142 * we are unable to queue the IPI message. The magic number
143 * flags whether we have a pending ipi request queued or not.
144 * It can be set from MAGIC2 to MAGIC1 by a remote cpu but can
145 * only be set from MAGIC1 to MAGIC2 by our cpu.
147 if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
/* Mark request-in-flight before sending the IPI. */
148 refs->tr_magic = LWKT_TOKREF_MAGIC2; /* MP synched slowreq*/
150 tok->t_reqcpu = gd; /* MP unsynchronized 'fast' req */
151 if (lwkt_send_ipiq_nowait(dgd, lwkt_reqtoken_remote, refs)) {
/* IPI queue full: roll the magic back so we retry later. */
153 refs->tr_magic = LWKT_TOKREF_MAGIC1;
/*
 * lwkt_havetoken(tok): linear scan of the current thread's token
 * reference list for 'tok'.
 * NOTE(review): the return statements and closing brace are among the
 * lines missing from this listing.
 */
165 * Check if we already own the token. Return 1 on success, 0 on failure.
168 lwkt_havetoken(lwkt_token_t tok)
170 globaldata_t gd = mycpu;
171 thread_t td = gd->gd_curthread;
/* Search the current thread's held-token list. */
174 for (ref = td->td_toks; ref; ref = ref->tr_next) {
175 if (ref->tr_tok == tok)
/*
 * lwkt_havetokref(xref): scan the current thread's token reference list,
 * presumably for the specific tokref 'xref' (the comparison line is
 * missing from this listing -- TODO confirm against the full source).
 */
182 lwkt_havetokref(lwkt_tokref_t xref)
184 globaldata_t gd = mycpu;
185 thread_t td = gd->gd_curthread;
188 for (ref = td->td_toks; ref; ref = ref->tr_next) {
/*
 * lwkt_oktogiveaway_token(tok): a token may be given away only if no
 * thread in the current cpu's preemption chain (gd_curthread plus each
 * td_preempted link) holds a reference to it.
 * NOTE(review): return paths and closing braces are missing from this
 * listing.
 */
198 * Returns 1 if it is ok to give a token away, 0 if it is not.
201 lwkt_oktogiveaway_token(lwkt_token_t tok)
203 globaldata_t gd = mycpu;
/* Walk the preemption chain; check every thread's held tokens. */
207 for (td = gd->gd_curthread; td; td = td->td_preempted) {
208 for (ref = td->td_toks; ref; ref = ref->tr_next) {
209 if (ref->tr_tok == tok)
/*
 * _lwkt_gettokref(ref): common backend for lwkt_gettoken()/lwkt_gettokref().
 * Links 'ref' onto the current thread's token list, then ensures the
 * token is actually owned by this cpu -- spinning (MAKE_TOKENS_SPIN) on
 * lwkt_chktokens() until remote cpus hand it over, and checking that no
 * preempted thread on this cpu already holds it.
 * NOTE(review): this listing is missing interior lines (local variable
 * declarations, braces, loop bodies between the visible statements).
 */
219 * Acquire a serializing token
224 _lwkt_gettokref(lwkt_tokref_t ref)
230 gd = mycpu; /* our cpu */
/* Caller must hand us a freshly initialized (MAGIC1) tokref. */
231 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
232 td = gd->gd_curthread; /* our thread */
235 * Link the request into our thread's list. This interlocks against
236 * remote requests from other cpus and prevents the token from being
237 * given away if our cpu already owns it. This also allows us to
238 * avoid using a critical section.
240 ref->tr_next = td->td_toks;
241 cpu_mb1(); /* order memory / we can be interrupted */
245 * If our cpu does not own the token then let the scheduler deal with
246 * it. We are guarenteed to own the tokens on our thread's token
247 * list when we are switched back in.
249 * Otherwise make sure the token is not held by a thread we are
250 * preempting. If it is, let the scheduler deal with it.
254 if (tok->t_cpu != gd) {
256 * Temporarily operate on tokens synchronously. We have to fix
257 * a number of interlocks and especially the softupdates code to
258 * be able to properly yield. ZZZ
260 #if defined(MAKE_TOKENS_SPIN)
/* Spin until all tokens on our list are owned by this cpu. */
264 while (lwkt_chktokens(td) == 0) {
/* Service remote requests while spinning to avoid deadlock. */
266 lwkt_drain_token_requests();
/* Diagnostics for a spin that runs too long (thresholds not visible). */
269 printf("CHKTOKEN looping on cpu %d\n", gd->gd_cpuid);
272 panic("CHKTOKEN looping on cpu %d", gd->gd_cpuid);
278 #elif defined(MAKE_TOKENS_YIELD)
281 #error MAKE_TOKENS_XXX ?
283 KKASSERT(tok->t_cpu == gd);
284 } else /* NOTE CONDITIONAL */
/* Cpu owns the token: check the preemption chain for a holder. */
286 if (td->td_preempted) {
287 while ((td = td->td_preempted) != NULL) {
289 for (scan = td->td_toks; scan; scan = scan->tr_next) {
290 if (scan->tr_tok == tok) {
292 KKASSERT(tok->t_cpu == gd);
299 /* 'td' variable no longer valid due to preempt loop above */
/*
 * _lwkt_trytokref(ref): non-blocking variant of _lwkt_gettokref().
 * Links 'ref' onto the thread's token list, but if the token is owned by
 * another cpu -- or held by a thread we preempted -- it unlinks the ref
 * and fails instead of waiting.
 * NOTE(review): return statements, braces, and local declarations are
 * among the lines missing from this listing.
 */
304 * Attempt to acquire a serializing token
308 _lwkt_trytokref(lwkt_tokref_t ref)
314 gd = mycpu; /* our cpu */
315 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
316 td = gd->gd_curthread; /* our thread */
319 * Link the request into our thread's list. This interlocks against
320 * remote requests from other cpus and prevents the token from being
321 * given away if our cpu already owns it. This also allows us to
322 * avoid using a critical section.
324 ref->tr_next = td->td_toks;
325 cpu_mb1(); /* order memory / we can be interrupted */
329 * If our cpu does not own the token then stop now.
331 * Otherwise make sure the token is not held by a thread we are
332 * preempting. If it is, stop.
336 if (tok->t_cpu != gd) {
/* Not ours: back out the link we just made and fail. */
337 td->td_toks = ref->tr_next; /* remove ref */
339 } else /* NOTE CONDITIONAL */
341 if (td->td_preempted) {
342 while ((td = td->td_preempted) != NULL) {
344 for (scan = td->td_toks; scan; scan = scan->tr_next) {
345 if (scan->tr_tok == tok) {
/* Held by a preempted thread: restore td and back out. */
346 td = gd->gd_curthread; /* our thread */
347 td->td_toks = ref->tr_next; /* remove ref */
353 /* 'td' variable no longer valid */
/*
 * lwkt_gettoken(ref, tok): initialize 'ref' for 'tok' and acquire it
 * via the common blocking backend _lwkt_gettokref().
 */
358 lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
360 lwkt_tokref_init(ref, tok);
361 _lwkt_gettokref(ref);
/*
 * lwkt_gettokref(ref): acquire a token via an already-initialized tokref.
 */
365 lwkt_gettokref(lwkt_tokref_t ref)
367 _lwkt_gettokref(ref);
/*
 * lwkt_trytoken(ref, tok): initialize 'ref' for 'tok' and attempt a
 * non-blocking acquisition; returns _lwkt_trytokref()'s result.
 */
371 lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
373 lwkt_tokref_init(ref, tok);
374 return(_lwkt_trytokref(ref));
/*
 * lwkt_trytokref(ref): non-blocking acquire via a pre-initialized tokref.
 */
378 lwkt_trytokref(lwkt_tokref_t ref)
380 return(_lwkt_trytokref(ref));
/*
 * lwkt_reltoken(_ref): release a token reference.  Unlinks '_ref' from
 * the current thread's token list, hands the token to t_reqcpu, and if a
 * remote request is still in flight (tr_magic == MAGIC2) waits for the
 * target cpu to acknowledge before returning.
 * NOTE(review): local declarations (gd, tok), braces, and the bodies of
 * the spin/yield wait are among the lines missing from this listing.
 */
384 * Release a serializing token
387 lwkt_reltoken(lwkt_tokref *_ref)
396 * Guard check and stack check (if in the same stack page). We must
397 * also wait for any action pending on remote cpus which we do by
398 * checking the magic number and yielding in a loop.
/* Sanity: if ref lives in our stack page, it must be deeper than &_ref. */
402 if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
403 KKASSERT((char *)ref > (char *)&_ref);
404 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
405 ref->tr_magic == LWKT_TOKREF_MAGIC2);
408 * Locate and unlink the token. Interlock with the token's cpureq
409 * to give the token away before we release it from our thread list,
410 * which allows us to avoid using a critical section.
413 td = gd->gd_curthread;
/* Find _ref in the thread's singly linked token list. */
414 for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
415 KKASSERT(ref != NULL);
418 KKASSERT(tok->t_cpu == gd);
/* Hand ownership to the last requesting cpu, then unlink. */
419 tok->t_cpu = tok->t_reqcpu; /* we do not own 'tok' after this */
420 *pref = ref->tr_next; /* note: also removes giveaway interlock */
423 * If we had gotten the token opportunistically and it still happens to
424 * be queued to a target cpu, we have to wait for the target cpu
425 * to finish processing it. This does not happen very often and does
426 * not need to be optimal.
428 while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
429 #if defined(MAKE_TOKENS_SPIN)
436 #elif defined(MAKE_TOKENS_YIELD)
439 #error MAKE_TOKENS_XXX ?
/*
 * lwkt_token_pool_init(): initialize every entry of the static
 * pool_tokens[] table.  Called once at early boot (see comment below).
 */
445 * Pool tokens are used to provide a type-stable serializing token
446 * pointer that does not race against disappearing data structures.
448 * This routine is called in early boot just after we setup the BSP's
449 * globaldata structure.
452 lwkt_token_pool_init(void)
456 for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
457 lwkt_token_init(&pool_tokens[i]);
/*
 * lwkt_token_pool_get(ptraddr): hash an arbitrary pointer into the
 * pool_tokens[] table and return the corresponding token.  The shift/xor
 * mixes high and low address bits before masking to the table size.
 */
461 lwkt_token_pool_get(void *ptraddr)
465 i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
466 return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
/*
 * lwkt_reqtoken_remote(data): IPI handler run on the cpu that owns a
 * requested token.  Gives the token away immediately when permitted,
 * acknowledging by resetting tr_magic to MAGIC1; otherwise queues the
 * request on this cpu's gd_tokreqbase list for later draining.
 * NOTE(review): braces and any lines between the visible statements are
 * missing from this listing.
 */
472 * This is the receiving side of a remote IPI requesting a token. If we
473 * cannot immediately hand the token off to another cpu we queue it.
475 * NOTE! we 'own' the ref structure, but we only 'own' the token if
479 lwkt_reqtoken_remote(void *data)
481 lwkt_tokref_t ref = data;
482 globaldata_t gd = mycpu;
483 lwkt_token_t tok = ref->tr_tok;
486 * We do not have to queue the token if we can give it away
487 * immediately. Otherwise we queue it to our globaldata structure.
489 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
490 if (lwkt_oktogiveaway_token(tok)) {
/* Only reassign if we still own it (it may have moved already). */
491 if (tok->t_cpu == gd)
492 tok->t_cpu = ref->tr_reqgd;
/* Acknowledge: requester spins until magic returns to MAGIC1. */
494 ref->tr_magic = LWKT_TOKREF_MAGIC1;
/* Cannot give away now: push onto this cpu's pending-request list. */
496 ref->tr_gdreqnext = gd->gd_tokreqbase;
497 gd->gd_tokreqbase = ref;
/*
 * lwkt_drain_token_requests(): pop every queued remote token request off
 * this cpu's gd_tokreqbase list, transfer ownership where we still own
 * the token, and acknowledge each request by resetting tr_magic.
 */
502 * Must be called from a critical section. Satisfy all remote token
503 * requests that are pending on our globaldata structure. The request
504 * does not have to be satisfied with a successful change of ownership
505 * but we do have to acknowledge that we have completed processing the
506 * request by setting the magic number back to MAGIC1.
508 * NOTE! we 'own' the ref structure, but we only 'own' the token if
512 lwkt_drain_token_requests(void)
514 globaldata_t gd = mycpu;
517 while ((ref = gd->gd_tokreqbase) != NULL) {
518 gd->gd_tokreqbase = ref->tr_gdreqnext;
519 KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
/* Transfer only if we still own the token. */
520 if (ref->tr_tok->t_cpu == gd)
521 ref->tr_tok->t_cpu = ref->tr_reqgd;
/* Acknowledge processing regardless of whether ownership moved. */
523 ref->tr_magic = LWKT_TOKREF_MAGIC1;
/*
 * lwkt_token_init(tok): set both the owning cpu and the release-to cpu
 * to the current cpu.  (The generation-count reset mentioned below is
 * among the lines missing from this listing.)
 */
530 * Initialize the owner and release-to cpu to the current cpu
531 * and reset the generation count.
534 lwkt_token_init(lwkt_token_t tok)
536 tok->t_cpu = tok->t_reqcpu = mycpu;
540 lwkt_token_uninit(lwkt_token_t tok)