proc->thread stage 6: kernel threads now create processless LWKT threads.
[dragonfly.git] / sys / kern / lwkt_thread.c
/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to prevent hiccups.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.5 2003/06/27 01:53:25 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static int untimely_switch = 0;
SYSCTL_INT(_debug, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");

static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&mycpu->gd_tdrunq, td, td_threadq);
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & TDF_RUNQ) == 0) {
        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, td, td_threadq);
    }
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * YYY implement strict priorities & round-robin at the same priority
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    TAILQ_INIT(&gd->gd_tdrunq);
}

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_wait(lwkt_wait_t w)
{
    TAILQ_INIT(&w->wa_waitq);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(void)
{
    struct thread *td;
    void *stack;

    crit_enter();
    if (mycpu->gd_tdfreecount > 0) {
        --mycpu->gd_tdfreecount;
        td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
        KASSERT(td != NULL, ("unexpected null cache td"));
        TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
        crit_exit();
        stack = td->td_kstack;
    } else {
        crit_exit();
        td = zalloc(thread_zone);
        stack = (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
    }
    lwkt_init_thread(td, stack);
    return(td);
}
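
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * use lwkt_alloc_thread() to build a process-less kernel thread.  The
 * my_md_set_startup() helper is hypothetical; per the comment above, some
 * machine-dependent code must load the startup pc and td_switch function
 * before the thread may be scheduled.
 */
#if 0
static thread_t
example_create_lwkt(void (*func)(void *), void *arg)
{
    thread_t td;

    td = lwkt_alloc_thread();
    my_md_set_startup(td, func, arg);   /* hypothetical: loads startup + td_switch */
    lwkt_schedule(td);                  /* now safe to place on the run queue */
    return(td);
}
#endif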

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_thread(thread_t td, void *stack)
{
    bzero(td, sizeof(struct thread));
    lwkt_rwlock_init(&td->td_rwlock);
    td->td_kstack = stack;
    pmap_init_thread(td);
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * We always 'own' our own thread and the threads on our run queue,
 * due to TDF_RUNNING or TDF_RUNQ being set.  We can safely clear
 * TDF_RUNNING while in a critical section.
 *
 * The td_switch() function must be called while in the critical section.
 * This function saves as much state as is appropriate for the type of
 * thread.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_switch(void)
{
    thread_t td = curthread;
    thread_t ntd;

    crit_enter();
    if ((ntd = TAILQ_FIRST(&mycpu->gd_tdrunq)) != NULL) {
        TAILQ_REMOVE(&mycpu->gd_tdrunq, ntd, td_threadq);
        TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, ntd, td_threadq);
    } else {
        ntd = &mycpu->gd_idlethread;
    }
    if (td != ntd) {
        td->td_flags &= ~TDF_RUNNING;
        ntd->td_flags |= TDF_RUNNING;
        td->td_switch(ntd);
    }
    crit_exit();
}

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    thread_t td = curthread;
    while ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) {
#if 0
        cpu_schedule_reqs();    /* resets gd_reqpri */
#endif
        splz();
    }

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && intr_nesting_level == 0) {
        crit_enter();
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self();       /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self();     /* make sure we are descheduled */
        }
        crit_exit_noyield();
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqpri tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self();
    lwkt_switch();
}
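
/*
 * Illustrative sketch (not part of the original source): a long-running
 * kernel thread can call lwkt_yield() between units of work so that other
 * runnable LWKTs on this cpu, including those of equal priority, get cpu
 * time.  The work_available()/do_work() helpers are hypothetical.
 */
#if 0
static void
example_worker(void *dummy)
{
    for (;;) {
        while (work_available())
            do_work();          /* hypothetical unit of work */
        lwkt_yield();           /* reschedule ourselves and switch away */
    }
}
#endif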

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td->td_flags & TDF_RUNNING, ("lwkt_schedule_self(): TDF_RUNNING not set!"));
    _lwkt_enqueue(td);
    crit_exit();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * This function will queue requests asynchronously when possible, but may
 * block if no request structures are available.  Upon return the caller
 * should note that the scheduling request may not yet have been processed
 * by the target cpu.
 *
 * YYY this is one of the best places to implement any load balancing code.
 * Load balancing can be accomplished by requesting other sorts of actions
 * for the thread in question.
 */
void
lwkt_schedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         */
        if ((w = td->td_wait) != NULL) {
            if (lwkt_havetoken(&w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
                if (td->td_cpu == mycpu->gd_cpu) {
                    _lwkt_enqueue(td);
                } else {
                    panic("lwkt_schedule: cpu mismatch1");
#if 0
                    lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                    initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w);
                    cpu_sendnormsg(&msg.mu_Msg);
#endif
                }
            } else {
                panic("lwkt_schedule: cpu mismatch2");
#if 0
                lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w);
                cpu_sendnormsg(&msg.mu_Msg);
#endif
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
            if (td->td_cpu == mycpu->gd_cpu) {
                _lwkt_enqueue(td);
            } else {
                panic("lwkt_schedule: cpu mismatch3");
#if 0
                lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                initScheduleReqMsg_Thread(&msg.mu_SchedReq, td);
                cpu_sendnormsg(&msg.mu_Msg);
#endif
            }
        }
    }
    crit_exit();
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    KASSERT(td->td_flags & TDF_RUNNING, ("lwkt_deschedule_self(): TDF_RUNNING not set!"));
    _lwkt_dequeue(td);
    crit_exit();
}
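
/*
 * Illustrative sketch (not part of the original source): a minimal same-cpu
 * sleep/wakeup handshake built directly on lwkt_deschedule_self() and
 * lwkt_schedule().  The example_flag/example_td names are hypothetical, and
 * real code would normally use the lwkt_block()/lwkt_signal() wait queue
 * primitives below instead.
 */
#if 0
static volatile int example_flag;
static thread_t example_td;

static void
example_sleeper(void)
{
    crit_enter();                       /* keep the test + deschedule atomic */
    while (example_flag == 0) {
        lwkt_deschedule_self();
        lwkt_switch();                  /* resumes after lwkt_schedule() below */
    }
    crit_exit();
}

static void
example_waker(void)
{
    example_flag = 1;
    lwkt_schedule(example_td);          /* put the sleeper back on the run queue */
}
#endif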

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_cpu == mycpu->gd_cpu) {
            _lwkt_dequeue(td);
        } else {
            panic("lwkt_deschedule: cpu mismatch");
#if 0
            lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
            initDescheduleReqMsg_Thread(&msg.mu_DeschedReq, td);
            cpu_sendnormsg(&msg.mu_Msg);
#endif
        }
    }
    crit_exit();
}

/*
 * This function deschedules the current thread and blocks on the specified
 * wait queue.  We obtain ownership of the wait queue in order to block
 * on it.  A generation number is used to interlock the wait queue in case
 * it gets signalled while we are blocked waiting on the token.
 *
 * Note: alternatively we could dequeue our thread and then message the
 * target cpu owning the wait queue.  YYY implement as sysctl.
 *
 * Note: wait queue signals normally ping-pong the cpu as an optimization.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;

    lwkt_gettoken(&w->wa_token);
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
        lwkt_switch();
    }
    /* token might be lost, doesn't matter for gen update */
    *gen = w->wa_gen;
    lwkt_reltoken(&w->wa_token);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w)
{
    thread_t td;
    int count;

    lwkt_gettoken(&w->wa_token);
    ++w->wa_gen;
    count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_cpu == mycpu->gd_cpu) {
            _lwkt_enqueue(td);
        } else {
#if 0
            lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
            initScheduleReqMsg_Thread(&msg.mu_SchedReq, td);
            cpu_sendnormsg(&msg.mu_Msg);
#endif
            panic("lwkt_signal: cpu mismatch");
        }
        lwkt_regettoken(&w->wa_token);
    }
    lwkt_reltoken(&w->wa_token);
}
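
/*
 * Illustrative sketch (not part of the original source): using the wait
 * queue with its generation interlock.  The consumer snapshots wa_gen,
 * re-tests its predicate, and calls lwkt_block(); because lwkt_signal()
 * bumps the generation, a wakeup that races the block is not lost.  The
 * example_wait/example_avail names and the struct lwkt_wait tag (assumed
 * from the lwkt_wait_t typedef) are hypothetical.
 */
#if 0
static struct lwkt_wait example_wait;   /* set up once with lwkt_init_wait() */
static volatile int example_avail;

static void
example_consumer(void)
{
    int gen = example_wait.wa_gen;

    while (example_avail == 0)
        lwkt_block(&example_wait, "exwait", &gen);
    --example_avail;
}

static void
example_producer(void)
{
    ++example_avail;
    lwkt_signal(&example_wait);         /* bumps wa_gen and wakes waiters */
}
#endif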

/*
 * Acquire ownership of a token
 *
 * Acquire ownership of a token.  The token may have spl and/or critical
 * section side effects, depending on its purpose.  These side effects
 * guarantee that you will maintain ownership of the token as long as you
 * do not block.  If you block you may lose access to the token (but you
 * must still release it even if you lose your access to it).
 *
 * Note that the spl and critical section characteristics of a token
 * may not be changed once the token has been initialized.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
    /*
     * Prevent preemption so the token can't be taken away from us once
     * we gain ownership of it.  Use a synchronous request which might
     * block.  The request will be forwarded as necessary playing catchup
     * to the token.
     */
    crit_enter();
#if 0
    while (tok->t_cpu != mycpu->gd_cpu) {
        lwkt_cpu_msg_union msg;
        initTokenReqMsg(&msg.mu_TokenReq);
        cpu_domsg(&msg);
    }
#endif
    /*
     * Leave us in a critical section on return.  This will be undone
     * by lwkt_reltoken().
     */
}

/*
 * Release your ownership of a token.  Releases must occur in reverse
 * order to acquisitions, eventually so priorities can be unwound properly
 * like SPLs.  At the moment the actual implementation doesn't care.
 *
 * We can safely hand a token that we own to another cpu without notifying
 * it, but once we do we can't get it back without requesting it (unless
 * the other cpu hands it back to us before we check).
 *
 * We might have lost the token, so check that.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
    if (tok->t_cpu == mycpu->gd_cpu) {
        tok->t_cpu = tok->t_reqcpu;
    }
    crit_exit();
}

/*
 * Reacquire a token that might have been lost.  Returns 1 if we blocked
 * while reacquiring the token (meaning that you might have lost other
 * tokens you held when you made this call), or 0 if we did not block.
 */
int
lwkt_regettoken(lwkt_token_t tok)
{
#if 0
    if (tok->t_cpu != mycpu->gd_cpu) {
        while (tok->t_cpu != mycpu->gd_cpu) {
            lwkt_cpu_msg_union msg;
            initTokenReqMsg(&msg.mu_TokenReq);
            cpu_domsg(&msg);
        }
        return(1);
    }
#endif
    return(0);
}
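
/*
 * Illustrative sketch (not part of the original source): the intended token
 * discipline.  Ownership is retained as long as the holder does not block;
 * after anything that might have blocked, lwkt_regettoken() chases the token
 * back before the protected data is touched again.  The example_token name
 * and the struct lwkt_token tag (assumed from the lwkt_token_t typedef) are
 * hypothetical.
 */
#if 0
static struct lwkt_token example_token;

static void
example_token_user(void)
{
    lwkt_gettoken(&example_token);      /* also leaves us in a critical section */
    /* ... access the data structure the token protects ... */
    if (lwkt_regettoken(&example_token)) {
        /* we blocked and may have lost other tokens; revalidate cached state */
    }
    lwkt_reltoken(&example_token);      /* undoes the crit_enter() */
}
#endif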