/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to prevent hiccups.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.10 2003/06/29 05:29:31 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>

static int untimely_switch = 0;
SYSCTL_INT(_debug, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");

static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&mycpu->gd_tdrunq, td, td_threadq);
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & TDF_RUNQ) == 0) {
        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, td, td_threadq);
    }
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * YYY implement strict priorities & round-robin at the same priority
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    TAILQ_INIT(&gd->gd_tdrunq);
}

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE! called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_wait(lwkt_wait_t w)
{
    TAILQ_INIT(&w->wa_waitq);
}

/*
 * Create a new thread. The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td)
{
    void *stack;
    int flags = 0;

    crit_enter();
    if (td == NULL) {
        if (mycpu->gd_tdfreecount > 0) {
            --mycpu->gd_tdfreecount;
            td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_EXITED),
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
            crit_exit();
            stack = td->td_kstack;
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit();
            td = zalloc(thread_zone);
            td->td_kstack = NULL;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) == NULL) {
        stack = (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
        flags |= TDF_ALLOCATED_STACK;
    }
    lwkt_init_thread(td, stack, flags);
    return(td);
}
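
/*
 * Illustrative sketch (not compiled): building a thread without a process
 * context by hand, which is essentially what lwkt_create() and
 * kthread_create() below do. my_worker() and my_worker_start() are
 * hypothetical names supplied for the example only.
 */
#if 0
static void
my_worker(void *arg)
{
    /* ... do work ... */
    lwkt_exit();
}

static void
my_worker_start(void)
{
    thread_t td;

    td = lwkt_alloc_thread(NULL);
    cpu_set_thread_handler(td, kthread_exit, my_worker, NULL);
    snprintf(td->td_comm, sizeof(td->td_comm), "myworker");
    lwkt_schedule(td);
}
#endif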

/*
 * Initialize a preexisting thread structure. This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * NOTE! called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_thread(thread_t td, void *stack, int flags)
{
    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_flags |= flags;
    pmap_init_thread(td);
}

void
lwkt_free_thread(struct thread *td)
{
    KASSERT(td->td_flags & TDF_EXITED,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter();
    if (mycpu->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++mycpu->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&mycpu->gd_tdfreeq, td, td_threadq);
        crit_exit();
    } else {
        crit_exit();
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
            kmem_free(kernel_map,
                (vm_offset_t)td->td_kstack, UPAGES * PAGE_SIZE);
            td->td_kstack = NULL;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD)
            zfree(thread_zone, td);
    }
}
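
/*
 * Illustrative sketch (not compiled): a hypothetical reaper returning
 * threads to the allocator. lwkt_free_thread() requires that the thread
 * has already set TDF_EXITED; 'exited_threads' is an assumed TAILQ of
 * threads that some other code moves exited threads onto, and the
 * 'lwkt_queue' tag is an assumption for illustration.
 */
#if 0
static void
my_thread_reap(struct lwkt_queue *exited_threads)
{
    thread_t td;

    crit_enter();
    while ((td = TAILQ_FIRST(exited_threads)) != NULL) {
        TAILQ_REMOVE(exited_threads, td, td_threadq);
        crit_exit();
        lwkt_free_thread(td);   /* caches or frees the stack and thread */
        crit_enter();
    }
    crit_exit();
}
#endif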


/*
 * Switch to the next runnable lwkt. If no LWKTs are runnable then
 * switch to the idlethread. Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue. Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * We always 'own' our own thread and the threads on our run queue,
 * due to TDF_RUNNING or TDF_RUNQ being set. We can safely clear
 * TDF_RUNNING while in a critical section.
 *
 * The td_switch() function must be called while in the critical section.
 * This function saves as much state as is appropriate for the type of
 * thread.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_switch(void)
{
    thread_t td = curthread;
    thread_t ntd;

    if (mycpu->gd_intr_nesting_level && td->td_preempted == NULL)
        panic("lwkt_switch: cannot switch from within an interrupt\n");

    crit_enter();
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.
         */
        td->td_preempted = NULL;
        td->td_pri -= TDPRI_CRIT;
        ntd->td_flags &= ~TDF_PREEMPTED;
    } else if ((ntd = TAILQ_FIRST(&mycpu->gd_tdrunq)) != NULL) {
        TAILQ_REMOVE(&mycpu->gd_tdrunq, ntd, td_threadq);
        TAILQ_INSERT_TAIL(&mycpu->gd_tdrunq, ntd, td_threadq);
    } else {
        ntd = mycpu->gd_idletd;
    }
    if (td != ntd)
        td->td_switch(ntd);
    crit_exit();
}
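
/*
 * Illustrative sketch (not compiled): a code fragment showing a thread
 * voluntarily blocking until some other context lwkt_schedule()s it again.
 * The deschedule and switch are done inside a critical section so a wakeup
 * cannot race us off the run queue; lwkt_block() below uses the same
 * sequence with a wait queue.
 */
#if 0
    crit_enter();
    lwkt_deschedule_self();
    lwkt_switch();          /* returns only after we are rescheduled */
    crit_exit();
#endif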

/*
 * The target thread preempts the current thread. The target thread
 * structure must be stable and preempt-safe (e.g. an interrupt thread).
 * When the target thread blocks the current thread will be resumed.
 *
 * XXX the target runs in a critical section so it does not open the original
 * thread up to additional interrupts that the original thread believes it
 * is blocking.
 *
 * Normal kernel threads should not preempt other normal kernel threads
 * as it breaks the assumptions kernel threads are allowed to make. Note
 * that preemption does not mess around with the current thread's RUNQ
 * state.
 */
void
lwkt_preempt(struct thread *ntd, int id)
{
    struct thread *td = curthread;

    crit_enter();
    if (ntd->td_preempted == NULL) {
        ntd->td_preempted = curthread;
        td->td_flags |= TDF_PREEMPTED;
        ntd->td_pri += TDPRI_CRIT;
        while (td->td_flags & TDF_PREEMPTED)
            ntd->td_switch(ntd);
    }
    crit_exit_noyield();
}

/*
 * Yield our thread while higher priority threads are pending. This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect. Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqpri indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    thread_t td = curthread;

    if ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) {
        mycpu->gd_reqpri = 0;
        splz();
    }

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code. This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && mycpu->gd_intr_nesting_level == 0) {
        crit_enter();
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self();       /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self();     /* make sure we are descheduled */
        }
        crit_exit_noyield();
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well. Note that gd_reqpri tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self();
    lwkt_switch();
}
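
/*
 * Illustrative sketch (not compiled): a code fragment showing a cpu-bound
 * kernel loop yielding periodically so other runnable threads on this cpu,
 * including those of equal priority, get a chance to run.
 * do_one_unit_of_work() is hypothetical.
 */
#if 0
    while (do_one_unit_of_work())
        lwkt_yield();       /* requeue ourselves and switch if appropriate */
#endif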

/*
 * Schedule a thread to run. As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    _lwkt_enqueue(td);
    crit_exit();
}

/*
 * Generic schedule. Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * This function will queue requests asynchronously when possible, but may
 * block if no request structures are available. Upon return the caller
 * should note that the scheduling request may not yet have been processed
 * by the target cpu.
 *
 * YYY this is one of the best places to implement any load balancing code.
 * Load balancing can be accomplished by requesting other sorts of actions
 * for the thread in question.
 */
void
lwkt_schedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure. Otherwise we send
         * the scheduling request to the cpu owning the thread. Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         */
        if ((w = td->td_wait) != NULL) {
            if (lwkt_havetoken(&w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
                if (td->td_cpu == mycpu->gd_cpuid) {
                    _lwkt_enqueue(td);
                } else {
                    panic("lwkt_schedule: cpu mismatch1");
#if 0
                    lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                    initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w);
                    cpu_sendnormsg(&msg.mu_Msg);
#endif
                }
            } else {
                panic("lwkt_schedule: cpu mismatch2");
#if 0
                lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                initScheduleReqMsg_Wait(&msg.mu_SchedReq, td, w);
                cpu_sendnormsg(&msg.mu_Msg);
#endif
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section). If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
            if (td->td_cpu == mycpu->gd_cpuid) {
                _lwkt_enqueue(td);
            } else {
                panic("lwkt_schedule: cpu mismatch3");
#if 0
                lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
                initScheduleReqMsg_Thread(&msg.mu_SchedReq, td);
                cpu_sendnormsg(&msg.mu_Msg);
#endif
            }
        }
    }
    crit_exit();
}
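
/*
 * Illustrative sketch (not compiled): a code fragment waking a worker
 * thread whose thread_t was saved when it was created. If the worker is
 * parked on a wait queue or owned by another cpu, lwkt_schedule()
 * forwards the request as described in the comment above. 'my_worker_td'
 * is hypothetical.
 */
#if 0
    static thread_t my_worker_td;   /* saved by the creation code */

    lwkt_schedule(my_worker_td);
#endif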

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit();
}

/*
 * Generic deschedule. Descheduling threads other than your own should be
 * done only in carefully controlled circumstances. Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_cpu == mycpu->gd_cpuid) {
            _lwkt_dequeue(td);
        } else {
            panic("lwkt_deschedule: cpu mismatch");
#if 0
            lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
            initDescheduleReqMsg_Thread(&msg.mu_DeschedReq, td);
            cpu_sendnormsg(&msg.mu_Msg);
#endif
        }
    }
    crit_exit();
}

/*
 * This function deschedules the current thread and blocks on the specified
 * wait queue. We obtain ownership of the wait queue in order to block
 * on it. A generation number is used to interlock the wait queue in case
 * it gets signalled while we are blocked waiting on the token.
 *
 * Note: alternatively we could dequeue our thread and then message the
 * target cpu owning the wait queue. YYY implement as sysctl.
 *
 * Note: wait queue signals normally ping-pong the cpu as an optimization.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;

    lwkt_gettoken(&w->wa_token);
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
        lwkt_switch();
    }
    /* token might be lost, doesn't matter for gen update */
    *gen = w->wa_gen;
    lwkt_reltoken(&w->wa_token);
}

/*
 * Signal a wait queue. We gain ownership of the wait queue in order to
 * signal it. Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue. YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w)
{
    thread_t td;
    int count;

    lwkt_gettoken(&w->wa_token);
    ++w->wa_gen;
    count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_cpu == mycpu->gd_cpuid) {
            _lwkt_enqueue(td);
        } else {
#if 0
            lwkt_cpu_msg_union_t msg = lwkt_getcpumsg();
            initScheduleReqMsg_Thread(&msg.mu_SchedReq, td);
            cpu_sendnormsg(&msg.mu_Msg);
#endif
            panic("lwkt_signal: cpu mismatch");
        }
        lwkt_regettoken(&w->wa_token);
    }
    lwkt_reltoken(&w->wa_token);
}
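
/*
 * Illustrative sketch (not compiled): a consumer blocking on a wait
 * structure until a producer signals it. The generation number closes the
 * race where lwkt_signal() runs between the availability check and the
 * call to lwkt_block(). 'my_wait', 'my_work_available()' and the struct
 * tag 'lwkt_wait' (assumed to be what lwkt_wait_t points at) are
 * assumptions for illustration.
 */
#if 0
static struct lwkt_wait my_wait;    /* lwkt_init_wait(&my_wait) at init time */

static void
my_consumer(void)
{
    int gen = my_wait.wa_gen;

    while (my_work_available() == 0)
        lwkt_block(&my_wait, "mywait", &gen);
    /* ... consume the work ... */
}

static void
my_producer(void)
{
    /* ... queue the work ... */
    lwkt_signal(&my_wait);
}
#endif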

/*
 * Acquire ownership of a token
 *
 * Acquire ownership of a token. The token may have spl and/or critical
 * section side effects, depending on its purpose. These side effects
 * guarantee that you will maintain ownership of the token as long as you
 * do not block. If you block you may lose access to the token (but you
 * must still release it even if you lose your access to it).
 *
 * Note that the spl and critical section characteristics of a token
 * may not be changed once the token has been initialized.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
    /*
     * Prevent preemption so the token can't be taken away from us once
     * we gain ownership of it. Use a synchronous request which might
     * block. The request will be forwarded as necessary playing catchup
     * to the token.
     */
    crit_enter();
#if 0
    while (tok->t_cpu != mycpu->gd_cpuid) {
        lwkt_cpu_msg_union msg;
        initTokenReqMsg(&msg.mu_TokenReq);
        cpu_domsg(&msg);
    }
#endif
    /*
     * leave us in a critical section on return. This will be undone
     * by lwkt_reltoken()
     */
}

/*
 * Release your ownership of a token. Releases must occur in reverse
 * order to acquisitions, eventually so priorities can be unwound properly
 * like SPLs. At the moment the actual implementation doesn't care.
 *
 * We can safely hand a token that we own to another cpu without notifying
 * it, but once we do we can't get it back without requesting it (unless
 * the other cpu hands it back to us before we check).
 *
 * We might have lost the token, so check that.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
    if (tok->t_cpu == mycpu->gd_cpuid) {
        tok->t_cpu = tok->t_reqcpu;
    }
    crit_exit();
}

/*
 * Reacquire a token that might have been lost. Returns 1 if we blocked
 * while reacquiring the token (meaning that you might have lost other
 * tokens you held when you made this call), returns 0 if we did not block.
 */
int
lwkt_regettoken(lwkt_token_t tok)
{
#if 0
    if (tok->t_cpu != mycpu->gd_cpuid) {
        while (tok->t_cpu != mycpu->gd_cpuid) {
            lwkt_cpu_msg_union msg;
            initTokenReqMsg(&msg.mu_TokenReq);
            cpu_domsg(&msg);
        }
        return(1);
    }
#endif
    return(0);
}
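
/*
 * Illustrative sketch (not compiled): a code fragment showing typical
 * token usage. Ownership is only guaranteed while the holder does not
 * block; after any potentially blocking operation the holder calls
 * lwkt_regettoken() to chase the token down again. 'my_token',
 * blocking_operation() and the struct tag 'lwkt_token' (assumed to be
 * what lwkt_token_t points at) are assumptions for illustration.
 */
#if 0
    static struct lwkt_token my_token;

    lwkt_gettoken(&my_token);       /* also enters a critical section */
    /* ... manipulate state protected by the token ... */
    blocking_operation();           /* we may lose the token here */
    lwkt_regettoken(&my_token);     /* returns 1 if we blocked reacquiring */
    /* ... manipulate protected state again ... */
    lwkt_reltoken(&my_token);       /* undoes the critical section */
#endif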

/*
 * Create a kernel process/thread/whatever. It shares its address space
 * with proc0 - ie: kernel only.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, struct thread *template, int tdflags,
    const char *fmt, ...)
{
    struct thread *td;
    va_list ap;

    td = *tdp = lwkt_alloc_thread(template);
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;

    /*
     * Set up arg0 for 'ps' etc
     */
    va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}
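
/*
 * Illustrative sketch (not compiled): creating and starting an LWKT-only
 * kernel thread with lwkt_create(). Passing a NULL template lets
 * lwkt_alloc_thread() allocate the thread and stack; my_service_loop(),
 * my_service_init() and my_service_td are hypothetical.
 */
#if 0
static struct thread *my_service_td;

static void
my_service_init(void)
{
    lwkt_create(my_service_loop, NULL, &my_service_td, NULL, 0,
        "myservice");
}
#endif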

/*
 * Destroy an LWKT thread. Warning! This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    crit_enter();
    lwkt_deschedule_self();
    ++mycpu->gd_tdfreecount;
    TAILQ_INSERT_TAIL(&mycpu->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}

/*
 * Create a kernel process/thread/whatever. It shares its address space
 * with proc0 - ie: kernel only. 5.x compatible.
 */
int
kthread_create(void (*func)(void *), void *arg,
    struct thread **tdp, const char *fmt, ...)
{
    struct thread *td;
    va_list ap;

    td = *tdp = lwkt_alloc_thread(NULL);
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE;

    /*
     * Set up arg0 for 'ps' etc
     */
    va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    va_end(ap);

    /*
     * Schedule the thread to run
     */
    lwkt_schedule(td);
    return 0;
}
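
/*
 * Illustrative sketch (not compiled): the 5.x compatible interface. The
 * created thread runs my_daemon() and terminates via kthread_exit();
 * my_daemon(), my_daemon_init() and my_daemon_td are hypothetical.
 */
#if 0
static struct thread *my_daemon_td;

static void
my_daemon(void *arg)
{
    for (;;) {
        /* ... do periodic work, call kthread_exit() to terminate ... */
    }
}

static void
my_daemon_init(void)
{
    kthread_create(my_daemon, NULL, &my_daemon_td, "mydaemon");
}
#endif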

/*
 * Destroy an LWKT thread. Warning! This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 *
 * XXX duplicates lwkt_exit()
 */
void
kthread_exit(void)
{
    lwkt_exit();
}