/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Each cpu in a system has its own self-contained lightweight kernel
 * thread scheduler, which means that, generally speaking, we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
46 #include <sys/rtprio.h>
47 #include <sys/kinfo.h>
48 #include <sys/queue.h>
49 #include <sys/sysctl.h>
50 #include <sys/kthread.h>
51 #include <machine/cpu.h>
54 #include <sys/spinlock.h>
57 #include <sys/thread2.h>
58 #include <sys/spinlock2.h>
59 #include <sys/mplock2.h>
61 #include <sys/dsched.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_pager.h>
70 #include <vm/vm_extern.h>
72 #include <machine/stdarg.h>
73 #include <machine/smp.h>
#if !defined(KTR_CTXSW)
#define KTR_CTXSW	KTR_ALL
#endif
78 KTR_INFO_MASTER(ctxsw);
79 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
80 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
81 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
82 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);
84 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
87 static int panic_on_cscount = 0;
89 static __int64_t switch_count = 0;
90 static __int64_t preempt_hit = 0;
91 static __int64_t preempt_miss = 0;
92 static __int64_t preempt_weird = 0;
93 static __int64_t token_contention_count[TDPRI_MAX+1] __debugvar;
94 static int lwkt_use_spin_port;
95 static struct objcache *thread_cache;
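/*
 * IPI handlers: lwkt_schedule_remote() and lwkt_setcpu_remote() run on the
 * target cpu (dispatched via lwkt_send_ipiq*()) to complete remote
 * scheduling and thread migration requests.
 */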
98 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
99 static void lwkt_setcpu_remote(void *arg);
102 extern void cpu_heavy_restore(void);
103 extern void cpu_lwkt_restore(void);
104 extern void cpu_kthread_restore(void);
105 extern void cpu_idle_restore(void);
108 * We can make all thread ports use the spin backend instead of the thread
109 * backend. This should only be set to debug the spin backend.
111 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
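/*
 * (Illustrative note, not from the original source:) being a TUNABLE_INT,
 * this would normally be set at boot time from loader.conf, e.g.
 * lwkt.use_spin_port="1", and only for debugging the spin backend.
 */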
114 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
115 "Panic if attempting to switch lwkt's while mastering cpusync");
117 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
118 "Number of switched threads");
119 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
120 "Successful preemption events");
121 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
122 "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
    "Number of weird (aborted) preemption events");
126 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_00, CTLFLAG_RW,
127 &token_contention_count[0], 0, "spinning due to token contention");
128 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_01, CTLFLAG_RW,
129 &token_contention_count[1], 0, "spinning due to token contention");
130 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_02, CTLFLAG_RW,
131 &token_contention_count[2], 0, "spinning due to token contention");
132 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_03, CTLFLAG_RW,
133 &token_contention_count[3], 0, "spinning due to token contention");
134 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_04, CTLFLAG_RW,
135 &token_contention_count[4], 0, "spinning due to token contention");
136 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_05, CTLFLAG_RW,
137 &token_contention_count[5], 0, "spinning due to token contention");
138 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_06, CTLFLAG_RW,
139 &token_contention_count[6], 0, "spinning due to token contention");
140 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_07, CTLFLAG_RW,
141 &token_contention_count[7], 0, "spinning due to token contention");
142 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_08, CTLFLAG_RW,
143 &token_contention_count[8], 0, "spinning due to token contention");
144 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_09, CTLFLAG_RW,
145 &token_contention_count[9], 0, "spinning due to token contention");
146 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_10, CTLFLAG_RW,
147 &token_contention_count[10], 0, "spinning due to token contention");
148 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_11, CTLFLAG_RW,
149 &token_contention_count[11], 0, "spinning due to token contention");
150 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_12, CTLFLAG_RW,
151 &token_contention_count[12], 0, "spinning due to token contention");
152 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_13, CTLFLAG_RW,
153 &token_contention_count[13], 0, "spinning due to token contention");
154 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_14, CTLFLAG_RW,
155 &token_contention_count[14], 0, "spinning due to token contention");
156 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_15, CTLFLAG_RW,
157 &token_contention_count[15], 0, "spinning due to token contention");
158 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_16, CTLFLAG_RW,
159 &token_contention_count[16], 0, "spinning due to token contention");
160 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_17, CTLFLAG_RW,
161 &token_contention_count[17], 0, "spinning due to token contention");
162 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_18, CTLFLAG_RW,
163 &token_contention_count[18], 0, "spinning due to token contention");
164 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_19, CTLFLAG_RW,
165 &token_contention_count[19], 0, "spinning due to token contention");
166 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_20, CTLFLAG_RW,
167 &token_contention_count[20], 0, "spinning due to token contention");
168 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_21, CTLFLAG_RW,
169 &token_contention_count[21], 0, "spinning due to token contention");
170 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_22, CTLFLAG_RW,
171 &token_contention_count[22], 0, "spinning due to token contention");
172 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_23, CTLFLAG_RW,
173 &token_contention_count[23], 0, "spinning due to token contention");
174 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_24, CTLFLAG_RW,
175 &token_contention_count[24], 0, "spinning due to token contention");
176 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_25, CTLFLAG_RW,
177 &token_contention_count[25], 0, "spinning due to token contention");
178 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_26, CTLFLAG_RW,
179 &token_contention_count[26], 0, "spinning due to token contention");
180 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_27, CTLFLAG_RW,
181 &token_contention_count[27], 0, "spinning due to token contention");
182 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_28, CTLFLAG_RW,
183 &token_contention_count[28], 0, "spinning due to token contention");
184 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_29, CTLFLAG_RW,
185 &token_contention_count[29], 0, "spinning due to token contention");
186 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_30, CTLFLAG_RW,
187 &token_contention_count[30], 0, "spinning due to token contention");
188 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_31, CTLFLAG_RW,
189 &token_contention_count[31], 0, "spinning due to token contention");
191 static int fairq_enable = 0;
192 SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW,
193 &fairq_enable, 0, "Turn on fairq priority accumulators");
194 static int fairq_bypass = -1;
195 SYSCTL_INT(_lwkt, OID_AUTO, fairq_bypass, CTLFLAG_RW,
196 &fairq_bypass, 0, "Allow fairq to bypass td on token failure");
197 extern int lwkt_sched_debug;
198 int lwkt_sched_debug = 0;
199 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
200 &lwkt_sched_debug, 0, "Scheduler debug");
201 static int lwkt_spin_loops = 10;
202 SYSCTL_INT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
203 &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
204 static int lwkt_spin_reseq = 0;
205 SYSCTL_INT(_lwkt, OID_AUTO, spin_reseq, CTLFLAG_RW,
206 &lwkt_spin_reseq, 0, "Scheduler resequencer enable");
207 static int lwkt_spin_monitor = 0;
208 SYSCTL_INT(_lwkt, OID_AUTO, spin_monitor, CTLFLAG_RW,
209 &lwkt_spin_monitor, 0, "Scheduler uses monitor/mwait");
210 static int lwkt_spin_fatal = 0; /* disabled */
211 SYSCTL_INT(_lwkt, OID_AUTO, spin_fatal, CTLFLAG_RW,
212 &lwkt_spin_fatal, 0, "LWKT scheduler spin loops till fatal panic");
213 static int preempt_enable = 1;
214 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
215 &preempt_enable, 0, "Enable preemption");
216 static int lwkt_cache_threads = 0;
217 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
218 &lwkt_cache_threads, 0, "thread+kstack cache");
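/*
 * Ring indices for the experimental scheduler resequencer (see the
 * lwkt_spin_reseq handling in lwkt_switch()).  A cpu takes a ticket from
 * cseq_windex and spins until cseq_rindex catches up to it.
 */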
220 static __cachealign int lwkt_cseq_rindex;
221 static __cachealign int lwkt_cseq_windex;
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
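/*
 * Remove the thread from its cpu's run queue.  When the queue becomes
 * empty, RQF_RUNNING is cleared so the idle thread is allowed to halt.
 */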
234 _lwkt_dequeue(thread_t td)
236 if (td->td_flags & TDF_RUNQ) {
237 struct globaldata *gd = td->td_gd;
239 td->td_flags &= ~TDF_RUNQ;
240 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
241 if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
242 atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
249 * NOTE: There are a limited number of lwkt threads runnable since user
250 * processes only schedule one at a time per cpu.
254 _lwkt_enqueue(thread_t td)
258 if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
259 struct globaldata *gd = td->td_gd;
261 td->td_flags |= TDF_RUNQ;
262 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
264 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
265 atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
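/*
 * Otherwise insert in priority order (highest td_pri first); threads of
 * equal priority are queued FIFO behind the ones already present.
 */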
267 while (xtd && xtd->td_pri >= td->td_pri)
268 xtd = TAILQ_NEXT(xtd, td_threadq);
270 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
272 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
276 * Request a LWKT reschedule if we are now at the head of the queue.
278 if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
284 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
286 struct thread *td = (struct thread *)obj;
288 td->td_kstack = NULL;
289 td->td_kstack_size = 0;
290 td->td_flags = TDF_ALLOCATED_THREAD;
296 _lwkt_thread_dtor(void *obj, void *privdata)
298 struct thread *td = (struct thread *)obj;
300 KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
301 ("_lwkt_thread_dtor: not allocated from objcache"));
302 KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
303 td->td_kstack_size > 0,
304 ("_lwkt_thread_dtor: corrupted stack"));
305 kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
306 td->td_kstack = NULL;
 * Initialize the lwkt subsystem.
 *
 * Nominally cache up to 32 thread + kstack structures.  Cache more on
 * systems with a lot of cpu cores.
 */
319 TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
320 if (lwkt_cache_threads == 0) {
321 lwkt_cache_threads = ncpus * 4;
322 if (lwkt_cache_threads < 32)
323 lwkt_cache_threads = 32;
325 thread_cache = objcache_create_mbacked(
326 M_THREAD, sizeof(struct thread),
327 NULL, lwkt_cache_threads,
328 _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
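/*
 * The objcache above backs the fast paths in lwkt_alloc_thread() and
 * lwkt_free_thread(), caching thread structures together with their
 * kernel stacks.
 */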
332 * Schedule a thread to run. As the current thread we can always safely
333 * schedule ourselves, and a shortcut procedure is provided for that
336 * (non-blocking, self contained on a per cpu basis)
339 lwkt_schedule_self(thread_t td)
341 KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
342 crit_enter_quick(td);
343 KASSERT(td != &td->td_gd->gd_idlethread,
344 ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
345 KKASSERT(td->td_lwp == NULL ||
346 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
352 * Deschedule a thread.
354 * (non-blocking, self contained on a per cpu basis)
357 lwkt_deschedule_self(thread_t td)
359 crit_enter_quick(td);
365 * LWKTs operate on a per-cpu basis
367 * WARNING! Called from early boot, 'mycpu' may not work yet.
370 lwkt_gdinit(struct globaldata *gd)
372 TAILQ_INIT(&gd->gd_tdrunq);
373 TAILQ_INIT(&gd->gd_tdallq);
377 * Create a new thread. The thread must be associated with a process context
378 * or LWKT start address before it can be scheduled. If the target cpu is
379 * -1 the thread will be created on the current cpu.
381 * If you intend to create a thread without a process context this function
382 * does everything except load the startup and switcher function.
385 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
387 static int cpu_rotator;
388 globaldata_t gd = mycpu;
392 * If static thread storage is not supplied allocate a thread. Reuse
393 * a cached free thread if possible. gd_freetd is used to keep an exiting
394 * thread intact through the exit.
398 if ((td = gd->gd_freetd) != NULL) {
399 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
401 gd->gd_freetd = NULL;
403 td = objcache_get(thread_cache, M_WAITOK);
404 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
408 KASSERT((td->td_flags &
409 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
410 TDF_ALLOCATED_THREAD,
411 ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
412 flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
416 * Try to reuse cached stack.
418 if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
419 if (flags & TDF_ALLOCATED_STACK) {
420 kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
425 stack = (void *)kmem_alloc_stack(&kernel_map, stksize);
426 flags |= TDF_ALLOCATED_STACK;
433 lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
438 * Initialize a preexisting thread structure. This function is used by
439 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
441 * All threads start out in a critical section at a priority of
442 * TDPRI_KERN_DAEMON. Higher level code will modify the priority as
443 * appropriate. This function may send an IPI message when the
444 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the
 * originating cpu.
 *
 * NOTE! We have to be careful with regard to creating threads for other
 * cpus if SMP has not yet been activated.
 */
454 lwkt_init_thread_remote(void *arg)
459 * Protected by critical section held by IPI dispatch
461 TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
467 * lwkt core thread structural initialization.
469 * NOTE: All threads are initialized as mpsafe threads.
472 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
473 struct globaldata *gd)
475 globaldata_t mygd = mycpu;
477 bzero(td, sizeof(struct thread));
478 td->td_kstack = stack;
479 td->td_kstack_size = stksize;
480 td->td_flags = flags;
483 td->td_pri = TDPRI_KERN_DAEMON;
484 td->td_critcount = 1;
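/*
 * Token bookkeeping: td_toks_stop pointing at td_toks_base means the
 * thread holds no tokens (see TD_TOKS_HELD()/TD_TOKS_NOT_HELD()).
 */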
485 td->td_toks_have = NULL;
486 td->td_toks_stop = &td->td_toks_base;
487 if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT))
488 lwkt_initport_spin(&td->td_msgport);
490 lwkt_initport_thread(&td->td_msgport, td);
491 pmap_init_thread(td);
 * Normally initializing a thread for a remote cpu requires sending an
 * IPI.  However, the idlethread is set up before the other cpus are
 * activated so we have to treat it as a special case.  XXX manipulation
 * of gd_tdallq requires the BGL.
 */
499 if (gd == mygd || td == &gd->gd_idlethread) {
501 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
504 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
508 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
512 dsched_new_thread(td);
516 lwkt_set_comm(thread_t td, const char *ctl, ...)
521 kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
523 KTR_LOG(ctxsw_newtd, td, td->td_comm);
527 * Prevent the thread from getting destroyed. Note that unlike PHOLD/PRELE
528 * this does not prevent the thread from migrating to another cpu so the
529 * gd_tdallq state is not protected by this.
532 lwkt_hold(thread_t td)
534 atomic_add_int(&td->td_refs, 1);
538 lwkt_rele(thread_t td)
540 KKASSERT(td->td_refs > 0);
541 atomic_add_int(&td->td_refs, -1);
545 lwkt_free_thread(thread_t td)
547 KKASSERT(td->td_refs == 0);
548 KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
549 TDF_RUNQ | TDF_TSLEEPQ)) == 0);
550 if (td->td_flags & TDF_ALLOCATED_THREAD) {
551 objcache_put(thread_cache, td);
552 } else if (td->td_flags & TDF_ALLOCATED_STACK) {
553 /* client-allocated struct with internally allocated stack */
554 KASSERT(td->td_kstack && td->td_kstack_size > 0,
555 ("lwkt_free_thread: corrupted stack"));
556 kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
557 td->td_kstack = NULL;
558 td->td_kstack_size = 0;
560 KTR_LOG(ctxsw_deadtd, td);
565 * Switch to the next runnable lwkt. If no LWKTs are runnable then
566 * switch to the idlethread. Switching must occur within a critical
567 * section to avoid races with the scheduling queue.
569 * We always have full control over our cpu's run queue. Other cpus
570 * that wish to manipulate our queue must use the cpu_*msg() calls to
571 * talk to our cpu, so a critical section is all that is needed and
572 * the result is very, very fast thread switching.
574 * The LWKT scheduler uses a fixed priority model and round-robins at
575 * each priority level. User process scheduling is a totally
576 * different beast and LWKT priorities should not be confused with
577 * user process priorities.
579 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt(). lwkt_switch()
580 * is not called by the current thread in the preemption case, only when
581 * the preempting thread blocks (in order to return to the original thread).
 * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
584 * migration and tsleep deschedule the current lwkt thread and call
585 * lwkt_switch(). In particular, the target cpu of the migration fully
586 * expects the thread to become non-runnable and can deadlock against
587 * cpusync operations if we run any IPIs prior to switching the thread out.
589 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
590 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
595 globaldata_t gd = mycpu;
596 thread_t td = gd->gd_curthread;
600 KKASSERT(gd->gd_processing_ipiq == 0);
601 KKASSERT(td->td_flags & TDF_RUNNING);
 * Switching from within a 'fast' (non-thread-switched) interrupt or IPI
 * is illegal.  However, we may have to do it anyway if we hit a fatal
 * kernel trap or we have panicked.
 *
 * If this case occurs, save and restore the interrupt nesting level.
 */
610 if (gd->gd_intr_nesting_level) {
614 if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
615 panic("lwkt_switch: Attempt to switch from a "
616 "fast interrupt, ipi, or hard code section, "
620 savegdnest = gd->gd_intr_nesting_level;
621 savegdtrap = gd->gd_trap_nesting_level;
622 gd->gd_intr_nesting_level = 0;
623 gd->gd_trap_nesting_level = 0;
624 if ((td->td_flags & TDF_PANICWARN) == 0) {
625 td->td_flags |= TDF_PANICWARN;
626 kprintf("Warning: thread switch from interrupt, IPI, "
627 "or hard code section.\n"
628 "thread %p (%s)\n", td, td->td_comm);
632 gd->gd_intr_nesting_level = savegdnest;
633 gd->gd_trap_nesting_level = savegdtrap;
639 * Release our current user process designation if we are blocking
640 * or if a user reschedule was requested.
642 * NOTE: This function is NOT called if we are switching into or
643 * returning from a preemption.
645 * NOTE: Releasing our current user process designation may cause
646 * it to be assigned to another thread, which in turn will
647 * cause us to block in the usched acquire code when we attempt
648 * to return to userland.
650 * NOTE: On SMP systems this can be very nasty when heavy token
651 * contention is present so we want to be careful not to
652 * release the designation gratuitously.
654 if (td->td_release &&
655 (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
663 if (TD_TOKS_HELD(td))
664 lwkt_relalltokens(td);
667 * We had better not be holding any spin locks, but don't get into an
668 * endless panic loop.
670 KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
671 ("lwkt_switch: still holding %d exclusive spinlocks!",
672 gd->gd_spinlocks_wr));
677 if (td->td_cscount) {
678 kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
680 if (panic_on_cscount)
681 panic("switching while mastering cpusync");
 * If we had preempted another thread on this cpu, resume the preempted
 * thread.  This occurs transparently, whether the preempted thread
 * was scheduled or not (it may have been preempted after descheduling
 * itself).
 *
 * We have to set up the MP lock for the original thread after backing
 * out the adjustment that was made to curthread when the original
 * thread was preempted.
 */
696 if ((ntd = td->td_preempted) != NULL) {
697 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
698 ntd->td_flags |= TDF_PREEMPT_DONE;
 * The interrupt may have woken a thread up; we need to properly
 * set the reschedule flag if the originally interrupted thread is
 * at a lower priority.
705 * The interrupt may not have descheduled.
707 if (TAILQ_FIRST(&gd->gd_tdrunq) != ntd)
709 goto havethread_preempted;
713 * If we cannot obtain ownership of the tokens we cannot immediately
714 * schedule the target thread.
716 * Reminder: Again, we cannot afford to run any IPIs in this path if
717 * the current thread has been descheduled.
720 clear_lwkt_resched();
723 * Hotpath - pull the head of the run queue and attempt to schedule
727 ntd = TAILQ_FIRST(&gd->gd_tdrunq);
731 * Runq is empty, switch to idle to allow it to halt.
733 ntd = &gd->gd_idlethread;
735 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
736 ASSERT_NO_TOKENS_HELD(ntd);
738 cpu_time.cp_msg[0] = 0;
739 cpu_time.cp_stallpc = 0;
746 * Hotpath - schedule ntd.
748 * NOTE: For UP there is no mplock and lwkt_getalltokens()
751 if (TD_TOKS_NOT_HELD(ntd) ||
752 lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops)))
758 * Coldpath (SMP only since tokens always succeed on UP)
760 * We had some contention on the thread we wanted to schedule.
 * What we do now is try to find a thread that we can schedule
 * in its place.
 *
764 * The coldpath scan does NOT rearrange threads in the run list.
765 * The lwkt_schedulerclock() will assert need_lwkt_resched() on
766 * the next tick whenever the current head is not the current thread.
769 ++token_contention_count[ntd->td_pri];
773 if (fairq_bypass > 0)
776 while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
778 * Never schedule threads returning to userland or the
779 * user thread scheduler helper thread when higher priority
780 * threads are present.
782 if (ntd->td_pri < TDPRI_KERN_LPSCHED) {
790 if (TD_TOKS_NOT_HELD(ntd) ||
791 lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops))) {
795 ++token_contention_count[ntd->td_pri];
 * We exhausted the run list, meaning that all runnable threads
 * are contested.
 */
806 ntd = &gd->gd_idlethread;
808 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
809 ASSERT_NO_TOKENS_HELD(ntd);
810 /* contention case, do not clear contention mask */
814 * We are going to have to retry but if the current thread is not
815 * on the runq we instead switch through the idle thread to get away
816 * from the current thread. We have to flag for lwkt reschedule
817 * to prevent the idle thread from halting.
819 * NOTE: A non-zero spinning is passed to lwkt_getalltokens() to
820 * instruct it to deal with the potential for deadlocks by
821 * ordering the tokens by address.
823 if ((td->td_flags & TDF_RUNQ) == 0) {
824 need_lwkt_resched(); /* prevent hlt */
827 #if defined(INVARIANTS) && defined(__amd64__)
828 if ((read_rflags() & PSL_I) == 0) {
830 panic("lwkt_switch() called with interrupts disabled");
 * Number of iterations so far.  After a certain point we switch to
 * a sorted-address/monitor/mwait version of lwkt_getalltokens().
 */
838 if (spinning < 0x7FFFFFFF)
 * lwkt_getalltokens() failed in sorted token mode; we can use
 * monitor/mwait in this case.
 */
846 if (spinning >= lwkt_spin_loops &&
847 (cpu_mi_feature & CPU_MI_MONITOR) &&
850 cpu_mmw_pause_int(&gd->gd_reqflags,
851 (gd->gd_reqflags | RQF_SPINNING) &
852 ~RQF_IDLECHECK_WK_MASK);
 * We already checked that td is still scheduled so this should be
 * safe.
 */
/*
 * This experimental resequencer is used as a fallback to reduce
 * hw cache line contention by placing each core's scheduler into a
 * time-domain-multiplexed slot.
 *
 * The resequencer is disabled by default.  Its functionality has
 * largely been superseded by the token algorithm, which limits races
 * to a subset of cores.
 *
 * The resequencer algorithm tends to break down when more than
 * 20 cores are contending.  What appears to happen is that new
 * tokens can be obtained out of address-sorted order by new cores
 * while existing cores languish in long delays between retries and
 * wind up being starved out of token acquisition.
 */
877 if (lwkt_spin_reseq && spinning >= lwkt_spin_reseq) {
878 int cseq = atomic_fetchadd_int(&lwkt_cseq_windex, 1);
881 while ((oseq = lwkt_cseq_rindex) != cseq) {
884 if (cpu_mi_feature & CPU_MI_MONITOR) {
885 cpu_mmw_pause_int(&lwkt_cseq_rindex, oseq);
895 atomic_add_int(&lwkt_cseq_rindex, 1);
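/*
 * Advancing lwkt_cseq_rindex above releases our resequencer slot so the
 * next cpu spinning on its ticket can proceed.
 */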
897 /* highest level for(;;) loop */
 * Clear gd_idle_repeat when doing a normal switch to a non-idle
 * thread.
 */
905 ntd->td_wmesg = NULL;
906 ++gd->gd_cnt.v_swtch;
907 gd->gd_idle_repeat = 0;
909 havethread_preempted:
911 * If the new target does not need the MP lock and we are holding it,
912 * release the MP lock. If the new target requires the MP lock we have
913 * already acquired it for the target.
917 KASSERT(ntd->td_critcount,
918 ("priority problem in lwkt_switch %d %d",
919 td->td_critcount, ntd->td_critcount));
923 * Execute the actual thread switch operation. This function
924 * returns to the current thread and returns the previous thread
925 * (which may be different from the thread we switched to).
927 * We are responsible for marking ntd as TDF_RUNNING.
929 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
931 KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
932 ntd->td_flags |= TDF_RUNNING;
933 lwkt_switch_return(td->td_switch(ntd));
934 /* ntd invalid, td_switch() can return a different thread_t */
938 * catch-all. XXX is this strictly needed?
942 /* NOTE: current cpu may have changed after switch */
947 * Called by assembly in the td_switch (thread restore path) for thread
948 * bootstrap cases which do not 'return' to lwkt_switch().
951 lwkt_switch_return(thread_t otd)
957 * Check if otd was migrating. Now that we are on ntd we can finish
958 * up the migration. This is a bit messy but it is the only place
959 * where td is known to be fully descheduled.
961 * We can only activate the migration if otd was migrating but not
962 * held on the cpu due to a preemption chain. We still have to
963 * clear TDF_RUNNING on the old thread either way.
 * We are responsible for clearing the previously running thread's
 * TDF_RUNNING.
 */
968 if ((rgd = otd->td_migrate_gd) != NULL &&
969 (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
970 KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
971 (TDF_MIGRATING | TDF_RUNNING));
972 otd->td_migrate_gd = NULL;
973 otd->td_flags &= ~TDF_RUNNING;
974 lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
976 otd->td_flags &= ~TDF_RUNNING;
979 otd->td_flags &= ~TDF_RUNNING;
984 * Request that the target thread preempt the current thread. Preemption
985 * can only occur if our only critical section is the one that we were called
986 * with, the relative priority of the target thread is higher, and the target
987 * thread holds no tokens. This also only works if we are not holding any
988 * spinlocks (obviously).
990 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically
991 * this is called via lwkt_schedule() through the td_preemptable callback.
992 * critcount is the managed critical priority that we should ignore in order
993 * to determine whether preemption is possible (aka usually just the crit
994 * priority of lwkt_schedule() itself).
996 * Preemption is typically limited to interrupt threads.
 * Operation works in a fairly straightforward manner.  The normal
999 * scheduling code is bypassed and we switch directly to the target
1000 * thread. When the target thread attempts to block or switch away
1001 * code at the base of lwkt_switch() will switch directly back to our
1002 * thread. Our thread is able to retain whatever tokens it holds and
1003 * if the target needs one of them the target will switch back to us
1004 * and reschedule itself normally.
1007 lwkt_preempt(thread_t ntd, int critcount)
1009 struct globaldata *gd = mycpu;
1012 int save_gd_intr_nesting_level;
1015 * The caller has put us in a critical section. We can only preempt
1016 * if the caller of the caller was not in a critical section (basically
1017 * a local interrupt), as determined by the 'critcount' parameter. We
1018 * also can't preempt if the caller is holding any spinlocks (even if
1019 * he isn't in a critical section). This also handles the tokens test.
1021 * YYY The target thread must be in a critical section (else it must
1022 * inherit our critical section? I dunno yet).
1024 KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));
1026 td = gd->gd_curthread;
1027 if (preempt_enable == 0) {
1031 if (ntd->td_pri <= td->td_pri) {
1035 if (td->td_critcount > critcount) {
1040 if (td->td_cscount) {
1044 if (ntd->td_gd != gd) {
1050 * We don't have to check spinlocks here as they will also bump
1053 * Do not try to preempt if the target thread is holding any tokens.
1054 * We could try to acquire the tokens but this case is so rare there
1055 * is no need to support it.
1057 KKASSERT(gd->gd_spinlocks_wr == 0);
1059 if (TD_TOKS_HELD(ntd)) {
1063 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
1067 if (ntd->td_preempted) {
1071 KKASSERT(gd->gd_processing_ipiq == 0);
1074 * Since we are able to preempt the current thread, there is no need to
1075 * call need_lwkt_resched().
1077 * We must temporarily clear gd_intr_nesting_level around the switch
1078 * since switchouts from the target thread are allowed (they will just
1079 * return to our thread), and since the target thread has its own stack.
1081 * A preemption must switch back to the original thread, assert the
1085 ntd->td_preempted = td;
1086 td->td_flags |= TDF_PREEMPT_LOCK;
1087 KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
1088 save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
1089 gd->gd_intr_nesting_level = 0;
1091 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
1092 ntd->td_flags |= TDF_RUNNING;
1093 xtd = td->td_switch(ntd);
1094 KKASSERT(xtd == ntd);
1095 lwkt_switch_return(xtd);
1096 gd->gd_intr_nesting_level = save_gd_intr_nesting_level;
1098 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
1099 ntd->td_preempted = NULL;
1100 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
1104 * Conditionally call splz() if gd_reqflags indicates work is pending.
 * This will work inside a critical section but not inside a hard code
 * section.
 *
1108 * (self contained on a per cpu basis)
1113 globaldata_t gd = mycpu;
1114 thread_t td = gd->gd_curthread;
1116 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
1117 gd->gd_intr_nesting_level == 0 &&
1118 td->td_nest_count < 2)
1125 * This version is integrated into crit_exit, reqflags has already
1126 * been tested but td_critcount has not.
1128 * We only want to execute the splz() on the 1->0 transition of
1129 * critcount and not in a hard code section or if too deeply nested.
1131 * NOTE: gd->gd_spinlocks_wr is implied to be 0 when td_critcount is 0.
1134 lwkt_maybe_splz(thread_t td)
1136 globaldata_t gd = td->td_gd;
1138 if (td->td_critcount == 0 &&
1139 gd->gd_intr_nesting_level == 0 &&
1140 td->td_nest_count < 2)
 * Drivers which set up processing co-threads can call this function to
 * run the co-thread at a higher priority and to allow it to preempt
 * normal threads.
 */
1152 lwkt_set_interrupt_support_thread(void)
1154 thread_t td = curthread;
1156 lwkt_setpri_self(TDPRI_INT_SUPPORT);
1157 td->td_flags |= TDF_INTTHREAD;
1158 td->td_preemptable = lwkt_preempt;
1163 * This function is used to negotiate a passive release of the current
1164 * process/lwp designation with the user scheduler, allowing the user
1165 * scheduler to schedule another user thread. The related kernel thread
1166 * (curthread) continues running in the released state.
1169 lwkt_passive_release(struct thread *td)
1171 struct lwp *lp = td->td_lwp;
1173 td->td_release = NULL;
1174 lwkt_setpri_self(TDPRI_KERN_USER);
1175 lp->lwp_proc->p_usched->release_curproc(lp);
1180 * This implements a LWKT yield, allowing a kernel thread to yield to other
1181 * kernel threads at the same or higher priority. This function can be
1182 * called in a tight loop and will typically only yield once per tick.
 * Most kernel threads run at the same priority in order to allow equal
 * sharing.
 *
1187 * (self contained on a per cpu basis)
1192 globaldata_t gd = mycpu;
1193 thread_t td = gd->gd_curthread;
1195 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1197 if (lwkt_resched_wanted()) {
1198 lwkt_schedule_self(curthread);
1204 * This yield is designed for kernel threads with a user context.
 * The kernel acting on behalf of the user is potentially cpu-bound;
 * this function will efficiently allow other threads to run and also
 * switch to other processes by releasing.
 *
1210 * The lwkt_user_yield() function is designed to have very low overhead
1211 * if no yield is determined to be needed.
1214 lwkt_user_yield(void)
1216 globaldata_t gd = mycpu;
1217 thread_t td = gd->gd_curthread;
 * Always run any pending interrupts in case we are in a critical
 * section.
 */
1223 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1227 * Switch (which forces a release) if another kernel thread needs
1228 * the cpu, if userland wants us to resched, or if our kernel
1229 * quantum has run out.
1231 if (lwkt_resched_wanted() ||
1232 user_resched_wanted())
1239 * Reacquire the current process if we are released.
 * XXX not implemented at the moment.  The kernel may be holding locks and such,
1242 * so we want the thread to continue to receive cpu.
1244 if (td->td_release == NULL && lp) {
1245 lp->lwp_proc->p_usched->acquire_curproc(lp);
1246 td->td_release = lwkt_passive_release;
1247 lwkt_setpri_self(TDPRI_USER_NORM);
1253 * Generic schedule. Possibly schedule threads belonging to other cpus and
1254 * deal with threads that might be blocked on a wait queue.
1256 * We have a little helper inline function which does additional work after
1257 * the thread has been enqueued, including dealing with preemption and
1258 * setting need_lwkt_resched() (which prevents the kernel from returning
1259 * to userland until it has processed higher priority threads).
1261 * It is possible for this routine to be called after a failed _enqueue
1262 * (due to the target thread migrating, sleeping, or otherwise blocked).
1263 * We have to check that the thread is actually on the run queue!
1267 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
1269 if (ntd->td_flags & TDF_RUNQ) {
1270 if (ntd->td_preemptable) {
1271 ntd->td_preemptable(ntd, ccount); /* YYY +token */
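/*
 * td_preemptable is normally lwkt_preempt() for interrupt support
 * threads (see lwkt_set_interrupt_support_thread()), giving the newly
 * scheduled thread a chance to preempt us immediately.
 */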
1278 _lwkt_schedule(thread_t td)
1280 globaldata_t mygd = mycpu;
1282 KASSERT(td != &td->td_gd->gd_idlethread,
1283 ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
1284 KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
1285 crit_enter_gd(mygd);
1286 KKASSERT(td->td_lwp == NULL ||
1287 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1289 if (td == mygd->gd_curthread) {
1293 * If we own the thread, there is no race (since we are in a
1294 * critical section). If we do not own the thread there might
1295 * be a race but the target cpu will deal with it.
1298 if (td->td_gd == mygd) {
1300 _lwkt_schedule_post(mygd, td, 1);
1302 lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
1306 _lwkt_schedule_post(mygd, td, 1);
1313 lwkt_schedule(thread_t td)
1319 lwkt_schedule_noresched(thread_t td) /* XXX not impl */
 * When scheduled remotely, if frame != NULL the IPIQ is being run via
 * doreti or an interrupt, and preemption can be allowed.
1330 * To allow preemption we have to drop the critical section so only
1331 * one is present in _lwkt_schedule_post.
1334 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
1336 thread_t td = curthread;
1339 if (frame && ntd->td_preemptable) {
1340 crit_exit_noyield(td);
1341 _lwkt_schedule(ntd);
1342 crit_enter_quick(td);
1344 _lwkt_schedule(ntd);
1349 * Thread migration using a 'Pull' method. The thread may or may not be
1350 * the current thread. It MUST be descheduled and in a stable state.
1351 * lwkt_giveaway() must be called on the cpu owning the thread.
1353 * At any point after lwkt_giveaway() is called, the target cpu may
1354 * 'pull' the thread by calling lwkt_acquire().
1356 * We have to make sure the thread is not sitting on a per-cpu tsleep
1357 * queue or it will blow up when it moves to another cpu.
1359 * MPSAFE - must be called under very specific conditions.
1362 lwkt_giveaway(thread_t td)
1364 globaldata_t gd = mycpu;
1367 if (td->td_flags & TDF_TSLEEPQ)
1369 KKASSERT(td->td_gd == gd);
1370 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
1371 td->td_flags |= TDF_MIGRATING;
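/*
 * Typical pull-migration sequence (sketch): the owning cpu calls
 * lwkt_giveaway(td), after which the target cpu may call
 * lwkt_acquire(td) and then schedule the thread locally.
 */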
1376 lwkt_acquire(thread_t td)
1380 int retry = 10000000;
1382 KKASSERT(td->td_flags & TDF_MIGRATING);
1387 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1388 crit_enter_gd(mygd);
1389 DEBUG_PUSH_INFO("lwkt_acquire");
1390 while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1392 lwkt_process_ipiq();
1396 kprintf("lwkt_acquire: stuck: td %p td->td_flags %08x\n",
1404 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1405 td->td_flags &= ~TDF_MIGRATING;
1408 crit_enter_gd(mygd);
1409 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1410 td->td_flags &= ~TDF_MIGRATING;
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
1425 lwkt_deschedule(thread_t td)
1429 if (td == curthread) {
1432 if (td->td_gd == mycpu) {
1435 lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 */
1450 lwkt_setpri(thread_t td, int pri)
1452 if (td->td_pri != pri) {
1455 if (td->td_flags & TDF_RUNQ) {
1456 KKASSERT(td->td_gd == mycpu);
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the
 * current one.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
 */
1477 lwkt_setpri_initial(thread_t td, int pri)
1480 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1485 lwkt_setpri_self(int pri)
1487 thread_t td = curthread;
1489 KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
1491 if (td->td_flags & TDF_RUNQ) {
1502 * hz tick scheduler clock for LWKT threads
1505 lwkt_schedulerclock(thread_t td)
1507 globaldata_t gd = td->td_gd;
1510 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
1512 * If the current thread is at the head of the runq shift it to the
1513 * end of any equal-priority threads and request a LWKT reschedule
1516 xtd = TAILQ_NEXT(td, td_threadq);
1517 if (xtd && xtd->td_pri == td->td_pri) {
1518 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
1519 while (xtd && xtd->td_pri == td->td_pri)
1520 xtd = TAILQ_NEXT(xtd, td_threadq);
1522 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
1524 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
1525 need_lwkt_resched();
1529 * If we scheduled a thread other than the one at the head of the
1530 * queue always request a reschedule every tick.
1532 need_lwkt_resched();
1537 * Migrate the current thread to the specified cpu.
1539 * This is accomplished by descheduling ourselves from the current cpu
1540 * and setting td_migrate_gd. The lwkt_switch() code will detect that the
1541 * 'old' thread wants to migrate after it has been completely switched out
1542 * and will complete the migration.
1544 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
1546 * We must be sure to release our current process designation (if a user
1547 * process) before clearing out any tsleepq we are on because the release
1548 * code may re-add us.
1550 * We must be sure to remove ourselves from the current cpu's tsleepq
1551 * before potentially moving to another queue. The thread can be on
1552 * a tsleepq due to a left-over tsleep_interlock().
1556 lwkt_setcpu_self(globaldata_t rgd)
1559 thread_t td = curthread;
1561 if (td->td_gd != rgd) {
1562 crit_enter_quick(td);
1566 if (td->td_flags & TDF_TSLEEPQ)
1570 * Set TDF_MIGRATING to prevent a spurious reschedule while we are
1571 * trying to deschedule ourselves and switch away, then deschedule
1572 * ourself, remove us from tdallq, and set td_migrate_gd. Finally,
1573 * call lwkt_switch() to complete the operation.
1575 td->td_flags |= TDF_MIGRATING;
1576 lwkt_deschedule_self(td);
1577 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1578 td->td_migrate_gd = rgd;
1582 * We are now on the target cpu
1584 KKASSERT(rgd == mycpu);
1585 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
1586 crit_exit_quick(td);
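/*
 * Convenience wrapper: look up the globaldata for the given cpuid and
 * migrate the current thread to that cpu.
 */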
1592 lwkt_migratecpu(int cpuid)
1597 rgd = globaldata_find(cpuid);
1598 lwkt_setcpu_self(rgd);
1604 * Remote IPI for cpu migration (called while in a critical section so we
1605 * do not have to enter another one).
1607 * The thread (td) has already been completely descheduled from the
1608 * originating cpu and we can simply assert the case. The thread is
1609 * assigned to the new cpu and enqueued.
1611 * The thread will re-add itself to tdallq when it resumes execution.
1614 lwkt_setcpu_remote(void *arg)
1617 globaldata_t gd = mycpu;
1619 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1622 td->td_flags &= ~TDF_MIGRATING;
1623 KKASSERT(td->td_migrate_gd == NULL);
1624 KKASSERT(td->td_lwp == NULL ||
1625 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
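/*
 * Walk the preemption chain to find the thread that was originally
 * preempted on this cpu.
 */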
1631 lwkt_preempted_proc(void)
1633 thread_t td = curthread;
1634 while (td->td_preempted)
1635 td = td->td_preempted;
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - i.e., kernel only.
 *
 * If the cpu is not specified one will be selected.  In the future
 * specifying a cpu of -1 will enable kernel thread migration between
 * cpus.
 */
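/*
 * Example usage (a sketch with hypothetical names, matching the signature
 * below):
 *
 *	thread_t td;
 *	lwkt_create(my_worker, my_arg, &td, NULL, 0, -1, "myworker");
 */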
1648 lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
1649 thread_t template, int tdflags, int cpu, const char *fmt, ...)
1654 td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
1658 cpu_set_thread_handler(td, lwkt_exit, func, arg);
1661 * Set up arg0 for 'ps' etc
1663 __va_start(ap, fmt);
1664 kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
1668 * Schedule the thread to run
1670 if (td->td_flags & TDF_NOSTART)
1671 td->td_flags &= ~TDF_NOSTART;
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
1685 thread_t td = curthread;
1690 * Do any cleanup that might block here
1692 if (td->td_flags & TDF_VERBOSE)
1693 kprintf("kthread %p %s has exited\n", td, td->td_comm);
1696 dsched_exit_thread(td);
1699 * Get us into a critical section to interlock gd_freetd and loop
1700 * until we can get it freed.
1702 * We have to cache the current td in gd_freetd because objcache_put()ing
1703 * it would rip it out from under us while our thread is still active.
1705 * We are the current thread so of course our own TDF_RUNNING bit will
1706 * be set, so unlike the lwp reap code we don't wait for it to clear.
1709 crit_enter_quick(td);
1712 tsleep(td, 0, "tdreap", 1);
1715 if ((std = gd->gd_freetd) != NULL) {
1716 KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1717 gd->gd_freetd = NULL;
1718 objcache_put(thread_cache, std);
1725 * Remove thread resources from kernel lists and deschedule us for
1726 * the last time. We cannot block after this point or we may end
1727 * up with a stale td on the tsleepq.
 * None of this may block; the critical section is the only thing
1730 * protecting tdallq and the only thing preventing new lwkt_hold()
1733 if (td->td_flags & TDF_TSLEEPQ)
1735 lwkt_deschedule_self(td);
1736 lwkt_remove_tdallq(td);
1737 KKASSERT(td->td_refs == 0);
1742 KKASSERT(gd->gd_freetd == NULL);
1743 if (td->td_flags & TDF_ALLOCATED_THREAD)
1749 lwkt_remove_tdallq(thread_t td)
1751 KKASSERT(td->td_gd == mycpu);
1752 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1756 * Code reduction and branch prediction improvements. Call/return
1757 * overhead on modern cpus often degenerates into 0 cycles due to
1758 * the cpu's branch prediction hardware and return pc cache. We
1759 * can take advantage of this by not inlining medium-complexity
1760 * functions and we can also reduce the branch prediction impact
1761 * by collapsing perfectly predictable branches into a single
1762 * procedure instead of duplicating it.
1764 * Is any of this noticeable? Probably not, so I'll take the
1765 * smaller code size.
1768 crit_exit_wrapper(__DEBUG_CRIT_ARG__)
1770 _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
1776 thread_t td = curthread;
1777 int lcrit = td->td_critcount;
1779 td->td_critcount = 0;
1780 panic("td_critcount is/would-go negative! %p %d", td, lcrit);
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section.
 *
 * If we are dumping, also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
1796 lwkt_smp_stopped(void)
1798 globaldata_t gd = mycpu;
1802 lwkt_process_ipiq();
1805 lwkt_process_ipiq();