sys/kern/lwkt_thread.c
/*
 * Copyright (c) 2003-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/kinfo.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#if !defined(KTR_CTXSW)
#define KTR_CTXSW KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p",
        sizeof(int) + sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p",
        sizeof(int) + sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s",
        sizeof(struct thread *) + sizeof(char *));
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", sizeof(struct thread *));

static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count __debugvar = 0;
static int lwkt_use_spin_port;
static struct objcache *thread_cache;

#ifdef SMP
static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
#endif
static void lwkt_fairq_accumulate(globaldata_t gd, thread_t td);

extern void cpu_heavy_restore(void);
extern void cpu_lwkt_restore(void);
extern void cpu_kthread_restore(void);
extern void cpu_idle_restore(void);

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
    "Successful preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
    "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
    &token_contention_count, 0, "spinning due to token contention");
#endif
static int fairq_enable = 1;
SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW, &fairq_enable, 0, "");
static int user_pri_sched = 0;
SYSCTL_INT(_lwkt, OID_AUTO, user_pri_sched, CTLFLAG_RW, &user_pri_sched, 0, "");
static int preempt_enable = 1;
SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW, &preempt_enable, 0, "");

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
        gd->gd_fairq_total_pri -= td->td_pri;
        if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
            atomic_clear_int_nonlocked(&gd->gd_reqflags, RQF_RUNNING);
    }
}

/*
 * Priority enqueue.
 *
 * NOTE: There are a limited number of lwkt threads runnable since user
 *       processes only schedule one at a time per cpu.
 */
static __inline
void
_lwkt_enqueue(thread_t td)
{
    thread_t xtd;

    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        xtd = TAILQ_FIRST(&gd->gd_tdrunq);
        if (xtd == NULL) {
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
            atomic_set_int_nonlocked(&gd->gd_reqflags, RQF_RUNNING);
        } else {
            while (xtd && xtd->td_pri > td->td_pri)
                xtd = TAILQ_NEXT(xtd, td_threadq);
            if (xtd)
                TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
            else
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
        }
        gd->gd_fairq_total_pri += td->td_pri;
    }
}

static __boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
        ("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
        td->td_kstack_size > 0,
        ("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
}

/*
 * Initialize the lwkt subsystem.
 */
void
lwkt_init(void)
{
    /* An objcache has 2 magazines per CPU so divide cache size by 2. */
    thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread),
                        NULL, CACHE_NTHREADS/2,
                        _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

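/*
 * Illustrative sketch (not part of this file): the usual way these two
 * primitives are paired.  A thread parks itself by descheduling and
 * switching away inside a critical section; some other context (an
 * interrupt thread, an IPI handler, etc.) later makes it runnable again
 * with lwkt_schedule().  The example_* names and the event_ready field
 * are hypothetical.
 */
#if 0
static void
example_wait_for_event(struct example_obj *obj)
{
    thread_t td = curthread;

    crit_enter();
    lwkt_deschedule_self(td);
    if (obj->event_ready == 0)
        lwkt_switch();              /* blocks until lwkt_schedule(td) */
    else
        lwkt_schedule_self(td);     /* event already arrived, stay runnable */
    crit_exit();
}
#endif
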
/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING! Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    TAILQ_INIT(&gd->gd_tdrunq);
    TAILQ_INIT(&gd->gd_tdallq);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    globaldata_t gd = mycpu;
    void *stack;

    /*
     * If static thread storage is not supplied allocate a thread.  Reuse
     * a cached free thread if possible.  gd_freetd is used to keep an exiting
     * thread intact through the exit.
     */
    if (td == NULL) {
        crit_enter_gd(gd);
        if ((td = gd->gd_freetd) != NULL) {
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
            gd->gd_freetd = NULL;
        } else {
            td = objcache_get(thread_cache, M_WAITOK);
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
        }
        crit_exit_gd(gd);
        KASSERT((td->td_flags &
                 (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD,
                ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
        flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
    }

    /*
     * Try to reuse cached stack.
     */
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
            kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
            stack = NULL;
        }
    }
    if (stack == NULL) {
        stack = (void *)kmem_alloc_stack(&kernel_map, stksize);
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, gd);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

/*
 * lwkt core thread structural initialization.
 *
 * NOTE: All threads are initialized as mpsafe threads.
 */
void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON;
    td->td_critcount = 1;
    td->td_toks_stop = &td->td_toks_base;
    if (lwkt_use_spin_port)
        lwkt_initport_spin(&td->td_msgport);
    else
        lwkt_initport_thread(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is setup before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif

    dsched_new_thread(td);
}

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
    KTR_LOG(ctxsw_newtd, td, &td->td_comm[0]);
}
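
/*
 * Illustrative sketch (not part of this file): lwkt_set_comm() takes a
 * kprintf-style format string, so per-instance thread names are typically
 * built in place.  The unit variable below is hypothetical.
 */
#if 0
    lwkt_set_comm(td, "helperd %d", unit);      /* names the thread "helperd 0", ... */
#endif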

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

void
lwkt_free_thread(thread_t td)
{
    KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_RUNQ)) == 0);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        objcache_put(thread_cache, td);
    } else if (td->td_flags & TDF_ALLOCATED_STACK) {
        /* client-allocated struct with internally allocated stack */
        KASSERT(td->td_kstack && td->td_kstack_size > 0,
            ("lwkt_free_thread: corrupted stack"));
        kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
        td->td_kstack = NULL;
        td->td_kstack_size = 0;
    }
    KTR_LOG(ctxsw_deadtd, td);
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount + td_xpcount.
 * lwkt_switch() cleans it up.
 *
 * Note that the td_switch() function cannot do anything that requires
 * the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
482void
483lwkt_switch(void)
484{
37af14fe
MD
485 globaldata_t gd = mycpu;
486 thread_t td = gd->gd_curthread;
8ad65e08 487 thread_t ntd;
f9235b6d
MD
488 thread_t xtd;
489 thread_t nlast;
f9235b6d 490 int nquserok;
6f207a2c 491#ifdef SMP
8a8d5d85
MD
492 int mpheld;
493#endif
f9235b6d 494 int didaccumulate;
b37f18d6
MD
495 const char *lmsg; /* diagnostic - 'systat -pv 1' */
496 const void *laddr;
8ad65e08 497
46a3f46d 498 /*
27e88a6e
MD
499 * Switching from within a 'fast' (non thread switched) interrupt or IPI
500 * is illegal. However, we may have to do it anyway if we hit a fatal
501 * kernel trap or we have paniced.
502 *
503 * If this case occurs save and restore the interrupt nesting level.
46a3f46d 504 */
27e88a6e
MD
505 if (gd->gd_intr_nesting_level) {
506 int savegdnest;
507 int savegdtrap;
508
5fddbda2 509 if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
4a28fe22
MD
510 panic("lwkt_switch: Attempt to switch from a "
511 "a fast interrupt, ipi, or hard code section, "
512 "td %p\n",
513 td);
27e88a6e
MD
514 } else {
515 savegdnest = gd->gd_intr_nesting_level;
516 savegdtrap = gd->gd_trap_nesting_level;
517 gd->gd_intr_nesting_level = 0;
518 gd->gd_trap_nesting_level = 0;
a7422615
MD
519 if ((td->td_flags & TDF_PANICWARN) == 0) {
520 td->td_flags |= TDF_PANICWARN;
4a28fe22
MD
521 kprintf("Warning: thread switch from interrupt, IPI, "
522 "or hard code section.\n"
a7422615 523 "thread %p (%s)\n", td, td->td_comm);
7ce2998e 524 print_backtrace(-1);
a7422615 525 }
27e88a6e
MD
526 lwkt_switch();
527 gd->gd_intr_nesting_level = savegdnest;
528 gd->gd_trap_nesting_level = savegdtrap;
529 return;
530 }
96728c05 531 }
ef0fdad1 532
cb973d15
MD
533 /*
534 * Passive release (used to transition from user to kernel mode
535 * when we block or switch rather than when we enter the kernel).
536 * This function is NOT called if we are switching into a preemption
537 * or returning from a preemption. Typically this causes us to lose
0a3f9b47
MD
538 * our current process designation (if we have one) and become a true
539 * LWKT thread, and may also hand the current process designation to
540 * another process and schedule thread.
cb973d15
MD
541 */
542 if (td->td_release)
543 td->td_release(td);
544
37af14fe 545 crit_enter_gd(gd);
3b998fa9 546 if (TD_TOKS_HELD(td))
9d265729
MD
547 lwkt_relalltokens(td);
548
549 /*
b02926de
MD
550 * We had better not be holding any spin locks, but don't get into an
551 * endless panic loop.
9d265729 552 */
d666840a
MD
553 KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
554 ("lwkt_switch: still holding %d exclusive spinlocks!",
555 gd->gd_spinlocks_wr));
9d265729 556
8a8d5d85
MD
557
558#ifdef SMP
559 /*
3933a3ab
MD
560 * td_mpcount + td_xpcount cannot be used to determine if we currently
561 * hold the MP lock because get_mplock() will increment it prior to
562 * attempting to get the lock, and switch out if it can't. Our
563 * ownership of the actual lock will remain stable while we are
564 * in a critical section, and once we actually acquire the underlying
565 * lock as long as the count is greater than 0.
8a8d5d85 566 */
c5724852 567 mpheld = MP_LOCK_HELD(gd);
0f7a3396
MD
568#ifdef INVARIANTS
569 if (td->td_cscount) {
6ea70f76 570 kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
0f7a3396
MD
571 td);
572 if (panic_on_cscount)
573 panic("switching while mastering cpusync");
574 }
575#endif
8a8d5d85 576#endif
f9235b6d
MD
577
578 /*
579 * If we had preempted another thread on this cpu, resume the preempted
580 * thread. This occurs transparently, whether the preempted thread
581 * was scheduled or not (it may have been preempted after descheduling
582 * itself).
583 *
584 * We have to setup the MP lock for the original thread after backing
585 * out the adjustment that was made to curthread when the original
586 * was preempted.
587 */
99df837e 588 if ((ntd = td->td_preempted) != NULL) {
26a0694b 589 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
8a8d5d85 590#ifdef SMP
3933a3ab 591 if (ntd->td_mpcount + ntd->td_xpcount && mpheld == 0) {
fc92d4aa 592 panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
3933a3ab 593 td, ntd, td->td_mpcount, ntd->td_mpcount + ntd->td_xpcount);
8a8d5d85 594 }
3933a3ab 595 td->td_xpcount = 0;
8a8d5d85 596#endif
26a0694b 597 ntd->td_flags |= TDF_PREEMPT_DONE;
8ec60c3f
MD
598
599 /*
b9eb1c19
MD
600 * The interrupt may have woken a thread up, we need to properly
601 * set the reschedule flag if the originally interrupted thread is
602 * at a lower priority.
8ec60c3f 603 */
f9235b6d
MD
604 if (TAILQ_FIRST(&gd->gd_tdrunq) &&
605 TAILQ_FIRST(&gd->gd_tdrunq)->td_pri > ntd->td_pri) {
8ec60c3f 606 need_lwkt_resched();
f9235b6d 607 }
8a8d5d85 608 /* YYY release mp lock on switchback if original doesn't need it */
f9235b6d
MD
609 goto havethread_preempted;
610 }
611
612 /*
613 * Implement round-robin fairq with priority insertion. The priority
614 * insertion is handled by _lwkt_enqueue()
615 *
616 * We have to adjust the MP lock for the target thread. If we
617 * need the MP lock and cannot obtain it we try to locate a
618 * thread that does not need the MP lock. If we cannot, we spin
619 * instead of HLT.
620 *
621 * A similar issue exists for the tokens held by the target thread.
622 * If we cannot obtain ownership of the tokens we cannot immediately
623 * schedule the thread.
624 */
625 for (;;) {
626 clear_lwkt_resched();
627 didaccumulate = 0;
628 ntd = TAILQ_FIRST(&gd->gd_tdrunq);
629
4b5f931b 630 /*
f9235b6d 631 * Hotpath if we can get all necessary resources.
41a01a4d 632 *
f9235b6d 633 * If nothing is runnable switch to the idle thread
41a01a4d 634 */
f9235b6d
MD
635 if (ntd == NULL) {
636 ntd = &gd->gd_idlethread;
637 if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
638 ntd->td_flags |= TDF_IDLE_NOHLT;
6f207a2c 639#ifdef SMP
3933a3ab 640 KKASSERT(ntd->td_xpcount == 0);
f9235b6d
MD
641 if (ntd->td_mpcount) {
642 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
643 panic("Idle thread %p was holding the BGL!", ntd);
644 if (mpheld == 0) {
c5724852
MD
645 set_cpu_contention_mask(gd);
646 handle_cpu_contention_mask();
647 cpu_try_mplock();
648 mpheld = MP_LOCK_HELD(gd);
f9235b6d
MD
649 cpu_pause();
650 continue;
651 }
652 }
c5724852 653 clr_cpu_contention_mask(gd);
6f207a2c 654#endif
b37f18d6
MD
655 cpu_time.cp_msg[0] = 0;
656 cpu_time.cp_stallpc = 0;
f9235b6d
MD
657 goto haveidle;
658 }
41a01a4d 659
8ec60c3f 660 /*
f9235b6d 661 * Hotpath schedule
6f207a2c
MD
662 *
663 * NOTE: For UP there is no mplock and lwkt_getalltokens()
664 * always succeeds.
8ec60c3f 665 */
f9235b6d
MD
666 if (ntd->td_fairq_accum >= 0 &&
667#ifdef SMP
3933a3ab
MD
668 (ntd->td_mpcount + ntd->td_xpcount == 0 ||
669 mpheld || cpu_try_mplock()) &&
f9235b6d 670#endif
b37f18d6 671 (!TD_TOKS_HELD(ntd) || lwkt_getalltokens(ntd, &lmsg, &laddr))
f9235b6d 672 ) {
8a8d5d85 673#ifdef SMP
c5724852 674 clr_cpu_contention_mask(gd);
f9235b6d
MD
675#endif
676 goto havethread;
677 }
678
b37f18d6
MD
679 lmsg = NULL;
680 laddr = NULL;
681
f9235b6d 682#ifdef SMP
c5724852
MD
683 if (ntd->td_fairq_accum >= 0)
684 set_cpu_contention_mask(gd);
f9235b6d 685 /* Reload mpheld (it become stale after mplock/token ops) */
c5724852 686 mpheld = MP_LOCK_HELD(gd);
3933a3ab 687 if (ntd->td_mpcount + ntd->td_xpcount && mpheld == 0) {
b37f18d6
MD
688 lmsg = "mplock";
689 laddr = ntd->td_mplock_stallpc;
690 }
f9235b6d
MD
691#endif
692
693 /*
694 * Coldpath - unable to schedule ntd, continue looking for threads
695 * to schedule. This is only allowed if the (presumably) kernel
696 * thread exhausted its fair share. A kernel thread stuck on
697 * resources does not currently allow a user thread to get in
698 * front of it.
699 */
700#ifdef SMP
701 nquserok = ((ntd->td_pri < TDPRI_KERN_LPSCHED) ||
702 (ntd->td_fairq_accum < 0));
6f207a2c
MD
703#else
704 nquserok = 1;
f9235b6d
MD
705#endif
706 nlast = NULL;
707
708 for (;;) {
41a01a4d 709 /*
f9235b6d
MD
710 * If the fair-share scheduler ran out ntd gets moved to the
711 * end and its accumulator will be bumped, if it didn't we
712 * maintain the same queue position.
df6b8ba0 713 *
f9235b6d 714 * nlast keeps track of the last element prior to any moves.
41a01a4d 715 */
f9235b6d 716 if (ntd->td_fairq_accum < 0) {
f9235b6d
MD
717 lwkt_fairq_accumulate(gd, ntd);
718 didaccumulate = 1;
c5724852
MD
719
720 /*
721 * Move to end
722 */
723 xtd = TAILQ_NEXT(ntd, td_threadq);
f9235b6d
MD
724 TAILQ_REMOVE(&gd->gd_tdrunq, ntd, td_threadq);
725 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, ntd, td_threadq);
c5724852
MD
726
727 /*
728 * Set terminal element (nlast)
729 */
f9235b6d
MD
730 if (nlast == NULL) {
731 nlast = ntd;
732 if (xtd == NULL)
733 xtd = ntd;
734 }
735 ntd = xtd;
736 } else {
737 ntd = TAILQ_NEXT(ntd, td_threadq);
738 }
a453459d 739
f9235b6d
MD
740 /*
741 * If we exhausted the run list switch to the idle thread.
742 * Since one or more threads had resource acquisition issues
743 * we do not allow the idle thread to halt.
744 *
745 * NOTE: nlast can be NULL.
746 */
747 if (ntd == nlast) {
e0a90d3b 748 cpu_pause();
f9235b6d
MD
749 ntd = &gd->gd_idlethread;
750 ntd->td_flags |= TDF_IDLE_NOHLT;
6f207a2c 751#ifdef SMP
3933a3ab 752 KKASSERT(ntd->td_xpcount == 0);
f9235b6d 753 if (ntd->td_mpcount) {
c5724852 754 mpheld = MP_LOCK_HELD(gd);
f9235b6d
MD
755 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
756 panic("Idle thread %p was holding the BGL!", ntd);
757 if (mpheld == 0) {
c5724852
MD
758 set_cpu_contention_mask(gd);
759 handle_cpu_contention_mask();
760 cpu_try_mplock();
761 mpheld = MP_LOCK_HELD(gd);
f9235b6d
MD
762 cpu_pause();
763 break; /* try again from the top, almost */
b9eb1c19 764 }
8a8d5d85 765 }
6f207a2c 766#endif
684a93c4
MD
767
768 /*
f9235b6d
MD
769 * If fairq accumulations occured we do not schedule the
770 * idle thread. This will cause us to try again from
771 * the (almost) top.
684a93c4 772 */
f9235b6d 773 if (didaccumulate)
b37f18d6
MD
774 break; /* try again from the top, almost */
775 if (lmsg)
776 strlcpy(cpu_time.cp_msg, lmsg, sizeof(cpu_time.cp_msg));
777 cpu_time.cp_stallpc = (uintptr_t)laddr;
f9235b6d 778 goto haveidle;
8a8d5d85 779 }
f9235b6d 780
df6b8ba0 781 /*
f9235b6d 782 * Try to switch to this thread.
6f207a2c
MD
783 *
784 * NOTE: For UP there is no mplock and lwkt_getalltokens()
785 * always succeeds.
df6b8ba0 786 */
77912481
MD
787 if ((ntd->td_pri >= TDPRI_KERN_LPSCHED || nquserok ||
788 user_pri_sched) && ntd->td_fairq_accum >= 0 &&
f9235b6d 789#ifdef SMP
3933a3ab
MD
790 (ntd->td_mpcount + ntd->td_xpcount == 0 ||
791 mpheld || cpu_try_mplock()) &&
8a8d5d85 792#endif
b37f18d6 793 (!TD_TOKS_HELD(ntd) || lwkt_getalltokens(ntd, &lmsg, &laddr))
f9235b6d 794 ) {
a453459d 795#ifdef SMP
c5724852 796 clr_cpu_contention_mask(gd);
f9235b6d
MD
797#endif
798 goto havethread;
df6b8ba0 799 }
f9235b6d 800#ifdef SMP
c5724852
MD
801 if (ntd->td_fairq_accum >= 0)
802 set_cpu_contention_mask(gd);
803 /*
804 * Reload mpheld (it become stale after mplock/token ops).
805 */
806 mpheld = MP_LOCK_HELD(gd);
3933a3ab 807 if (ntd->td_mpcount + ntd->td_xpcount && mpheld == 0) {
b37f18d6
MD
808 lmsg = "mplock";
809 laddr = ntd->td_mplock_stallpc;
810 }
f9235b6d
MD
811 if (ntd->td_pri >= TDPRI_KERN_LPSCHED && ntd->td_fairq_accum >= 0)
812 nquserok = 0;
a453459d 813#endif
4b5f931b 814 }
c5724852
MD
815
816 /*
817 * All threads exhausted but we can loop due to a negative
818 * accumulator.
819 *
820 * While we are looping in the scheduler be sure to service
821 * any interrupts which were made pending due to our critical
822 * section, otherwise we could livelock (e.g.) IPIs.
823 *
824 * NOTE: splz can enter and exit the mplock so mpheld is
825 * stale after this call.
826 */
827 splz_check();
828
829#ifdef SMP
830 /*
831 * Our mplock can be cached and cause other cpus to livelock
832 * if we loop due to e.g. not being able to acquire tokens.
833 */
834 if (MP_LOCK_HELD(gd))
835 cpu_rel_mplock(gd->gd_cpuid);
836 mpheld = 0;
837#endif
f1d1c3fa 838 }
8a8d5d85
MD
839
840 /*
f9235b6d
MD
841 * Do the actual switch. WARNING: mpheld is stale here.
842 *
843 * We must always decrement td_fairq_accum on non-idle threads just
844 * in case a thread never gets a tick due to being in a continuous
845 * critical section. The page-zeroing code does that.
846 *
847 * If the thread we came up with is a higher or equal priority verses
848 * the thread at the head of the queue we move our thread to the
849 * front. This way we can always check the front of the queue.
850 */
851havethread:
852 ++gd->gd_cnt.v_swtch;
853 --ntd->td_fairq_accum;
854 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
855 if (ntd != xtd && ntd->td_pri >= xtd->td_pri) {
856 TAILQ_REMOVE(&gd->gd_tdrunq, ntd, td_threadq);
857 TAILQ_INSERT_HEAD(&gd->gd_tdrunq, ntd, td_threadq);
858 }
859havethread_preempted:
860 ;
861 /*
862 * If the new target does not need the MP lock and we are holding it,
863 * release the MP lock. If the new target requires the MP lock we have
864 * already acquired it for the target.
865 *
866 * WARNING: mpheld is stale here.
8a8d5d85 867 */
f9235b6d
MD
868haveidle:
869 KASSERT(ntd->td_critcount,
870 ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));
8a8d5d85 871#ifdef SMP
3933a3ab 872 if (ntd->td_mpcount + ntd->td_xpcount == 0 ) {
c5724852
MD
873 if (MP_LOCK_HELD(gd))
874 cpu_rel_mplock(gd->gd_cpuid);
8a8d5d85 875 } else {
a453459d 876 ASSERT_MP_LOCK_HELD(ntd);
8a8d5d85
MD
877 }
878#endif
94f6d86e
MD
879 if (td != ntd) {
880 ++switch_count;
a1f0fb66 881 KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
f1d1c3fa 882 td->td_switch(ntd);
94f6d86e 883 }
37af14fe
MD
884 /* NOTE: current cpu may have changed after switch */
885 crit_exit_quick(td);
8ad65e08
MD
886}
887
b68b7282 888/*
96728c05
MD
889 * Request that the target thread preempt the current thread. Preemption
890 * only works under a specific set of conditions:
b68b7282 891 *
96728c05
MD
892 * - We are not preempting ourselves
893 * - The target thread is owned by the current cpu
894 * - We are not currently being preempted
895 * - The target is not currently being preempted
d3d1cbc8
MD
896 * - We are not holding any spin locks
897 * - The target thread is not holding any tokens
96728c05
MD
898 * - We are able to satisfy the target's MP lock requirements (if any).
899 *
900 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically
901 * this is called via lwkt_schedule() through the td_preemptable callback.
f9235b6d 902 * critcount is the managed critical priority that we should ignore in order
96728c05
MD
903 * to determine whether preemption is possible (aka usually just the crit
904 * priority of lwkt_schedule() itself).
b68b7282 905 *
26a0694b
MD
906 * XXX at the moment we run the target thread in a critical section during
907 * the preemption in order to prevent the target from taking interrupts
908 * that *WE* can't. Preemption is strictly limited to interrupt threads
909 * and interrupt-like threads, outside of a critical section, and the
910 * preempted source thread will be resumed the instant the target blocks
911 * whether or not the source is scheduled (i.e. preemption is supposed to
912 * be as transparent as possible).
4b5f931b 913 *
8a8d5d85
MD
914 * The target thread inherits our MP count (added to its own) for the
915 * duration of the preemption in order to preserve the atomicity of the
96728c05
MD
916 * MP lock during the preemption. Therefore, any preempting targets must be
917 * careful in regards to MP assertions. Note that the MP count may be
71ef2f5c
MD
918 * out of sync with the physical mp_lock, but we do not have to preserve
919 * the original ownership of the lock if it was out of synch (that is, we
920 * can leave it synchronized on return).
b68b7282
MD
921 */
922void
f9235b6d 923lwkt_preempt(thread_t ntd, int critcount)
b68b7282 924{
46a3f46d 925 struct globaldata *gd = mycpu;
0a3f9b47 926 thread_t td;
8a8d5d85
MD
927#ifdef SMP
928 int mpheld;
57c254db 929 int savecnt;
8a8d5d85 930#endif
2d910aaf 931 int save_gd_intr_nesting_level;
b68b7282 932
26a0694b 933 /*
96728c05
MD
934 * The caller has put us in a critical section. We can only preempt
935 * if the caller of the caller was not in a critical section (basically
f9235b6d 936 * a local interrupt), as determined by the 'critcount' parameter. We
47737962 937 * also can't preempt if the caller is holding any spinlocks (even if
d666840a 938 * he isn't in a critical section). This also handles the tokens test.
96728c05
MD
939 *
940 * YYY The target thread must be in a critical section (else it must
941 * inherit our critical section? I dunno yet).
41a01a4d 942 *
0a3f9b47 943 * Set need_lwkt_resched() unconditionally for now YYY.
26a0694b 944 */
f9235b6d 945 KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));
26a0694b 946
fbc024e4
MD
947 if (preempt_enable == 0) {
948 ++preempt_miss;
949 return;
950 }
951
0a3f9b47 952 td = gd->gd_curthread;
f9235b6d 953 if (ntd->td_pri <= td->td_pri) {
57c254db
MD
954 ++preempt_miss;
955 return;
956 }
f9235b6d 957 if (td->td_critcount > critcount) {
96728c05 958 ++preempt_miss;
8ec60c3f 959 need_lwkt_resched();
96728c05
MD
960 return;
961 }
962#ifdef SMP
46a3f46d 963 if (ntd->td_gd != gd) {
96728c05 964 ++preempt_miss;
8ec60c3f 965 need_lwkt_resched();
96728c05
MD
966 return;
967 }
968#endif
41a01a4d 969 /*
77912481
MD
970 * We don't have to check spinlocks here as they will also bump
971 * td_critcount.
d3d1cbc8
MD
972 *
973 * Do not try to preempt if the target thread is holding any tokens.
974 * We could try to acquire the tokens but this case is so rare there
975 * is no need to support it.
41a01a4d 976 */
77912481
MD
977 KKASSERT(gd->gd_spinlocks_wr == 0);
978
3b998fa9 979 if (TD_TOKS_HELD(ntd)) {
d3d1cbc8
MD
980 ++preempt_miss;
981 need_lwkt_resched();
982 return;
983 }
26a0694b
MD
984 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
985 ++preempt_weird;
8ec60c3f 986 need_lwkt_resched();
26a0694b
MD
987 return;
988 }
989 if (ntd->td_preempted) {
4b5f931b 990 ++preempt_hit;
8ec60c3f 991 need_lwkt_resched();
26a0694b 992 return;
b68b7282 993 }
8a8d5d85 994#ifdef SMP
a2a5ad0d 995 /*
3933a3ab 996 * NOTE: An interrupt might have occurred just as we were transitioning
71ef2f5c 997 * to or from the MP lock. In this case td_mpcount will be pre-disposed
3933a3ab
MD
998 * (non-zero) but not actually synchronized with the mp_lock itself.
999 * We can use it to imply an MP lock requirement for the preemption but
1000 * we cannot use it to test whether we hold the MP lock or not.
a2a5ad0d 1001 */
96728c05 1002 savecnt = td->td_mpcount;
c5724852 1003 mpheld = MP_LOCK_HELD(gd);
6d9b99db 1004 ntd->td_xpcount = td->td_mpcount + td->td_xpcount;
3933a3ab
MD
1005 if (mpheld == 0 && ntd->td_mpcount + ntd->td_xpcount && !cpu_try_mplock()) {
1006 ntd->td_xpcount = 0;
8a8d5d85 1007 ++preempt_miss;
8ec60c3f 1008 need_lwkt_resched();
8a8d5d85
MD
1009 return;
1010 }
1011#endif
26a0694b 1012
8ec60c3f
MD
1013 /*
1014 * Since we are able to preempt the current thread, there is no need to
1015 * call need_lwkt_resched().
2d910aaf
MD
1016 *
1017 * We must temporarily clear gd_intr_nesting_level around the switch
1018 * since switchouts from the target thread are allowed (they will just
1019 * return to our thread), and since the target thread has its own stack.
8ec60c3f 1020 */
26a0694b
MD
1021 ++preempt_hit;
1022 ntd->td_preempted = td;
1023 td->td_flags |= TDF_PREEMPT_LOCK;
a1f0fb66 1024 KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
2d910aaf
MD
1025 save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
1026 gd->gd_intr_nesting_level = 0;
26a0694b 1027 td->td_switch(ntd);
2d910aaf 1028 gd->gd_intr_nesting_level = save_gd_intr_nesting_level;
b9eb1c19 1029
26a0694b 1030 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
96728c05
MD
1031#ifdef SMP
1032 KKASSERT(savecnt == td->td_mpcount);
c5724852 1033 mpheld = MP_LOCK_HELD(gd);
71ef2f5c 1034 if (mpheld && td->td_mpcount == 0)
c5724852 1035 cpu_rel_mplock(gd->gd_cpuid);
3933a3ab 1036 else if (mpheld == 0 && td->td_mpcount + td->td_xpcount)
96728c05
MD
1037 panic("lwkt_preempt(): MP lock was not held through");
1038#endif
26a0694b
MD
1039 ntd->td_preempted = NULL;
1040 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
b68b7282
MD
1041}
1042
f1d1c3fa 1043/*
faaeffac 1044 * Conditionally call splz() if gd_reqflags indicates work is pending.
4a28fe22
MD
1045 * This will work inside a critical section but not inside a hard code
1046 * section.
ef0fdad1 1047 *
f1d1c3fa
MD
1048 * (self contained on a per cpu basis)
1049 */
1050void
faaeffac 1051splz_check(void)
f1d1c3fa 1052{
7966cb69
MD
1053 globaldata_t gd = mycpu;
1054 thread_t td = gd->gd_curthread;
ef0fdad1 1055
4a28fe22
MD
1056 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
1057 gd->gd_intr_nesting_level == 0 &&
1058 td->td_nest_count < 2)
1059 {
f1d1c3fa 1060 splz();
4a28fe22
MD
1061 }
1062}
1063
1064/*
1065 * This version is integrated into crit_exit, reqflags has already
1066 * been tested but td_critcount has not.
1067 *
1068 * We only want to execute the splz() on the 1->0 transition of
1069 * critcount and not in a hard code section or if too deeply nested.
1070 */
1071void
1072lwkt_maybe_splz(thread_t td)
1073{
1074 globaldata_t gd = td->td_gd;
1075
1076 if (td->td_critcount == 0 &&
1077 gd->gd_intr_nesting_level == 0 &&
1078 td->td_nest_count < 2)
1079 {
1080 splz();
1081 }
f1d1c3fa
MD
1082}
1083
8ad65e08 1084/*
f9235b6d
MD
1085 * This function is used to negotiate a passive release of the current
1086 * process/lwp designation with the user scheduler, allowing the user
1087 * scheduler to schedule another user thread. The related kernel thread
1088 * (curthread) continues running in the released state.
8ad65e08
MD
1089 */
1090void
f9235b6d 1091lwkt_passive_release(struct thread *td)
8ad65e08 1092{
f9235b6d
MD
1093 struct lwp *lp = td->td_lwp;
1094
1095 td->td_release = NULL;
1096 lwkt_setpri_self(TDPRI_KERN_USER);
1097 lp->lwp_proc->p_usched->release_curproc(lp);
f1d1c3fa
MD
1098}
1099
f9235b6d 1100
3824f392 1101/*
f9235b6d
MD
1102 * This implements a normal yield. This routine is virtually a nop if
1103 * there is nothing to yield to but it will always run any pending interrupts
1104 * if called from a critical section.
1105 *
1106 * This yield is designed for kernel threads without a user context.
1107 *
1108 * (self contained on a per cpu basis)
3824f392
MD
1109 */
1110void
f9235b6d 1111lwkt_yield(void)
3824f392 1112{
f9235b6d
MD
1113 globaldata_t gd = mycpu;
1114 thread_t td = gd->gd_curthread;
1115 thread_t xtd;
3824f392 1116
f9235b6d
MD
1117 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1118 splz();
1119 if (td->td_fairq_accum < 0) {
1120 lwkt_schedule_self(curthread);
1121 lwkt_switch();
1122 } else {
1123 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
1124 if (xtd && xtd->td_pri > td->td_pri) {
1125 lwkt_schedule_self(curthread);
1126 lwkt_switch();
1127 }
1128 }
3824f392
MD
1129}
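
/*
 * Illustrative sketch (not part of this file): a long-running kernel loop
 * typically calls lwkt_yield() once per iteration so that other runnable
 * LWKTs (and pending interrupts) get serviced while it grinds through its
 * work.  The example_* names, queue layout, and link field are hypothetical.
 */
#if 0
static void
example_drain_queue(struct example_queue *q)
{
    struct example_item *item;

    while ((item = TAILQ_FIRST(&q->head)) != NULL) {
        TAILQ_REMOVE(&q->head, item, link);
        example_process_item(item);     /* hypothetical cpu-bound work */
        lwkt_yield();                   /* nearly free if nothing else is runnable */
    }
}
#endif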
1130
1131/*
f9235b6d
MD
1132 * This yield is designed for kernel threads with a user context.
1133 *
1134 * The kernel acting on behalf of the user is potentially cpu-bound,
1135 * this function will efficiently allow other threads to run and also
1136 * switch to other processes by releasing.
3824f392
MD
1137 *
1138 * The lwkt_user_yield() function is designed to have very low overhead
1139 * if no yield is determined to be needed.
1140 */
1141void
1142lwkt_user_yield(void)
1143{
f9235b6d
MD
1144 globaldata_t gd = mycpu;
1145 thread_t td = gd->gd_curthread;
1146
1147 /*
1148 * Always run any pending interrupts in case we are in a critical
1149 * section.
1150 */
1151 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1152 splz();
3824f392
MD
1153
1154#ifdef SMP
1155 /*
1156 * XXX SEVERE TEMPORARY HACK. A cpu-bound operation running in the
1157 * kernel can prevent other cpus from servicing interrupt threads
1158 * which still require the MP lock (which is a lot of them). This
1159 * has a chaining effect since if the interrupt is blocked, so is
1160 * the event, so normal scheduling will not pick up on the problem.
1161 */
3933a3ab 1162 if (cpu_contention_mask && td->td_mpcount + td->td_xpcount) {
684a93c4 1163 yield_mplock(td);
3824f392
MD
1164 }
1165#endif
1166
1167 /*
f9235b6d
MD
1168 * Switch (which forces a release) if another kernel thread needs
1169 * the cpu, if userland wants us to resched, or if our kernel
1170 * quantum has run out.
3824f392 1171 */
f9235b6d
MD
1172 if (lwkt_resched_wanted() ||
1173 user_resched_wanted() ||
1174 td->td_fairq_accum < 0)
1175 {
3824f392 1176 lwkt_switch();
3824f392
MD
1177 }
1178
f9235b6d 1179#if 0
3824f392 1180 /*
f9235b6d
MD
1181 * Reacquire the current process if we are released.
1182 *
1183 * XXX not implemented atm. The kernel may be holding locks and such,
1184 * so we want the thread to continue to receive cpu.
3824f392 1185 */
f9235b6d
MD
1186 if (td->td_release == NULL && lp) {
1187 lp->lwp_proc->p_usched->acquire_curproc(lp);
1188 td->td_release = lwkt_passive_release;
1189 lwkt_setpri_self(TDPRI_USER_NORM);
3824f392 1190 }
f9235b6d 1191#endif
b9eb1c19
MD
1192}
1193
8ad65e08 1194/*
f1d1c3fa
MD
1195 * Generic schedule. Possibly schedule threads belonging to other cpus and
1196 * deal with threads that might be blocked on a wait queue.
1197 *
0a3f9b47
MD
1198 * We have a little helper inline function which does additional work after
1199 * the thread has been enqueued, including dealing with preemption and
1200 * setting need_lwkt_resched() (which prevents the kernel from returning
1201 * to userland until it has processed higher priority threads).
6330a558
MD
1202 *
1203 * It is possible for this routine to be called after a failed _enqueue
1204 * (due to the target thread migrating, sleeping, or otherwise blocked).
1205 * We have to check that the thread is actually on the run queue!
361d01dd
MD
1206 *
1207 * reschedok is an optimized constant propagated from lwkt_schedule() or
1208 * lwkt_schedule_noresched(). By default it is non-zero, causing a
1209 * reschedule to be requested if the target thread has a higher priority.
1210 * The port messaging code will set MSG_NORESCHED and cause reschedok to
1211 * be 0, preventing undesired reschedules.
8ad65e08 1212 */
0a3f9b47
MD
1213static __inline
1214void
f9235b6d 1215_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount, int reschedok)
0a3f9b47 1216{
b9eb1c19 1217 thread_t otd;
c730be20 1218
6330a558 1219 if (ntd->td_flags & TDF_RUNQ) {
361d01dd 1220 if (ntd->td_preemptable && reschedok) {
f9235b6d 1221 ntd->td_preemptable(ntd, ccount); /* YYY +token */
361d01dd 1222 } else if (reschedok) {
b9eb1c19 1223 otd = curthread;
f9235b6d 1224 if (ntd->td_pri > otd->td_pri)
c730be20 1225 need_lwkt_resched();
6330a558 1226 }
f9235b6d
MD
1227
1228 /*
1229 * Give the thread a little fair share scheduler bump if it
1230 * has been asleep for a while. This is primarily to avoid
1231 * a degenerate case for interrupt threads where accumulator
1232 * crosses into negative territory unnecessarily.
1233 */
1234 if (ntd->td_fairq_lticks != ticks) {
1235 ntd->td_fairq_lticks = ticks;
1236 ntd->td_fairq_accum += gd->gd_fairq_total_pri;
1237 if (ntd->td_fairq_accum > TDFAIRQ_MAX(gd))
1238 ntd->td_fairq_accum = TDFAIRQ_MAX(gd);
1239 }
0a3f9b47
MD
1240 }
1241}
1242
361d01dd 1243static __inline
8ad65e08 1244void
361d01dd 1245_lwkt_schedule(thread_t td, int reschedok)
8ad65e08 1246{
37af14fe
MD
1247 globaldata_t mygd = mycpu;
1248
cf709dd2
MD
1249 KASSERT(td != &td->td_gd->gd_idlethread,
1250 ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
37af14fe 1251 crit_enter_gd(mygd);
9388413d 1252 KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
37af14fe 1253 if (td == mygd->gd_curthread) {
f1d1c3fa
MD
1254 _lwkt_enqueue(td);
1255 } else {
f1d1c3fa 1256 /*
7cd8d145
MD
1257 * If we own the thread, there is no race (since we are in a
1258 * critical section). If we do not own the thread there might
1259 * be a race but the target cpu will deal with it.
f1d1c3fa 1260 */
0f7a3396 1261#ifdef SMP
7cd8d145 1262 if (td->td_gd == mygd) {
9d265729 1263 _lwkt_enqueue(td);
f9235b6d 1264 _lwkt_schedule_post(mygd, td, 1, reschedok);
f1d1c3fa 1265 } else {
e381e77c 1266 lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
7cd8d145 1267 }
0f7a3396 1268#else
7cd8d145 1269 _lwkt_enqueue(td);
f9235b6d 1270 _lwkt_schedule_post(mygd, td, 1, reschedok);
0f7a3396 1271#endif
8ad65e08 1272 }
37af14fe 1273 crit_exit_gd(mygd);
8ad65e08
MD
1274}
1275
361d01dd
MD
1276void
1277lwkt_schedule(thread_t td)
1278{
1279 _lwkt_schedule(td, 1);
1280}
1281
1282void
1283lwkt_schedule_noresched(thread_t td)
1284{
1285 _lwkt_schedule(td, 0);
1286}
1287
88ebb169
SW
1288#ifdef SMP
1289
e381e77c
MD
1290/*
1291 * When scheduled remotely if frame != NULL the IPIQ is being
1292 * run via doreti or an interrupt then preemption can be allowed.
1293 *
1294 * To allow preemption we have to drop the critical section so only
1295 * one is present in _lwkt_schedule_post.
1296 */
1297static void
1298lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
1299{
1300 thread_t td = curthread;
1301 thread_t ntd = arg;
1302
1303 if (frame && ntd->td_preemptable) {
1304 crit_exit_noyield(td);
1305 _lwkt_schedule(ntd, 1);
1306 crit_enter_quick(td);
1307 } else {
1308 _lwkt_schedule(ntd, 1);
1309 }
1310}
1311
d9eea1a5 1312/*
52eedfb5
MD
1313 * Thread migration using a 'Pull' method. The thread may or may not be
1314 * the current thread. It MUST be descheduled and in a stable state.
1315 * lwkt_giveaway() must be called on the cpu owning the thread.
1316 *
1317 * At any point after lwkt_giveaway() is called, the target cpu may
1318 * 'pull' the thread by calling lwkt_acquire().
1319 *
ae8e83e6
MD
1320 * We have to make sure the thread is not sitting on a per-cpu tsleep
1321 * queue or it will blow up when it moves to another cpu.
1322 *
52eedfb5 1323 * MPSAFE - must be called under very specific conditions.
d9eea1a5 1324 */
52eedfb5
MD
1325void
1326lwkt_giveaway(thread_t td)
1327{
3b4192fb 1328 globaldata_t gd = mycpu;
52eedfb5 1329
3b4192fb
MD
1330 crit_enter_gd(gd);
1331 if (td->td_flags & TDF_TSLEEPQ)
1332 tsleep_remove(td);
1333 KKASSERT(td->td_gd == gd);
1334 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
1335 td->td_flags |= TDF_MIGRATING;
1336 crit_exit_gd(gd);
52eedfb5
MD
1337}
1338
a2a5ad0d
MD
1339void
1340lwkt_acquire(thread_t td)
1341{
37af14fe
MD
1342 globaldata_t gd;
1343 globaldata_t mygd;
a2a5ad0d 1344
52eedfb5 1345 KKASSERT(td->td_flags & TDF_MIGRATING);
a2a5ad0d 1346 gd = td->td_gd;
37af14fe 1347 mygd = mycpu;
52eedfb5 1348 if (gd != mycpu) {
35238fa5 1349 cpu_lfence();
52eedfb5 1350 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
37af14fe 1351 crit_enter_gd(mygd);
df910c23
MD
1352 while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1353#ifdef SMP
1354 lwkt_process_ipiq();
1355#endif
52eedfb5 1356 cpu_lfence();
df910c23 1357 }
562273ea 1358 cpu_mfence();
37af14fe 1359 td->td_gd = mygd;
52eedfb5
MD
1360 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1361 td->td_flags &= ~TDF_MIGRATING;
1362 crit_exit_gd(mygd);
1363 } else {
1364 crit_enter_gd(mygd);
1365 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1366 td->td_flags &= ~TDF_MIGRATING;
37af14fe 1367 crit_exit_gd(mygd);
a2a5ad0d
MD
1368 }
1369}
1370
52eedfb5
MD
1371#endif
1372
f1d1c3fa
MD
1373/*
1374 * Generic deschedule. Descheduling threads other than your own should be
1375 * done only in carefully controlled circumstances. Descheduling is
1376 * asynchronous.
1377 *
1378 * This function may block if the cpu has run out of messages.
8ad65e08
MD
1379 */
1380void
1381lwkt_deschedule(thread_t td)
1382{
f1d1c3fa 1383 crit_enter();
b8a98473 1384#ifdef SMP
f1d1c3fa
MD
1385 if (td == curthread) {
1386 _lwkt_dequeue(td);
1387 } else {
a72187e9 1388 if (td->td_gd == mycpu) {
f1d1c3fa
MD
1389 _lwkt_dequeue(td);
1390 } else {
b8a98473 1391 lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
f1d1c3fa
MD
1392 }
1393 }
b8a98473
MD
1394#else
1395 _lwkt_dequeue(td);
1396#endif
f1d1c3fa
MD
1397 crit_exit();
1398}
1399
4b5f931b
MD
1400/*
1401 * Set the target thread's priority. This routine does not automatically
1402 * switch to a higher priority thread, LWKT threads are not designed for
1403 * continuous priority changes. Yield if you want to switch.
4b5f931b
MD
1404 */
1405void
1406lwkt_setpri(thread_t td, int pri)
1407{
a72187e9 1408 KKASSERT(td->td_gd == mycpu);
f9235b6d
MD
1409 if (td->td_pri != pri) {
1410 KKASSERT(pri >= 0);
1411 crit_enter();
1412 if (td->td_flags & TDF_RUNQ) {
1413 _lwkt_dequeue(td);
1414 td->td_pri = pri;
1415 _lwkt_enqueue(td);
1416 } else {
1417 td->td_pri = pri;
1418 }
1419 crit_exit();
26a0694b 1420 }
26a0694b
MD
1421}
1422
03bd0a5e
MD
1423/*
1424 * Set the initial priority for a thread prior to it being scheduled for
1425 * the first time. The thread MUST NOT be scheduled before or during
1426 * this call. The thread may be assigned to a cpu other then the current
1427 * cpu.
1428 *
1429 * Typically used after a thread has been created with TDF_STOPPREQ,
1430 * and before the thread is initially scheduled.
1431 */
1432void
1433lwkt_setpri_initial(thread_t td, int pri)
1434{
1435 KKASSERT(pri >= 0);
1436 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
f9235b6d 1437 td->td_pri = pri;
03bd0a5e
MD
1438}
1439
26a0694b
MD
1440void
1441lwkt_setpri_self(int pri)
1442{
1443 thread_t td = curthread;
1444
4b5f931b
MD
1445 KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
1446 crit_enter();
1447 if (td->td_flags & TDF_RUNQ) {
1448 _lwkt_dequeue(td);
f9235b6d 1449 td->td_pri = pri;
4b5f931b
MD
1450 _lwkt_enqueue(td);
1451 } else {
f9235b6d 1452 td->td_pri = pri;
4b5f931b
MD
1453 }
1454 crit_exit();
1455}
1456
f9235b6d
MD
1457/*
1458 * 1/hz tick (typically 10ms) x TDFAIRQ_SCALE (typ 8) = 80ms full cycle.
1459 *
1460 * Example: two competing threads, same priority N. decrement by (2*N)
1461 * increment by N*8, each thread will get 4 ticks.
1462 */
1463void
1464lwkt_fairq_schedulerclock(thread_t td)
1465{
1466 if (fairq_enable) {
1467 while (td) {
1468 if (td != &td->td_gd->gd_idlethread) {
1469 td->td_fairq_accum -= td->td_gd->gd_fairq_total_pri;
1470 if (td->td_fairq_accum < -TDFAIRQ_MAX(td->td_gd))
1471 td->td_fairq_accum = -TDFAIRQ_MAX(td->td_gd);
1472 if (td->td_fairq_accum < 0)
1473 need_lwkt_resched();
1474 td->td_fairq_lticks = ticks;
1475 }
1476 td = td->td_preempted;
1477 }
1478 }
1479}
1480
1481static void
1482lwkt_fairq_accumulate(globaldata_t gd, thread_t td)
1483{
1484 td->td_fairq_accum += td->td_pri * TDFAIRQ_SCALE;
1485 if (td->td_fairq_accum > TDFAIRQ_MAX(td->td_gd))
1486 td->td_fairq_accum = TDFAIRQ_MAX(td->td_gd);
1487}
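
/*
 * Worked example for the comment above (illustrative): with two runnable
 * threads at the same priority N, gd_fairq_total_pri is 2*N, so each
 * scheduler tick subtracts 2*N from the running thread's accumulator while
 * lwkt_fairq_accumulate() adds back N*TDFAIRQ_SCALE (N*8) when the thread
 * cycles.  A thread therefore burns through one top-up in 8N / 2N = 4 ticks,
 * i.e. roughly 40ms of the typical 80ms full cycle, before the other thread
 * gets the same share.
 */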
1488
5d21b981 1489/*
52eedfb5
MD
1490 * Migrate the current thread to the specified cpu.
1491 *
1492 * This is accomplished by descheduling ourselves from the current cpu,
1493 * moving our thread to the tdallq of the target cpu, IPI messaging the
1494 * target cpu, and switching out. TDF_MIGRATING prevents scheduling
1495 * races while the thread is being migrated.
ae8e83e6
MD
1496 *
1497 * We must be sure to remove ourselves from the current cpu's tsleepq
1498 * before potentially moving to another queue. The thread can be on
1499 * a tsleepq due to a left-over tsleep_interlock().
5d21b981 1500 */
3d28ff59 1501#ifdef SMP
5d21b981 1502static void lwkt_setcpu_remote(void *arg);
3d28ff59 1503#endif
5d21b981
MD
1504
1505void
1506lwkt_setcpu_self(globaldata_t rgd)
1507{
1508#ifdef SMP
1509 thread_t td = curthread;
1510
1511 if (td->td_gd != rgd) {
1512 crit_enter_quick(td);
ae8e83e6 1513 if (td->td_flags & TDF_TSLEEPQ)
3b4192fb 1514 tsleep_remove(td);
5d21b981
MD
1515 td->td_flags |= TDF_MIGRATING;
1516 lwkt_deschedule_self(td);
52eedfb5 1517 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
b8a98473 1518 lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
5d21b981
MD
1519 lwkt_switch();
1520 /* we are now on the target cpu */
52eedfb5 1521 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
5d21b981
MD
1522 crit_exit_quick(td);
1523 }
1524#endif
1525}
1526
ecdefdda
MD
1527void
1528lwkt_migratecpu(int cpuid)
1529{
1530#ifdef SMP
1531 globaldata_t rgd;
1532
1533 rgd = globaldata_find(cpuid);
1534 lwkt_setcpu_self(rgd);
1535#endif
1536}
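
/*
 * Illustrative sketch (not part of this file): a thread that wants to run
 * on a particular cpu can simply migrate itself.  The target_cpu variable
 * is hypothetical and would normally be validated against ncpus first.
 */
#if 0
    if (mycpu->gd_cpuid != target_cpu)
        lwkt_migratecpu(target_cpu);    /* returns running on target_cpu */
#endif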
1537
5d21b981
MD
1538/*
1539 * Remote IPI for cpu migration (called while in a critical section so we
1540 * do not have to enter another one). The thread has already been moved to
1541 * our cpu's allq, but we must wait for the thread to be completely switched
1542 * out on the originating cpu before we schedule it on ours or the stack
1543 * state may be corrupt. We clear TDF_MIGRATING after flushing the GD
1544 * change to main memory.
1545 *
1546 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
1547 * against wakeups. It is best if this interface is used only when there
1548 * are no pending events that might try to schedule the thread.
1549 */
3d28ff59 1550#ifdef SMP
5d21b981
MD
1551static void
1552lwkt_setcpu_remote(void *arg)
1553{
1554 thread_t td = arg;
1555 globaldata_t gd = mycpu;
1556
df910c23
MD
1557 while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1558#ifdef SMP
1559 lwkt_process_ipiq();
1560#endif
35238fa5 1561 cpu_lfence();
562273ea 1562 cpu_pause();
df910c23 1563 }
5d21b981 1564 td->td_gd = gd;
562273ea 1565 cpu_mfence();
5d21b981 1566 td->td_flags &= ~TDF_MIGRATING;
9388413d 1567 KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
5d21b981
MD
1568 _lwkt_enqueue(td);
1569}
3d28ff59 1570#endif
5d21b981 1571
553ea3c8 1572struct lwp *
4b5f931b
MD
1573lwkt_preempted_proc(void)
1574{
73e4f7b9 1575 thread_t td = curthread;
4b5f931b
MD
1576 while (td->td_preempted)
1577 td = td->td_preempted;
553ea3c8 1578 return(td->td_lwp);
4b5f931b
MD
1579}
1580
99df837e
MD
1581/*
1582 * Create a kernel process/thread/whatever. It shares its address space
1583 * with proc0 - ie: kernel only.
1584 *
365fa13f
MD
1585 * NOTE! By default new threads are created with the MP lock held. A
1586 * thread which does not require the MP lock should release it by calling
1587 * rel_mplock() at the start of the new thread.
99df837e
MD
1588 */
1589int
c9e9fb21
MD
1590lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
1591 thread_t template, int tdflags, int cpu, const char *fmt, ...)
99df837e 1592{
73e4f7b9 1593 thread_t td;
e2565a42 1594 __va_list ap;
99df837e 1595
d3d32139 1596 td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
dbcd0c9b 1597 tdflags);
a2a5ad0d
MD
1598 if (tdp)
1599 *tdp = td;
709799ea 1600 cpu_set_thread_handler(td, lwkt_exit, func, arg);
99df837e
MD
1601
1602 /*
1603 * Set up arg0 for 'ps' etc
1604 */
e2565a42 1605 __va_start(ap, fmt);
379210cb 1606 kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
e2565a42 1607 __va_end(ap);
99df837e
MD
1608
1609 /*
1610 * Schedule the thread to run
1611 */
ef0fdad1
MD
1612 if ((td->td_flags & TDF_STOPREQ) == 0)
1613 lwkt_schedule(td);
1614 else
1615 td->td_flags &= ~TDF_STOPREQ;
99df837e
MD
1616 return 0;
1617}
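
/*
 * Illustrative sketch (not part of this file): typical lwkt_create() usage,
 * modeled on the argument list above.  The example_* names are hypothetical.
 */
#if 0
static struct thread *example_td;

static void
example_thread(void *arg)
{
    for (;;) {
        /* ... do periodic work ... */
        tsleep(&example_td, 0, "exwait", hz);
    }
}

static void
example_start(void)
{
    lwkt_create(example_thread, NULL, &example_td, NULL,
                0, -1, "example %d", 0);
}
#endif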
1618
1619/*
1620 * Destroy an LWKT thread. Warning! This function is not called when
1621 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
1622 * uses a different reaping mechanism.
1623 */
1624void
1625lwkt_exit(void)
1626{
1627 thread_t td = curthread;
c070746a 1628 thread_t std;
8826f33a 1629 globaldata_t gd;
99df837e 1630
2883d2d8
MD
1631 /*
1632 * Do any cleanup that might block here
1633 */
99df837e 1634 if (td->td_flags & TDF_VERBOSE)
6ea70f76 1635 kprintf("kthread %p %s has exited\n", td, td->td_comm);
f6bf3af1 1636 caps_exit(td);
2883d2d8
MD
1637 biosched_done(td);
1638 dsched_exit_thread(td);
c070746a
MD
1639
1640 /*
1641 * Get us into a critical section to interlock gd_freetd and loop
1642 * until we can get it freed.
1643 *
1644 * We have to cache the current td in gd_freetd because objcache_put()ing
1645 * it would rip it out from under us while our thread is still active.
1646 */
1647 gd = mycpu;
37af14fe 1648 crit_enter_quick(td);
c070746a 1649 while ((std = gd->gd_freetd) != NULL) {
cf709dd2 1650 KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
c070746a
MD
1651 gd->gd_freetd = NULL;
1652 objcache_put(thread_cache, std);
1653 }
3b4192fb
MD
1654
1655 /*
1656 * Remove thread resources from kernel lists and deschedule us for
2883d2d8
MD
1657 * the last time. We cannot block after this point or we may end
1658 * up with a stale td on the tsleepq.
3b4192fb
MD
1659 */
1660 if (td->td_flags & TDF_TSLEEPQ)
1661 tsleep_remove(td);
37af14fe 1662 lwkt_deschedule_self(td);
e56e4dea 1663 lwkt_remove_tdallq(td);
2883d2d8
MD
1664
1665 /*
1666 * Final cleanup
1667 */
1668 KKASSERT(gd->gd_freetd == NULL);
c070746a
MD
1669 if (td->td_flags & TDF_ALLOCATED_THREAD)
1670 gd->gd_freetd = td;
99df837e
MD
1671 cpu_thread_exit();
1672}
1673
e56e4dea
MD
1674void
1675lwkt_remove_tdallq(thread_t td)
1676{
1677 KKASSERT(td->td_gd == mycpu);
1678 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1679}
1680
9cf43f91
MD
1681/*
1682 * Code reduction and branch prediction improvements. Call/return
1683 * overhead on modern cpus often degenerates into 0 cycles due to
1684 * the cpu's branch prediction hardware and return pc cache. We
1685 * can take advantage of this by not inlining medium-complexity
1686 * functions and we can also reduce the branch prediction impact
1687 * by collapsing perfectly predictable branches into a single
1688 * procedure instead of duplicating it.
1689 *
1690 * Is any of this noticeable? Probably not, so I'll take the
1691 * smaller code size.
1692 */
1693void
b6468f56 1694crit_exit_wrapper(__DEBUG_CRIT_ARG__)
9cf43f91 1695{
b6468f56 1696 _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
9cf43f91
MD
1697}
1698
2d93b37a
MD
1699void
1700crit_panic(void)
1701{
1702 thread_t td = curthread;
850634cc 1703 int lcrit = td->td_critcount;
2d93b37a 1704
850634cc
AH
1705 td->td_critcount = 0;
1706 panic("td_critcount is/would-go negative! %p %d", td, lcrit);
4a28fe22 1707 /* NOT REACHED */
2d93b37a
MD
1708}
1709
d165e668
MD
1710#ifdef SMP
1711
bd8015ca
MD
1712/*
1713 * Called from debugger/panic on cpus which have been stopped. We must still
1714 * process the IPIQ while stopped, even if we were stopped while in a critical
1715 * section (XXX).
1716 *
1717 * If we are dumping also try to process any pending interrupts. This may
1718 * or may not work depending on the state of the cpu at the point it was
1719 * stopped.
1720 */
1721void
1722lwkt_smp_stopped(void)
1723{
1724 globaldata_t gd = mycpu;
1725
1726 crit_enter_gd(gd);
1727 if (dumping) {
1728 lwkt_process_ipiq();
1729 splz();
1730 } else {
1731 lwkt_process_ipiq();
1732 }
1733 crit_exit_gd(gd);
1734}
1735
d165e668 1736#endif