MP Implementation 2/4: Implement a poor-man's IPI messaging subsystem,
[dragonfly.git] / sys / kern / lwkt_thread.c
/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.16 2003/07/08 06:27:27 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

static int untimely_switch = 0;
SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
static quad_t switch_count = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
static quad_t preempt_hit = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
static quad_t preempt_miss = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
static quad_t preempt_weird = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
static quad_t ipiq_count = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "");
static quad_t ipiq_fifofull = 0;
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, "");

/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = mycpu;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & TDF_RUNQ) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = mycpu;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
#if 0
        /*
         * YYY needs cli/sti protection? gd_reqpri set by interrupt
         * when made pending.  need better mechanism.
         */
        if (gd->gd_reqpri < (td->td_pri & TDPRI_MASK))
            gd->gd_reqpri = (td->td_pri & TDPRI_MASK);
#endif
    }
}
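
/*
 * Worked example (hypothetical values, illustrative only): how the run
 * queue bitmask drives selection.  _lwkt_enqueue() files a thread on
 * gd_tdrunq[nq], where nq = td->td_pri & TDPRI_MASK, and sets bit nq in
 * gd_runqmask.  lwkt_switch() then services the highest populated
 * priority first:
 *
 *	gd->gd_runqmask == 0x00000030;		(queues 4 and 5 populated)
 *	nq = bsrl(gd->gd_runqmask);		(bsrl returns 5, the highest set bit)
 *	ntd = TAILQ_FIRST(&gd->gd_tdrunq[5]);	(next thread to run)
 *
 * Bits for queues that have drained are cleared lazily by lwkt_switch()
 * rather than by _lwkt_dequeue(), per the comment above.
 */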

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_wait(lwkt_wait_t w)
{
    TAILQ_INIT(&w->wa_waitq);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td)
{
    void *stack;
    int flags = 0;

    if (td == NULL) {
        crit_enter();
        if (mycpu->gd_tdfreecount > 0) {
            --mycpu->gd_tdfreecount;
            td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_EXITED),
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
            crit_exit();
            stack = td->td_kstack;
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit();
            td = zalloc(thread_zone);
            td->td_kstack = NULL;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) == NULL) {
        stack = (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
        flags |= TDF_ALLOCATED_STACK;
    }
    lwkt_init_thread(td, stack, flags, mycpu);
    return(td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_init_thread(thread_t td, void *stack, int flags, struct globaldata *gd)
{
    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_CRIT;
    td->td_cpu = gd->gd_cpuid;	/* YYY don't need this if have td_gd */
    pmap_init_thread(td);
    crit_enter();
    TAILQ_INSERT_TAIL(&mycpu->gd_tdallq, td, td_allq);
    crit_exit();
}

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    va_list va;

    va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, PWAIT, "tdreap", hz);
}

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT(td->td_flags & TDF_EXITED,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter();
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit();
    } else {
        crit_exit();
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
            kmem_free(kernel_map,
                (vm_offset_t)td->td_kstack, UPAGES * PAGE_SIZE);
            /* gd invalid */
            td->td_kstack = NULL;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD)
            zfree(thread_zone, td);
    }
}


/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).
 */

int swtarg[32][2];
int swtarg2;

void
lwkt_switch(void)
{
    struct globaldata *gd;
    thread_t td = curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    if (mycpu->gd_intr_nesting_level &&
        td->td_preempted == NULL && panicstr == NULL
    ) {
        panic("lwkt_switch: cannot switch from within an interrupt, yet\n");
    }

    crit_enter();
    ++switch_count;

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Look at the actual lock.
     */
    mpheld = MP_LOCK_HELD();
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to setup the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d\n",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.
         */
        gd = mycpu;
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            if (ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) {
                /*
                 * Target needs MP lock and we couldn't get it, try
                 * to locate a thread which does not need the MP lock
                 * to run.
                 */
                u_int32_t rqmask = gd->gd_runqmask;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount == 0)
                            break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = gd->gd_idletd;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            ntd = gd->gd_idletd;
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD();
    }
#endif

    if (mycpu->gd_cpuid == 1) {
        bcopy(swtarg[0], swtarg[1], sizeof(int)*31*2);
        swtarg[0][0] = (int)ntd->td_sp;
        swtarg[0][1] = *(int *)ntd->td_sp;
    }
    KKASSERT(td->td_cpu == ntd->td_cpu);
    if (td != ntd) {
        td->td_switch(ntd);
    }

    crit_exit();
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *	- We are not preempting ourselves
 *	- The target thread is owned by the current cpu
 *	- We are not currently being preempted
 *	- The target is not currently being preempted
 *	- We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock.  If we preempt we have to preserve
 * the expected situation.
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    thread_t td = curthread;
#ifdef SMP
    int mpheld;
#endif
    int savecnt;

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt).
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        return;
    }
#ifdef SMP
    if (ntd->td_cpu != mycpu->gd_cpuid) {
        ++preempt_miss;
        return;
    }
#endif
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        return;
    }
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
#ifdef SMP
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        panic("lwkt_preempt(): held and no count");
    savecnt = td->td_mpcount;
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        return;
    }
#endif

    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    if (mpheld == 0 && MP_LOCK_HELD())
        cpu_rel_mplock();
    else if (mpheld && !MP_LOCK_HELD())
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

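/*
 * Usage sketch (hypothetical, illustrative only): preemption is normally
 * reached through the td_preemptable callback rather than called directly.
 * An interrupt-like thread that should run the moment it is scheduled from
 * its owning cpu would be set up roughly as:
 *
 *	ithread->td_preemptable = lwkt_preempt;
 *	...
 *	lwkt_schedule(ithread);
 *
 * lwkt_schedule() enqueues the thread and then calls
 * ithread->td_preemptable(ithread, TDPRI_CRIT).  If every condition listed
 * above is satisfied the current thread is switched out on the spot and
 * resumed the instant ithread blocks; otherwise lwkt_preempt() just bumps
 * one of the preempt_* counters and returns, and ithread waits for the
 * next lwkt_switch().
 */
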
/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqpri indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    thread_t td = curthread;

    if ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) {
        mycpu->gd_reqpri = 0;
        splz();
    }

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && mycpu->gd_intr_nesting_level == 0) {
        crit_enter();
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();		/* will not reenter yield function */
        } else {
            lwkt_schedule_self();	/* make sure we are scheduled */
            lwkt_switch();		/* will not reenter yield function */
            lwkt_deschedule_self();	/* make sure we are descheduled */
        }
        crit_exit_noyield();
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqpri tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self();
    lwkt_switch();
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    _lwkt_enqueue(td);
    if (td->td_proc && td->td_proc->p_stat == SSLEEP)
        panic("SCHED SELF PANIC");
    crit_exit();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * YYY this is one of the best places to implement load balancing code.
 * Load balancing can be accomplished by requesting other sorts of actions
 * for the thread in question.
 */
void
lwkt_schedule(thread_t td)
{
#ifdef INVARIANTS
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter();
    if (td == curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         */
        if ((w = td->td_wait) != NULL) {
            if (lwkt_trytoken(&w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
                if (td->td_cpu == mycpu->gd_cpuid) {
                    _lwkt_enqueue(td);
                    if (td->td_preemptable)
                        td->td_preemptable(td, TDPRI_CRIT*2); /* YYY +token */
                } else {
                    lwkt_send_ipiq(td->td_cpu, (ipifunc_t)lwkt_schedule, td);
                }
                lwkt_reltoken(&w->wa_token);
            } else {
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
            if (td->td_cpu == mycpu->gd_cpuid) {
                _lwkt_enqueue(td);
                if (td->td_preemptable)
                    td->td_preemptable(td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        }
    }
    crit_exit();
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit();
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_cpu == mycpu->gd_cpuid) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_cpu, (ipifunc_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

struct proc *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_proc);
}


/*
 * This function deschedules the current thread and blocks on the specified
 * wait queue.  We obtain ownership of the wait queue in order to block
 * on it.  A generation number is used to interlock the wait queue in case
 * it gets signalled while we are blocked waiting on the token.
 *
 * Note: alternatively we could dequeue our thread and then message the
 * target cpu owning the wait queue.  YYY implement as sysctl.
 *
 * Note: wait queue signals normally ping-pong the cpu as an optimization.
 */
typedef struct lwkt_gettoken_req {
    lwkt_token_t tok;
    int cpu;
} lwkt_gettoken_req;

void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;

    lwkt_gettoken(&w->wa_token);
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
        lwkt_switch();
    }
    /* token might be lost, doesn't matter for gen update */
    *gen = w->wa_gen;
    lwkt_reltoken(&w->wa_token);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w)
{
    thread_t td;
    int count;

    lwkt_gettoken(&w->wa_token);
    ++w->wa_gen;
    count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_cpu == mycpu->gd_cpuid) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_cpu, (ipifunc_t)lwkt_schedule, td);
        }
        lwkt_regettoken(&w->wa_token);
    }
    lwkt_reltoken(&w->wa_token);
}

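/*
 * Usage sketch (hypothetical waiter/waker, illustrative only): the
 * generation number closes the race between testing a condition and
 * getting onto the wait queue.  'my_wait' and 'work_ready' are
 * placeholders; the wait structure is initialized once with
 * lwkt_init_wait().
 *
 *	waiter:
 *		int gen = my_wait.wa_gen;
 *		while (work_ready == 0)
 *			lwkt_block(&my_wait, "mywait", &gen);
 *
 *	waker:
 *		work_ready = 1;
 *		lwkt_signal(&my_wait);
 *
 * If lwkt_signal() bumps wa_gen between the waiter's test and its call to
 * lwkt_block(), the stale generation makes lwkt_block() return without
 * sleeping and the waiter re-tests the condition.
 */
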
/*
 * Acquire ownership of a token.  The token may have spl and/or critical
 * section side effects, depending on its purpose.  These side effects
 * guarantee that you will maintain ownership of the token as long as you
 * do not block.  If you block you may lose access to the token (but you
 * must still release it even if you lose your access to it).
 *
 * YYY for now we use a critical section to prevent IPIs from taking away
 * a token, but we really only need to disable IPIs ?
 *
 * YYY certain tokens could be made to act like mutexes when performance
 * would be better (e.g. t_cpu == -1).  This is not yet implemented.
 *
 * If the token is owned by another cpu we may have to send an IPI to
 * it and then block.  The IPI causes the token to be given away to the
 * requesting cpu, unless it has already changed hands.  Since only the
 * current cpu can give away a token it owns we do not need a memory barrier.
 */
static
void
lwkt_gettoken_remote(void *arg)
{
    lwkt_gettoken_req *req = arg;
    if (req->tok->t_cpu == mycpu->gd_cpuid) {
        req->tok->t_cpu = req->cpu;
    }
}

int
lwkt_gettoken(lwkt_token_t tok)
{
    /*
     * Prevent preemption so the token can't be taken away from us once
     * we gain ownership of it.  Use a synchronous request which might
     * block.  The request will be forwarded as necessary playing catchup
     * to the token.
     */
    struct lwkt_gettoken_req req;
    int seq;

    crit_enter();
#ifdef SMP
    while (tok->t_cpu != mycpu->gd_cpuid) {
        int dcpu;

        req.cpu = mycpu->gd_cpuid;
        req.tok = tok;
        dcpu = (volatile int)tok->t_cpu;
        seq = lwkt_send_ipiq(dcpu, lwkt_gettoken_remote, &req);
        lwkt_wait_ipiq(dcpu, seq);
    }
#endif
    /*
     * leave us in a critical section on return.  This will be undone
     * by lwkt_reltoken().  Bump the generation number.
     */
    return(++tok->t_gen);
}

/*
 * Attempt to acquire ownership of a token.  Returns 1 on success, 0 on
 * failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
    crit_enter();
#ifdef SMP
    if (tok->t_cpu != mycpu->gd_cpuid) {
        crit_exit();	/* undo the crit_enter() on the failure path */
        return(0);
    }
#endif
    /* leave us in the critical section */
    ++tok->t_gen;
    return(1);
}

/*
 * Release your ownership of a token.  Releases must occur in reverse
 * order to acquisitions, eventually so priorities can be unwound properly
 * like SPLs.  At the moment the actual implementation doesn't care.
 *
 * We can safely hand a token that we own to another cpu without notifying
 * it, but once we do we can't get it back without requesting it (unless
 * the other cpu hands it back to us before we check).
 *
 * We might have lost the token, so check that.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
    if (tok->t_cpu == mycpu->gd_cpuid) {
        tok->t_cpu = tok->t_reqcpu;
    }
    crit_exit();
}

/*
 * Reacquire a token that might have been lost and compare and update the
 * generation number.  0 is returned if the generation has not changed
 * (nobody else obtained the token while we were blocked, on this cpu or
 * any other cpu).
 *
 * This function returns with the token re-held whether the generation
 * number changed or not.
 */
int
lwkt_gentoken(lwkt_token_t tok, int *gen)
{
    if (lwkt_regettoken(tok) == *gen) {
        return(0);
    } else {
        *gen = tok->t_gen;
        return(-1);
    }
}


/*
 * Re-acquire a token that might have been lost.  Returns the generation
 * number of the token.
 */
int
lwkt_regettoken(lwkt_token_t tok)
{
    struct lwkt_gettoken_req req;
    int seq;

    /* assert we are in a critical section */
    if (tok->t_cpu != mycpu->gd_cpuid) {
#ifdef SMP
        while (tok->t_cpu != mycpu->gd_cpuid) {
            int dcpu;
            req.cpu = mycpu->gd_cpuid;
            req.tok = tok;
            dcpu = (volatile int)tok->t_cpu;
            seq = lwkt_send_ipiq(dcpu, lwkt_gettoken_remote, &req);
            lwkt_wait_ipiq(dcpu, seq);
        }
#endif
        ++tok->t_gen;
    }
    return(tok->t_gen);
}

void
lwkt_inittoken(lwkt_token_t tok)
{
    /*
     * Zero structure and set cpu owner and reqcpu to cpu 0.
     */
    bzero(tok, sizeof(*tok));
}

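/*
 * Usage sketch (hypothetical token, illustrative only): a token stays
 * owned only while the holder does not block, so code that may sleep
 * revalidates with the generation number afterwards.  'my_tok' and the
 * revalidation step are placeholders.
 *
 *	static struct lwkt_token my_tok;	(lwkt_inittoken(&my_tok) at init)
 *
 *	int gen = lwkt_gettoken(&my_tok);
 *	for (;;) {
 *		...modify the structure my_tok protects...
 *		tsleep(&my_tok, PWAIT, "mytok", hz);	(may lose the token)
 *		if (lwkt_gentoken(&my_tok, &gen) == 0)
 *			break;		(nobody else took it while we slept)
 *		...re-validate state and retry...
 *	}
 *	lwkt_reltoken(&my_tok);
 */
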
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * The thread will be entered with the MP lock held.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, thread_t template, int tdflags,
    const char *fmt, ...)
{
    thread_t td;
    va_list ap;

    td = *tdp = lwkt_alloc_thread(template);
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

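/*
 * Usage sketch (hypothetical caller, illustrative only): 'my_main' and
 * 'my_td' are placeholders.  Passing a NULL template makes
 * lwkt_alloc_thread() allocate the thread and stack, tdflags of 0 lets
 * the thread be scheduled immediately (TDF_STOPREQ would defer it), and
 * the format arguments become td_comm for ps:
 *
 *	static thread_t my_td;
 *
 *	lwkt_create(my_main, NULL, &my_td, NULL, 0, "mythread");
 *
 * Under SMP the new thread is entered with the MP lock held
 * (td_mpcount == 1); when my_main() returns it funnels into kthread_exit()
 * via the handler installed by cpu_set_thread_handler().
 */
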
/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    crit_enter();
    lwkt_deschedule_self();
    ++mycpu->gd_tdfreecount;
    TAILQ_INSERT_TAIL(&mycpu->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.  5.x compatible.
 */
int
kthread_create(void (*func)(void *), void *arg,
    struct thread **tdp, const char *fmt, ...)
{
    thread_t td;
    va_list ap;

    td = *tdp = lwkt_alloc_thread(NULL);
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    va_end(ap);

    /*
     * Schedule the thread to run
     */
    lwkt_schedule(td);
    return 0;
}

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 *
 * XXX duplicates lwkt_exit()
 */
void
kthread_exit(void)
{
    lwkt_exit();
}

#ifdef SMP

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO can be written.
 *
 * YYY If the FIFO fills up we have to enable interrupts and process the
 * IPIQ while waiting for it to empty or we may deadlock with another cpu.
 * Create a CPU_*() function to do this!
 *
 * Must be called from a critical section.
 */
int
lwkt_send_ipiq(int dcpu, ipifunc_t func, void *arg)
{
    lwkt_ipiq_t ip;
    int windex;

    if (dcpu == mycpu->gd_cpuid) {
        func(arg);
        return(0);
    }
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    KKASSERT(dcpu >= 0 && dcpu < ncpus);
    ++ipiq_count;
    ip = &mycpu->gd_ipiq[dcpu];
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
        unsigned int eflags = read_eflags();
        cpu_enable_intr();
        ++ipiq_fifofull;
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
        write_eflags(eflags);
    }
    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg[windex] = arg;
    /* YYY memory barrier */
    ++ip->ip_windex;
    cpu_send_ipiq(dcpu);	/* issues memory barrier if appropriate */
    return(ip->ip_windex);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * Must be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(int dcpu, int seq)
{
    lwkt_ipiq_t ip;

    if (dcpu != mycpu->gd_cpuid) {
        KKASSERT(dcpu >= 0 && dcpu < ncpus);
        ip = &mycpu->gd_ipiq[dcpu];
        if ((int)(ip->ip_rindex - seq) < 0) {
            unsigned int eflags = read_eflags();
            cpu_enable_intr();
            while ((int)(ip->ip_rindex - seq) < 0) {
                lwkt_process_ipiq();
#if 0
                lwkt_switch();	/* YYY fixme */
#endif
            }
            write_eflags(eflags);
        }
    }
}
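
/*
 * Usage sketch (illustrative only): a synchronous cross-cpu request, the
 * same pattern lwkt_gettoken() uses above.  'dcpu', 'my_remote_func' and
 * 'my_args' are placeholders; the function must match ipifunc_t, i.e.
 * void my_remote_func(void *arg), and it runs on the target cpu from its
 * IPI/doreti/splz path via lwkt_process_ipiq().
 *
 *	int seq;
 *
 *	crit_enter();
 *	seq = lwkt_send_ipiq(dcpu, my_remote_func, &my_args);
 *	lwkt_wait_ipiq(dcpu, seq);	(spins with interrupts enabled,
 *					 draining IPIs sent to this cpu
 *					 to avoid cross-cpu deadlock)
 *	crit_exit();
 */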

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz.
 */
void
lwkt_process_ipiq(void)
{
    int n;
    int cpuid = mycpu->gd_cpuid;

    for (n = 0; n < ncpus; ++n) {
        lwkt_ipiq_t ip;
        int ri;

        if (n == cpuid)
            continue;
        ip = globaldata_find(n)->gd_ipiq;
        if (ip == NULL)
            continue;
        ip = &ip[cpuid];
        while (ip->ip_rindex != ip->ip_windex) {
            ri = ip->ip_rindex & MAXCPUFIFO_MASK;
            ip->ip_func[ri](ip->ip_arg[ri]);
            ++ip->ip_rindex;
        }
    }
}

#else

int
lwkt_send_ipiq(int dcpu, ipifunc_t func, void *arg)
{
    panic("lwkt_send_ipiq: UP box! (%d,%p,%p)", dcpu, func, arg);
    return(0); /* NOT REACHED */
}

void
lwkt_wait_ipiq(int dcpu, int seq)
{
    panic("lwkt_wait_ipiq: UP box! (%d,%d)", dcpu, seq);
}

#endif