/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.7 2004/07/04 22:44:27 eirikn Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * rendezvous and CPU synchronization.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#define THREAD_STACK	(UPAGES * PAGE_SIZE)

#else	/* !_KERNEL: libcaps userland build */

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/lock.h>
#include <machine/cpu.h>
#include <machine/atomic.h>

#endif

/*
 * IPI statistics, exported as read-write sysctls so they can be zeroed.
 */
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo-full backoff conditions */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */

#ifdef _KERNEL
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0, "");
#endif

static int lwkt_process_ipiq1(lwkt_ipiq_t ip, struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t poll);
static void lwkt_cpusync_remote2(lwkt_cpusync_t poll);

#ifdef SMP

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO has a single writer (the owning cpu) and a
 * single reader (the target cpu), so it can be written without locking.
 *
 * YYY If the FIFO fills up we have to enable interrupts and process the
 * IPIQ while waiting for it to empty or we may deadlock with another cpu.
 * Create a CPU_*() function to do this!
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * Must be called from a critical section.
 */
int
lwkt_send_ipiq(globaldata_t target, ipifunc_t func, void *arg)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    if (target == gd) {
	func(arg);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * We always drain before the FIFO becomes full so it should never
     * become full.  We need to leave enough entries to deal with
     * reentrancy.
     */
    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO);
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = (ipifunc2_t)func;
    ip->ip_arg[windex] = arg;
    cpu_mb1();		/* make the entry visible before bumping windex */
    ++ip->ip_windex;
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
	unsigned int eflags = read_eflags();

	/*
	 * The FIFO is over half full: enable interrupts and process our
	 * own incoming IPIs (to avoid a cross-cpu deadlock) until the
	 * target has drained its side below a quarter full.
	 */
	cpu_enable_intr();
	++ipiq_fifofull;
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	write_eflags(eflags);
    }
    --gd->gd_intr_nesting_level;
    cpu_send_ipiq(target->gd_cpuid);	/* issues mem barrier if appropriate */
    crit_exit();
    return(ip->ip_windex);
}
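
/*
 * Illustrative example (not part of the original file): queueing a function
 * call on another cpu.  remote_hello() and the choice of target are
 * hypothetical; left inside #if 0 so it is never compiled.
 */
#if 0
static void
remote_hello(void *arg)
{
    /* executes on the target cpu from its IPIQ processing loop */
    printf("hello from cpu %d, arg %p\n", mycpu->gd_cpuid, arg);
}

static void
example_send(globaldata_t target)
{
    crit_enter();	/* lwkt_send_ipiq() expects a critical section */
    lwkt_send_ipiq(target, remote_hello, NULL);
    crit_exit();
}
#endif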

/*
 * Send an IPI request passively; return 0 on success and ENOENT on failure.
 * This routine does not recurse through lwkt_process_ipiq(), nor does it
 * block trying to queue the actual IPI.  If we successfully queue the
 * message but fail to queue the IPI, we still count it as a success.
 * The occasional small race against a target cpu HLT is recovered at
 * the next clock interrupt.
 */
int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc_t func, void *arg)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    if (target == gd) {
	func(arg);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * If the FIFO is too full do not queue the message; fail passively.
     */
    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO - 1) {
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = (ipifunc2_t)func;
    ip->ip_arg[windex] = arg;
    cpu_mb1();		/* make the entry visible before bumping windex */
    ++ip->ip_windex;

    /*
     * passive mode doesn't work yet :-(
     */
#if 1
    cpu_send_ipiq(target->gd_cpuid);
#else
    cpu_send_ipiq_passive(target->gd_cpuid);
#endif
    return(0);
}
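
/*
 * Illustrative example (not part of the original file): a passive send
 * degrades gracefully by returning ENOENT when the FIFO is nearly full,
 * so a caller can fall back to the draining variant.  remote_hello() is
 * the hypothetical helper sketched earlier.
 */
#if 0
static void
example_send_passive(globaldata_t target)
{
    crit_enter();
    if (lwkt_send_ipiq_passive(target, remote_hello, NULL) == ENOENT)
	lwkt_send_ipiq(target, remote_hello, NULL);	/* drains if needed */
    crit_exit();
}
#endif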

/*
 * Deprecated; used only by fast interrupt forwarding.
 */
int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc_t func, void *arg)
{
    return(lwkt_send_ipiq(globaldata_find(dcpu), func, arg));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
void
lwkt_send_ipiq_mask(u_int32_t mask, ipifunc_t func, void *arg)
{
    int cpuid;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = bsfl(mask);	/* locate lowest set bit */
	lwkt_send_ipiq(globaldata_find(cpuid), func, arg);
	mask &= ~(1 << cpuid);
    }
}
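
/*
 * Illustrative example (not part of the original file): sending the same
 * message to cpus 1 and 2; stopped cpus are masked off internally.
 * remote_hello() is the hypothetical helper sketched earlier.
 */
#if 0
static void
example_send_mask(void)
{
    crit_enter();
    lwkt_send_ipiq_mask((1 << 1) | (1 << 2), remote_hello, NULL);
    crit_exit();
}
#endif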

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
	    unsigned int eflags = read_eflags();

	    cpu_enable_intr();
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0)
		    printf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n", mycpu->gd_cpuid, target->gd_cpuid, ip->ip_xindex - seq);
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
	    }
	    write_eflags(eflags);
	}
    }
}
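
/*
 * Illustrative example (not part of the original file): pairing the
 * sequence number returned by lwkt_send_ipiq() with lwkt_wait_ipiq() to
 * spin until the target cpu has actually executed the function.
 * remote_hello() is the hypothetical helper sketched earlier.
 */
#if 0
static void
example_send_and_wait(globaldata_t target)
{
    int seq;

    crit_enter();
    seq = lwkt_send_ipiq(target, remote_hello, NULL);
    lwkt_wait_ipiq(target, seq);	/* returns once ip_xindex passes seq */
    crit_exit();
}
#endif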

/*
 * Return the current write index for the target cpu's queue; this is the
 * sequence number that lwkt_wait_ipiq() waits against.
 */
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    ip = globaldata_find(n)->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq1(&ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq1(&gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}

#ifdef _KERNEL
void
lwkt_process_ipiq_frame(struct intrframe frame)
{
    globaldata_t gd = mycpu;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    ip = globaldata_find(n)->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq1(&ip[gd->gd_cpuid], &frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq1(&gd->gd_cpusyncq, &frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
#endif

static int
lwkt_process_ipiq1(lwkt_ipiq_t ip, struct intrframe *frame)
{
    int ri;
    int wi = ip->ip_windex;

    /*
     * Note: xindex is only updated after we are sure the function has
     * finished execution.  Beware lwkt_process_ipiq() reentrancy!  The
     * function may send an IPI which may block/drain.
     */
    while ((ri = ip->ip_rindex) != wi) {
	ip->ip_rindex = ri + 1;
	ri &= MAXCPUFIFO_MASK;
	ip->ip_func[ri](ip->ip_arg[ri], frame);
	/* YYY memory barrier */
	ip->ip_xindex = ip->ip_rindex;
    }
    return(wi != ip->ip_windex);
}
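
/*
 * A note on the index arithmetic above: ip_rindex, ip_windex, and
 * ip_xindex are free-running counters, masked with MAXCPUFIFO_MASK only
 * when used as array subscripts.  Unsigned wraparound keeps the
 * difference correct; e.g. windex = 0x00000002 and rindex = 0xfffffffe
 * still yields windex - rindex = 4 entries pending.  Because each FIFO
 * has exactly one writer (the sending cpu bumps ip_windex) and one
 * reader (the target cpu bumps ip_rindex), no lock is required.
 */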

#else	/* !SMP */

/*
 * !SMP dummy routines; these should never actually be reached on a UP box.
 */
int
lwkt_send_ipiq(globaldata_t target, ipifunc_t func, void *arg)
{
    panic("lwkt_send_ipiq: UP box! (%d,%p,%p)", target->gd_cpuid, func, arg);
    return(0); /* NOT REACHED */
}

void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    panic("lwkt_wait_ipiq: UP box! (%d,%d)", target->gd_cpuid, seq);
}

#endif	/* SMP */

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_simple()
 *
 *	The function is executed synchronously before return on remote
 *	cpus.  A lwkt_cpusync_t pointer is passed as an argument.  The data
 *	can be accessed via arg->cs_data.
 *
 *	XXX should I just pass the data as an argument to be consistent?
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = func;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
	func(&cmd);
    lwkt_cpusync_finish(&cmd);
}
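
/*
 * Illustrative example (not part of the original file): running a
 * function synchronously on every cpu via lwkt_cpusync_simple().
 * example_sync_func() is hypothetical.
 */
#if 0
static void
example_sync_func(lwkt_cpusync_t info)
{
    printf("cpu %d sees data %p\n", mycpu->gd_cpuid, info->cs_data);
}

static void
example_run_everywhere(void *data)
{
    lwkt_cpusync_simple((cpumask_t)-1, example_sync_func, data);
}
#endif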

/*
 * lwkt_cpusync_fastdata()
 *
 *	The function is executed in tandem with return on remote cpus.
 *	The data is directly passed as an argument.  Do not pass pointers
 *	to temporary storage, as the storage may already be gone by the
 *	time the target cpu executes the function.
 *
 *	At the moment lwkt_cpusync is declared on the stack and we must wait
 *	for all remote cpus to ack in lwkt_cpusync_finish(), but as a future
 *	optimization we should be able to put a counter in the globaldata
 *	structure (if it is not otherwise being used) and just poke it and
 *	return without waiting. XXX
 */
void
lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = func;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
	func(data);
    lwkt_cpusync_finish(&cmd);
}
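
/*
 * Illustrative example (not part of the original file): passing a small
 * scalar directly to all other cpus with lwkt_cpusync_fastdata(), which
 * sidesteps the temporary-storage lifetime issue described above.  Both
 * helper functions are hypothetical.
 */
#if 0
static void
example_print_token(void *data)
{
    printf("cpu %d got token %d\n", mycpu->gd_cpuid, (int)(intptr_t)data);
}

static void
example_fastdata(void)
{
    /* runs on all other cpus in tandem with the return */
    lwkt_cpusync_fastdata(mycpu->gd_other_cpus, example_print_token,
			  (void *)(intptr_t)42);
}
#endif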

/*
 * lwkt_cpusync_start()
 *
 *	Start synchronization with a set of target cpus, return once they are
 *	known to be in a synchronization loop.  The target cpus will execute
 *	poll->cs_run_func() IN TANDEM WITH THE RETURN.
 *
 *	XXX future: add lwkt_cpusync_start_quick() and require a call to
 *	lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
 *	potentially absorb the IPI latency doing something useful.
 */
void
lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = 0;
    poll->cs_mask = mask;
#ifdef SMP
    poll->cs_maxcount = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
#endif
}

/*
 * lwkt_cpusync_add()
 *
 *	Add additional cpus to an in-progress synchronization.
 */
void
lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;
    int count;

    mask &= ~poll->cs_mask;
    poll->cs_mask |= mask;
#ifdef SMP
    count = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
#ifdef SMP
    poll->cs_maxcount += count;
    if (poll->cs_maxcount) {
	if (poll->cs_maxcount == count)
	    ++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
#endif
}

/*
 * Finish synchronization with a set of target cpus.  The target cpus will
 * execute cs_fin1_func(poll) prior to this function returning, and will
 * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
 *
 * If cs_maxcount is non-zero then we are mastering a cpusync with one or
 * more remote cpus and must account for it in our thread structure.
 */
void
lwkt_cpusync_finish(lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = -1;	/* signal the remote cpus to finish */
    if (poll->cs_mask & gd->gd_cpumask) {
	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func)
	    poll->cs_fin2_func(poll->cs_data);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
	while (poll->cs_count != -(poll->cs_maxcount + 1)) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
	--gd->gd_curthread->td_cscount;
    }
#endif
}
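
/*
 * Illustrative example (not part of the original file): using the
 * start/finish pair directly to hold every other cpu in a spin loop while
 * this cpu performs an operation requiring system-wide quiescence (e.g.
 * invalidating MMU mappings).
 */
#if 0
static void
example_quiesce_window(void)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = NULL;
    lwkt_cpusync_start(mycpu->gd_other_cpus, &cmd);
    /* all other cpus now spin in lwkt_cpusync_remote2() ... */
    /* ... perform the globally-visible operation here ... */
    lwkt_cpusync_finish(&cmd);	/* releases the remote cpus */
}
#endif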

/*
 * Helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t poll)
{
    atomic_add_int(&poll->cs_count, 1);
    if (poll->cs_run_func)
	poll->cs_run_func(poll);
    lwkt_cpusync_remote2(poll);
}

/*
 * Helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.  When the originator requests that we
 * finish we execute cs_fin1_func(poll) synchronously and cs_fin2_func(data)
 * in tandem with the release.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t poll)
{
    if (poll->cs_count < 0) {
	cpusync_func2_t savef;
	void *saved;

	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func) {
	    savef = poll->cs_fin2_func;
	    saved = poll->cs_data;
	    atomic_add_int(&poll->cs_count, -1);	/* release the originator */
	    savef(saved);				/* runs in tandem with the release */
	} else {
	    atomic_add_int(&poll->cs_count, -1);
	}
    } else {
	globaldata_t gd = mycpu;
	lwkt_ipiq_t ip;
	int wi;

	/*
	 * Requeue ourselves on our own cpu's cpusync queue so we keep
	 * polling until the originator releases us.
	 */
	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc2_t)lwkt_cpusync_remote2;
	ip->ip_arg[wi] = poll;
	cpu_mb1();
	++ip->ip_windex;
    }
}