/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>
#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct ipiq_stats {
    __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
    __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
    __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
    __int64_t ipiq_passive;	/* passive IPI messages */
    __int64_t ipiq_cscount;	/* number of cpu synchronizations */
};

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)	ipiq_stats_percpu[(gd)->gd_cpuid]
static int ipiq_debug;		/* set to 1 for debug */
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS	void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_quick, 9, "cpumask=%08lx", unsigned long mask);

#define logipiq(name, func, arg1, arg2, sgd, dgd) \
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg) \
	KTR_LOG(ipiq_ ## name, arg)
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);
#define IPIQ_SYSCTL(name) \
static int \
sysctl_##name(SYSCTL_HANDLER_ARGS) \
{ \
    __int64_t val = 0; \
    int cpu, error; \
    for (cpu = 0; cpu < ncpus; ++cpu) \
	val += ipiq_stats_percpu[cpu].name; \
    error = sysctl_handle_quad(oidp, &val, 0, req); \
    if (error || req->newptr == NULL) \
	return (error); \
    for (cpu = 0; cpu < ncpus; ++cpu) \
	ipiq_stats_percpu[cpu].name = val; \
    return (0); \
}
IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPIs sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPIs avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");
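
/*
 * Illustrative only: the aggregated counters above appear under the lwkt
 * sysctl tree and can be read (or reset by writing) from userland, e.g.:
 *
 *	sysctl lwkt.ipiq_count lwkt.ipiq_fifofull
 */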
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO is written only by the owning (sending)
 * cpu; the target cpu only advances the read index.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
#ifdef _KERNEL_VIRTUAL
    int repeating = 0;
#endif
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * The target ipiq may have filled up due to passive IPIs and thus not
     * be aware that its queue is too full, so be sure to issue an ipiq
     * interrupt to the target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif
	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&target->gd_npoll)) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message.
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * Signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
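
/*
 * Illustrative only: a typical caller queues a callback on another cpu's
 * globaldata.  remote_bump() below is a hypothetical ipifunc3_t:
 *
 *	static void
 *	remote_bump(void *arg1, int arg2, struct intrframe *frame)
 *	{
 *		atomic_add_int((u_int *)arg1, arg2);
 *	}
 *
 *	seq = lwkt_send_ipiq3(globaldata_find(dcpu), remote_bump, &counter, 1);
 *
 * The callback runs on the target cpu from its IPI/doreti/splz processing
 * path, inside a critical section.
 */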
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * cheaper than an immediate IPI for messages that can tolerate some latency.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
#ifdef _KERNEL_VIRTUAL
    int repeating = 0;
#endif
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    ++gd->gd_intr_nesting_level;
    logipiq(send_pasv, func, arg1, arg2, gd, target);
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ++ipiq_stat(gd).ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif
	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&target->gd_npoll)) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message.
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
    --gd->gd_intr_nesting_level;
    crit_exit();

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
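
/*
 * Illustrative only: a caller that does not need the message processed
 * immediately (e.g. returning memory owned by a remote cpu) uses the
 * passive form and lets the target drain the queue on its next poll:
 *
 *	lwkt_send_ipiq3_passive(globaldata_find(dcpu), free_remote_fn, ptr, 0);
 *
 * free_remote_fn is a hypothetical ipifunc3_t callback.
 */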
/*
 * Send an IPI request without blocking; return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++gd->gd_intr_nesting_level;
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	--gd->gd_intr_nesting_level;
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * This isn't a passive IPI; we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
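
/*
 * Illustrative only: a caller that must not spin checks the return value
 * and falls back to a hypothetical deferral path, e.g.:
 *
 *	if (lwkt_send_ipiq3_nowait(target, fn, arg, 0) == ENOENT)
 *		defer_work(arg);
 */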
/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    CPUMASK_NANDMASK(mask, stopped_cpus);
    while (CPUMASK_TESTNZERO(mask)) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	CPUMASK_NANDBIT(mask, cpuid);
	++count;
    }
    return(count);
}
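
/*
 * Illustrative only: to broadcast to every other active cpu, a caller
 * builds the mask the same way lwkt_synchronize_ipiqs() does below:
 *
 *	cpumask_t mask = smp_active_mask;
 *
 *	CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
 *	lwkt_send_ipiq3_mask(mask, fn, arg, 0);
 */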
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    int64_t time_tgt = tsc_get_target(1000000000LL);
	    int time_loops = 10;
	    int benice = 0;
#ifdef _KERNEL_VIRTUAL
	    int repeating = 0;
#endif
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
#ifdef _KERNEL_VIRTUAL
		if (repeating++ > 10)
		    pthread_yield();
#endif
		/*
		 * IPIQs must be handled within 10 seconds and this code
		 * will warn after one second.
		 */
		if ((benice & 255) == 0 && tsc_test_target(time_tgt) > 0) {
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		    if (--time_loops == 0)
			panic("LWKT_WAIT_IPIQ");
		    time_tgt = tsc_get_target(1000000000LL);
		}
		++benice;

		/*
		 * xindex may be modified by another cpu; use a load fence
		 * to ensure the loop does not keep spinning on a stale,
		 * speculatively loaded value.
		 */
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
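
/*
 * Illustrative only: lwkt_wait_ipiq() pairs with the send functions above,
 * which return the post-send window index, so a caller already inside a
 * critical section can wait for its message to complete on the target:
 *
 *	seq = lwkt_send_ipiq3(target, fn, arg, 0);
 *	lwkt_wait_ipiq(target, seq);
 */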
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    ++gd->gd_processing_ipiq;
again:
    mask = gd->gd_ipimask;
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }

    /*
     * Process pending cpusyncs.  If the current thread has an active
     * cpusync we only run the list once and do not re-flag it, as the
     * thread itself is processing its interlock.
     */
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    --gd->gd_processing_ipiq;
}
void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

again:
    mask = gd->gd_ipimask;
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    /* need_ipiq(); do not reflag */
	}
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
}
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
static int iqterm[SMP_MAXCPU];
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    int i;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks);
	iqcount[mygd->gd_cpuid] = 0;
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }

    /*
     * Clear the originating core from our ipimask; we will process all
     * pending messages queued by it before returning.
     *
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     *
     * NOTE: Single pass only.  Returns non-zero if the queue is not empty
     *	     after the pass.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_info[ri].func;
	copy_arg1 = ip->ip_info[ri].arg1;
	copy_arg2 = ip->ip_info[ri].arg2;
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);

	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
		    (frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#endif
	}
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
		Debugger("PANIC_DEBUG");
		panic("PANIC_DEBUG");
	    }
	}
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there is still more in the queue.
     */
    return (ip->ip_rindex != ip->ip_windex);
}
static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    ATOMIC_CPUMASK_NANDBIT(*cpumask, mycpu->gd_cpuid);
    if (CPUMASK_TESTZERO(*cpumask))
	wakeup(cpumask);
}
void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = smp_active_mask;
    CPUMASK_ANDMASK(other_cpumask, mycpu->gd_other_cpus);
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (CPUMASK_TESTNZERO(other_cpumask)) {
	tsleep_interlock(&other_cpumask, 0);
	if (CPUMASK_TESTNZERO(other_cpumask))
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
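
/*
 * Illustrative only: lwkt_synchronize_ipiqs() is used as a drain barrier;
 * it returns only after every other active cpu has processed the sync IPI
 * queued here, which in turn means the IPIs this cpu queued to them
 * beforehand have also been processed:
 *
 *	lwkt_synchronize_ipiqs("ipiqsync");
 */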
/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}
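
/*
 * Illustrative only: lwkt_cpusync_simple() above is the canonical pattern.
 * A caller that must manipulate shared state while the other cpus are
 * quiesced splits the two halves around its own work:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, func, arg);
 *	lwkt_cpusync_interlock(&cs);
 *	... targets are now spinning in lwkt_cpusync_remote2() ...
 *	lwkt_cpusync_deinterlock(&cs);
 */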
void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
    }
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, (long)CPUMASK_LOWMASK(mask));
    }
    crit_exit_id("cpusync");
}
/*
 * The quick version does not quiesce the target cpu(s) but instead executes
 * the function on the target cpu(s) and waits for all to acknowledge.  This
 * avoids spinning on the target cpus.
 *
 * This function is typically only used for kernel_pmap updates.  User pmaps
 * have to be quiesced.
 */
void
lwkt_cpusync_quick(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * stage-2 cs_mack only.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote2, cs);
	logipiq2(sync_quick, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
    }
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    crit_exit_id("cpusync");
}
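
/*
 * Illustrative only: the quick form takes the same initialized structure,
 * e.g. with a hypothetical kernel_pmap invalidation callback:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, pmap_inval_fn, info);
 *	lwkt_cpusync_quick(&cs);
 */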
/*
 * helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Acknowledge by setting our bit in cs_mack, then fall into
 * the stage-2 poll.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
    lwkt_cpusync_remote2(cs);
}
/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if (CPUMASK_TESTMASK(cs->cs_mack, gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
	/* cs can be ripped out at this point */
    } else {
	lwkt_ipiq_t ip;
	int wi;

#ifdef _KERNEL_VIRTUAL
	pthread_yield();
#endif
	/*
	 * Requeue our IPI to avoid a deep stack recursion.  If no other
	 * IPIs are pending we can just loop up, which should help VMs
	 * better-detect spin loops.
	 */
	ip = &gd->gd_cpusyncq;

	if (ip->ip_rindex == ip->ip_windex) {
	    __asm __volatile("cli");
	    if (ip->ip_rindex == ip->ip_windex) {
		__asm __volatile("sti; hlt");
	    } else {
		__asm __volatile("sti");
	    }
	}

	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_info[wi].arg1 = cs;
	ip->ip_info[wi].arg2 = 0;
	++ip->ip_windex;
	KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);

	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mask),
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mack),
		    cs->cs_func);
	}
    }
}