/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */
/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0,
    "");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif
#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO can be written.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * The target ipiq may have gotten filled up due to passive IPIs and thus
     * not be aware that its queue is too full, so be sure to issue an
     * ipiq interrupt to the target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	cpu_send_ipiq(target->gd_cpuid);
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
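
/*
 * Example (editor's sketch, not part of the original source): a typical
 * caller wraps the remote operation in an ipifunc3_t-compatible callback
 * and fires it at the target cpu.  The names example_remote_op and
 * example_kick are hypothetical; only lwkt_send_ipiq3() itself is defined
 * by this file.
 */
#if 0
static void
example_remote_op(void *arg1, int arg2, struct intrframe *frame)
{
    /* runs on the target cpu from its IPI processing loop */
    kprintf("cpu %d: arg1=%p arg2=%d\n", mycpu->gd_cpuid, arg1, arg2);
}

static void
example_kick(globaldata_t target)
{
    /* queues the message; a hardware IPI fires only if the target is idle */
    lwkt_send_ipiq3(target, example_remote_op, NULL, 42);
}
#endif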
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	cpu_send_ipiq(target->gd_cpuid);
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
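
/*
 * Example (editor's sketch): passive sends suit batched, non-urgent work
 * such as remote memory deallocation; delivery waits for the target's next
 * poll.  example_remote_op is the hypothetical callback sketched above
 * lwkt_send_ipiq3().
 */
#if 0
static void
example_lazy_send(globaldata_t target, void *obj)
{
    /* no hardware IPI unless the FIFO is filling up */
    lwkt_send_ipiq3_passive(target, example_remote_op, obj, 0);
}
#endif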
/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
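
/*
 * Example (editor's sketch): the nowait variant fails with ENOENT when the
 * target FIFO is too full, so callers typically fall back to the blocking
 * path.  example_remote_op is hypothetical.
 */
#if 0
static void
example_try_send(globaldata_t target, void *arg)
{
    if (lwkt_send_ipiq3_nowait(target, example_remote_op, arg, 0) == ENOENT)
	lwkt_send_ipiq3(target, example_remote_op, arg, 0);
}
#endif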
/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~CPUMASK(cpuid);
	++count;
    }
    return(count);
}
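
/*
 * Example (editor's sketch): broadcast to every other active cpu using the
 * same mask construction that lwkt_synchronize_ipiqs() uses below.
 * example_remote_op is hypothetical.
 */
#if 0
static void
example_broadcast(void *arg)
{
    lwkt_send_ipiq3_mask(mycpu->gd_other_cpus & smp_active_mask,
			 example_remote_op, arg, 0);
}
#endif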
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0)
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
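
/*
 * Example (editor's sketch): lwkt_send_ipiq3() returns a windex-based
 * sequence number which lwkt_wait_ipiq() compares against ip_xindex, so a
 * send can be paired with a wait for remote completion.  The wait must be
 * made from a critical section.  example_remote_op is hypothetical.
 */
#if 0
static void
example_send_and_wait(globaldata_t target, void *arg)
{
    int seq;

    crit_enter();
    seq = lwkt_send_ipiq3(target, example_remote_op, arg, 0);
    lwkt_wait_ipiq(target, seq);	/* spins, processing incoming IPIs */
    crit_exit();
}
#endif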
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    int i;

    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks_wr);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }
#endif
    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
		    (frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#else
		    NULL);
#endif
	}
#endif
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;
#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;
    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}
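
/*
 * Editor's note: ip_windex and ip_rindex increase monotonically and are
 * only masked with MAXCPUFIFO_MASK when used as array subscripts, so the
 * difference (windex - rindex) yields the queue depth even after the
 * counters wrap.  A minimal sketch:
 */
#if 0
static int
example_ipiq_depth(lwkt_ipiq_t ip)
{
    return (ip->ip_windex - ip->ip_rindex);	/* valid across wraparound */
}
#endif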
/*
 * Helper for lwkt_synchronize_ipiqs(): clear our cpu's bit in the shared
 * mask and wake up the originator once the last bit is cleared.
 */
static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}
void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}
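
/*
 * Example (editor's sketch): quiesce all active cpus around a critical
 * update.  example_sync_func is a hypothetical cpusync_func_t; it runs on
 * every cpu in the mask during the deinterlock stage.
 */
#if 0
static void
example_quiesce_all(void *data)
{
    lwkt_cpusync_simple(smp_active_mask, example_sync_func, data);
}
#endif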
void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
    cs->cs_mack = 0;
    crit_enter_id("cpusync");
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, mask);
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
    }
}
/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cs->cs_mack = 0;
    cpu_ccfence();
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	cs->cs_func(cs->cs_data);
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, mask);
    }
    crit_exit_id("cpusync");
}
/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    lwkt_cpusync_remote2(cs);
}
/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if ((cs->cs_mack & gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    } else {
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = cs;
	ip->ip_arg2[wi] = 0;
	cpu_sfence();
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
		    cs->cs_func);
	}
    }
}