/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */
/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
#ifdef PANIC_DEBUG
static int	panic_ipiq_cpu = -1;
static int	panic_ipiq_count = 100;
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif
#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_add, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t poll);
static void lwkt_cpusync_remote2(lwkt_cpusync_t poll);
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO can be written.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
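
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * queue a function on another cpu and wait for it to run.  remote_hello()
 * and hello_other_cpu() are hypothetical names.
 *
 *	static void
 *	remote_hello(void *arg1, int arg2, struct intrframe *frame)
 *	{
 *	    kprintf("cpu%d: arg2=%d\n", mycpu->gd_cpuid, arg2);
 *	}
 *
 *	static void
 *	hello_other_cpu(globaldata_t target)
 *	{
 *	    int seq = lwkt_send_ipiq3(target, remote_hello, NULL, 42);
 *
 *	    crit_enter();
 *	    lwkt_wait_ipiq(target, seq);   (must hold a critical section)
 *	    crit_exit();
 *	}
 */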
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
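
/*
 * Usage sketch (editorial illustration): defer a non-critical operation,
 * such as returning memory to the cpu that owns it, without raising a
 * hardware IPI.  free_on_owner() is a hypothetical callback.
 *
 *	static void
 *	free_on_owner(void *ptr, int unused, struct intrframe *frame)
 *	{
 *	    kfree(ptr, M_TEMP);
 *	}
 *
 *	lwkt_send_ipiq3_passive(owner_gd, free_on_owner, ptr, 0);
 *
 * The message sits in the target's FIFO until that cpu polls it, typically
 * on its next clock tick, unless the FIFO backs up first.
 */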
/*
 * Send an IPI request without blocking; return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
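
/*
 * Usage sketch (editorial illustration): a caller that must not block can
 * check for ENOENT and fall back to a local mechanism when the FIFO is
 * too full:
 *
 *	if (lwkt_send_ipiq3_nowait(target, func, arg1, arg2) == ENOENT) {
 *	    (queue locally for a later retry, or handle on this cpu)
 *	}
 */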
/*
 * Deprecated, used only by fast interrupt forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~CPUMASK(cpuid);
	++count;
    }
    return(count);
}
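
/*
 * Usage sketch (editorial illustration): broadcast to every other running
 * cpu.
 *
 *	lwkt_send_ipiq3_mask(mycpu->gd_other_cpus & smp_active_mask,
 *			     func, arg1, arg2);
 */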
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0)
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A memory fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}
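
/*
 * Editorial note on the ring arithmetic: ip_windex and ip_rindex are
 * free-running counters that are masked with MAXCPUFIFO_MASK only when
 * used as array indices, so the difference (wi - ri) counts pending
 * messages correctly even across 32-bit wraparound.  For example, if
 * MAXCPUFIFO were 16:
 *
 *	ip_rindex = 0xfffffffe		(about to wrap)
 *	ip_windex = 0x00000002		(already wrapped)
 *	windex - rindex = 4		(messages pending)
 *	slot = 0xfffffffe & MAXCPUFIFO_MASK = 14
 */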
static void
lwkt_sync_ipiq(void *arg)
{
    cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}
void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq, &other_cpumask);

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
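
/*
 * Usage sketch (editorial illustration): drain the IPIs we have already
 * queued to all other cpus, e.g. before tearing down a structure that
 * in-flight IPI callbacks might still reference.  The wmesg string is
 * arbitrary and shows up as the tsleep() wait message.
 *
 *	lwkt_synchronize_ipiqs("ipiqsync");
 */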
/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_simple()
 *
 *	The function is executed synchronously before return on remote
 *	cpus.  A lwkt_cpusync_t pointer is passed as an argument.  The data
 *	can be accessed via arg->cs_data.
 *
 *	XXX should I just pass the data as an argument to be consistent?
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = func;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & CPUMASK(mycpu->gd_cpuid))
	func(&cmd);
    lwkt_cpusync_finish(&cmd);
}
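
/*
 * Usage sketch (editorial illustration): run a callback on every active
 * cpu, holding them in the synchronization loop until all have executed
 * it.  flush_local_state() is a hypothetical callback.
 *
 *	static void
 *	flush_local_state(lwkt_cpusync_t info)
 *	{
 *	    (act on info->cs_data from the local cpu)
 *	}
 *
 *	lwkt_cpusync_simple(smp_active_mask, flush_local_state, my_data);
 */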
/*
 * lwkt_cpusync_fastdata()
 *
 *	The function is executed in tandem with return on remote cpus.
 *	The data is directly passed as an argument.  Do not pass pointers
 *	to temporary storage as the storage might have gone poof by the
 *	time the target cpu executes the function.
 *
 *	At the moment lwkt_cpusync is declared on the stack and we must
 *	wait for all remote cpus to ack in lwkt_cpusync_finish(), but as
 *	a future optimization we should be able to put a counter in the
 *	globaldata structure (if it is not otherwise being used) and just
 *	poke it and return without waiting. XXX
 */
void
lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = func;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & CPUMASK(mycpu->gd_cpuid))
	func(data);
    lwkt_cpusync_finish(&cmd);
}
/*
 * lwkt_cpusync_start()
 *
 *	Start synchronization with a set of target cpus, return once they are
 *	known to be in a synchronization loop.  The target cpus will execute
 *	poll->cs_run_func() IN TANDEM WITH THE RETURN.
 *
 *	XXX future: add lwkt_cpusync_start_quick() and require a call to
 *	lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
 *	potentially absorb the IPI latency doing something useful.
 */
void
lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = 0;
    poll->cs_mask = mask;
    logipiq2(sync_start, mask & gd->gd_other_cpus);
    poll->cs_maxcount = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc1_t)lwkt_cpusync_remote1, poll);
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
    if (poll->cs_maxcount) {
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
}
void
lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;
    int count;

    mask &= ~poll->cs_mask;
    poll->cs_mask |= mask;
    logipiq2(sync_add, mask & gd->gd_other_cpus);
    count = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc1_t)lwkt_cpusync_remote1, poll);
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
    poll->cs_maxcount += count;
    if (poll->cs_maxcount) {
	if (poll->cs_maxcount == count)
	    ++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
}
/*
 * Finish synchronization with a set of target cpus.  The target cpus will
 * execute cs_fin1_func(poll) prior to this function returning, and will
 * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
 *
 * If cs_maxcount is non-zero then we are mastering a cpusync with one or
 * more remote cpus and must account for it in our thread structure.
 */
void
lwkt_cpusync_finish(lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = -1;
    if (poll->cs_mask & gd->gd_cpumask) {
	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func)
	    poll->cs_fin2_func(poll->cs_data);
    }
    if (poll->cs_maxcount) {
	while (poll->cs_count != -(poll->cs_maxcount + 1)) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
	--gd->gd_curthread->td_cscount;
    }
}
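
/*
 * Usage sketch (editorial illustration): the master-side sequence for a
 * hand-rolled cpusync.  Between _start and _finish every targeted cpu is
 * spinning in the synchronization loop, so that window can be used to
 * perform an operation while those cpus are known to be quiescent.
 *
 *	struct lwkt_cpusync cmd;
 *
 *	cmd.cs_run_func = NULL;
 *	cmd.cs_fin1_func = NULL;
 *	cmd.cs_fin2_func = NULL;
 *	cmd.cs_data = NULL;
 *	lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
 *	(... targeted cpus are now spinning; do the critical work ...)
 *	lwkt_cpusync_finish(&cmd);
 */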
/*
 * Helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t poll)
{
    atomic_add_int(&poll->cs_count, 1);
    if (poll->cs_run_func)
	poll->cs_run_func(poll);
    lwkt_cpusync_remote2(poll);
}
/*
 * Helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.  When the originator requests that we
 * finish we execute cs_fin1_func(poll) synchronously and cs_fin2_func(data)
 * in tandem with the release.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t poll)
{
    if (poll->cs_count < 0) {
	cpusync_func2_t savef;
	void *saved;

	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func) {
	    savef = poll->cs_fin2_func;
	    saved = poll->cs_data;
	    cpu_ccfence();	/* required ordering for MP operation */
	    atomic_add_int(&poll->cs_count, -1);
	    savef(saved);
	} else {
	    atomic_add_int(&poll->cs_count, -1);
	}
    } else {
	globaldata_t gd = mycpu;
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = poll;