/*
 * Copyright (c) 2003-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
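/*
 * Rough queue discipline, as implemented by the send/process code below:
 * each per-target FIFO is a power-of-2 ring of MAXCPUFIFO entries indexed
 * by free-running counters.
 *
 *	sender (this cpu):	fills in
 *				ip->ip_info[ip->ip_windex & MAXCPUFIFO_MASK],
 *				issues cpu_sfence(), then bumps ip_windex.
 *	target cpu:		drains at ip_rindex, advancing it per entry,
 *				and updates ip_xindex only after the callback
 *				has completed.
 *
 * Hence (ip_windex - ip_rindex) is the current queue depth, and a saved
 * windex can be compared against ip_xindex to wait for completion of a
 * particular message (see lwkt_wait_ipiq()).
 */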
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>		/* needed by the KTR_INFO macros below */
#include <sys/kthread.h>
#include <machine/cpu.h>

#include <vm/vm.h>		/* prerequisite for the other vm headers */
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>
#include <machine/atomic.h>
#ifdef _KERNEL_VIRTUAL
#include <pthread.h>		/* pthread_yield() in the spin loops below */
#endif
struct ipiq_stats {
    int64_t ipiq_count;		/* total calls to lwkt_send_ipiq*() */
    int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
    int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
    int64_t ipiq_passive;	/* passive IPI messages */
    int64_t ipiq_cscount;	/* number of cpu synchronizations */
} __cachealign;

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)	ipiq_stats_percpu[(gd)->gd_cpuid]
static int ipiq_debug;		/* set to 1 for debug */
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "Set to 1 to enable IPIQ debugging");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS	void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_quick, 9, "cpumask=%08lx", unsigned long mask);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		    struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);
#define IPIQ_SYSCTL(name)				\
static int						\
sysctl_##name(SYSCTL_HANDLER_ARGS)			\
{							\
    int64_t val = 0;					\
    int cpu, error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	val += ipiq_stats_percpu[cpu].name;		\
							\
    error = sysctl_handle_quad(oidp, &val, 0, req);	\
    if (error || req->newptr == NULL)			\
	return error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	ipiq_stats_percpu[cpu].name = val;		\
    return 0;						\
}
IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPI's sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO is written only by the owning (source)
 * cpu and drained only by the target cpu.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
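/*
 * Minimal usage sketch (remote_func is a hypothetical callback, not part
 * of this file):
 *
 *	static void
 *	remote_func(void *arg1, int arg2, struct intrframe *frame)
 *	{
 *		... runs on the target cpu, in a critical section ...
 *	}
 *
 *	lwkt_send_ipiq3(globaldata_find(cpuid), remote_func, ptr, 0);
 */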
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    int level1;
    int level2;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;

    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");

    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * If we are nested we want to queue the IPI without processing incoming
     * IPIs, if possible, to avoid excessive stack recursion.  As long as
     * the IPI callback does not itself try to send more than a few IPIs to
     * any single target, it should not be possible to excessively nest
     * because the un-nested send code always leaves at least 1/2 the fifo
     * available.
     */
    if (gd->gd_processing_ipiq) {
	level1 = MAXCPUFIFO - 2;
	level2 = MAXCPUFIFO - 4;
    } else {
	level1 = MAXCPUFIFO / 2;
	level2 = MAXCPUFIFO / 4;
    }

    if (ip->ip_windex - ip->ip_rindex > level1) {
#if defined(__x86_64__)
	unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
#ifndef _KERNEL_VIRTUAL
	uint64_t tsc_base = rdtsc();
#endif
	int repeating = 0;

	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > level2) {
	    if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();

	    /*
	     * Check for target not draining issue.  This should be fixed
	     * but leave the code in-place anyway as it can recover an
	     * otherwise dead system.
	     */
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#else
	    if (rdtsc() - tsc_base > tsc_frequency) {
		++repeating;
		if (repeating > 10) {
		    smp_sniff();
		    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
		    cpu_send_ipiq(target->gd_cpuid);
		    kprintf("send_ipiq %d->%d tgt not draining (%d) sniff=%p,%p\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating,
			    target->gd_sample_pc, target->gd_sample_sp);
		} else {
		    smp_sniff();
		    kprintf("send_ipiq %d->%d tgt not draining (%d)\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating);
		}
		tsc_base = rdtsc();
	    }
#endif
	}
	DEBUG_POP_INFO();
#if defined(__x86_64__)
	write_rflags(rflags);
#else
#error "no write_*flags"
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * considerably cheaper when the hardware IPI can be avoided.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
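/*
 * Typical use is fire-and-forget deferral of low-priority work to the cpu
 * that owns a structure, e.g. (free_remote is a hypothetical callback):
 *
 *	lwkt_send_ipiq3_passive(owner_gd, free_remote, ptr, 0);
 *
 * No hardware IPI is generated unless the FIFO is filling; the target
 * drains the queue on its next poll.
 */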
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    int level1;
    int level2;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    ++gd->gd_intr_nesting_level;
    logipiq(send_pasv, func, arg1, arg2, gd, target);

    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");

    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ++ipiq_stat(gd).ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * If we are nested we want to queue the IPI without processing incoming
     * IPIs, if possible, to avoid excessive stack recursion.
     */
    if (gd->gd_processing_ipiq) {
	level1 = MAXCPUFIFO - 2;
	level2 = MAXCPUFIFO - 4;
    } else {
	level1 = MAXCPUFIFO / 2;
	level2 = MAXCPUFIFO / 4;
    }

    if (ip->ip_windex - ip->ip_rindex > level1) {
#if defined(__x86_64__)
	unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
#ifndef _KERNEL_VIRTUAL
	uint64_t tsc_base = rdtsc();
#endif
	int repeating = 0;

	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > level2) {
	    if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();

	    /*
	     * Check for target not draining issue.  This should be fixed
	     * but leave the code in-place anyway as it can recover an
	     * otherwise dead system.
	     */
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#else
	    if (rdtsc() - tsc_base > tsc_frequency) {
		++repeating;
		if (repeating > 10) {
		    smp_sniff();
		    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
		    cpu_send_ipiq(target->gd_cpuid);
		    kprintf("send_ipiq %d->%d tgt not draining (%d) sniff=%p,%p\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating,
			    target->gd_sample_pc, target->gd_sample_sp);
		} else {
		    smp_sniff();
		    kprintf("send_ipiq %d->%d tgt not draining (%d)\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating);
		}
		tsc_base = rdtsc();
	    }
#endif
	}
	DEBUG_POP_INFO();
#if defined(__x86_64__)
	write_rflags(rflags);
#else
#error "no write_*flags"
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * We normally do not signal the target cpu, it will pick up the IPI
     * when it next polls (typically on the next tick).  However, we do not
     * want to allow the FIFO to get too full without signaling.  Make sure
     * the target cpu is signalled once the FIFO is greater than 1/4 full,
     * which also guarantees that a drain-wait by the sender can make
     * progress.
     */
    if ((ip->ip_windex - ip->ip_rindex) > MAXCPUFIFO / 4 &&
	atomic_swap_int(&target->gd_npoll, 1) == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
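/*
 * Callers must be prepared for failure, e.g. by falling back to the
 * blocking send (sketch; remote_func is a hypothetical callback):
 *
 *	if (lwkt_send_ipiq3_nowait(target, remote_func, ptr, 0) == ENOENT)
 *		lwkt_send_ipiq3(target, remote_func, ptr, 0);
 */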
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++gd->gd_intr_nesting_level;
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	--gd->gd_intr_nesting_level;
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
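/*
 * For example, to broadcast to all other running cpus (sketch, mirroring
 * the mask construction used by lwkt_synchronize_ipiqs() below):
 *
 *	cpumask_t mask = mycpu->gd_other_cpus;
 *
 *	CPUMASK_ANDMASK(mask, smp_active_mask);
 *	lwkt_send_ipiq3_mask(mask, remote_func, ptr, 0);
 */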
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    CPUMASK_NANDMASK(mask, stopped_cpus);
    while (CPUMASK_TESTNZERO(mask)) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	CPUMASK_NANDBIT(mask, cpuid);
	++count;
    }
    return(count);
}
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
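/*
 * Typical pairing with the send code (sketch):
 *
 *	int seq;
 *
 *	crit_enter();
 *	seq = lwkt_send_ipiq3(target, remote_func, ptr, 0);
 *	lwkt_wait_ipiq(target, seq);
 *	crit_exit();
 *
 * lwkt_wait_ipiq() returns once the target has executed remote_func,
 * since ip_xindex is only advanced after the callback completes.
 */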
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
	    int64_t time_tgt = tsc_get_target(1000000000LL);
	    int time_loops = 10;
	    int benice = 0;
#ifdef _KERNEL_VIRTUAL
	    int repeating = 0;
#endif

	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
#ifdef _KERNEL_VIRTUAL
		if (repeating++ > 10)
		    pthread_yield();
#endif

		/*
		 * IPIQs must be handled within 10 seconds and this code
		 * will warn after one second.
		 */
		if ((benice & 255) == 0 && tsc_test_target(time_tgt) > 0) {
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		    if (--time_loops == 0)
			panic("LWKT_WAIT_IPIQ");
		    time_tgt = tsc_get_target(1000000000LL);
		}
		++benice;

		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_pause();
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__x86_64__)
	    write_rflags(rflags);
#else
#error "no write_*flags"
#endif
	}
    }
}
int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    /*
     * We must process the entire cpumask if we are reentrant because it
     * might have been partially cleared.
     */
    if (++gd->gd_processing_ipiq > 1)
	ATOMIC_CPUMASK_COPY(gd->gd_ipimask, smp_active_mask);
again:
    atomic_swap_int(&gd->gd_npoll, 0);
    mask = gd->gd_ipimask;
    cpu_ccfence();
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }

    /*
     * Process pending cpusyncs.  If the current thread has an active
     * cpusync we only run the list once and do not re-flag, as the
     * thread itself is processing its interlock.
     */
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (atomic_swap_int(&gd->gd_npoll, 0))
	goto again;
    --gd->gd_processing_ipiq;
}
void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    /*
     * We must process the entire cpumask if we are reentrant because it
     * might have been partially cleared.
     */
    if (++gd->gd_processing_ipiq > 1)
	ATOMIC_CPUMASK_COPY(gd->gd_ipimask, smp_active_mask);
again:
    atomic_swap_int(&gd->gd_npoll, 0);
    mask = gd->gd_ipimask;
    cpu_ccfence();
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    /* need_ipiq(); do not reflag */
	}
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (atomic_swap_int(&gd->gd_npoll, 0))
	goto again;
    --gd->gd_processing_ipiq;
}
#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	int i;

	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }
#endif

    /*
     * Clear the originating core from our ipimask, we will process all
     * incoming messages.
     *
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     *
     * NOTE: Single pass only.  Returns non-zero if the queue is not empty
     *	     on return.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_info[ri].func;
	copy_arg1 = ip->ip_info[ri].arg1;
	copy_arg2 = ip->ip_info[ri].arg2;
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#else
		    NULL);
#endif
	}
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there is still more in the queue.
     */
    return (ip->ip_rindex != ip->ip_windex);
}
static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    ATOMIC_CPUMASK_NANDBIT(*cpumask, mycpu->gd_cpuid);
    if (CPUMASK_TESTZERO(*cpumask))
	wakeup(arg);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = smp_active_mask;
    CPUMASK_ANDMASK(other_cpumask, mycpu->gd_other_cpus);
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (CPUMASK_TESTNZERO(other_cpumask)) {
	tsleep_interlock(&other_cpumask, 0);
	if (CPUMASK_TESTNZERO(other_cpumask))
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  the current cpu if specified, then return.
 */
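/*
 * Typical interlock/deinterlock bracket (sketch; do_sync is a hypothetical
 * cpusync_func_t):
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, do_sync, data);
 *	lwkt_cpusync_interlock(&cs);	... named cpus are now quiescent ...
 *	... modify the shared state ...
 *	lwkt_cpusync_deinterlock(&cs);	... runs do_sync, releases cpus ...
 *
 * lwkt_cpusync_simple() below wraps exactly this sequence.
 */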
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}
void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
    }
}
/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    CPUMASK_ASSZERO(cs->cs_mack);
    cpu_ccfence();
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, (long)CPUMASK_LOWMASK(mask));
    }
    crit_exit_id("cpusync");
}
/*
 * The quick version does not quiesce the target cpu(s) but instead executes
 * the function on the target cpu(s) and waits for all to acknowledge.  This
 * avoids spinning on the target cpus.
 *
 * This function is typically only used for kernel_pmap updates.  User pmaps
 * have to be quiesced.
 */
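/*
 * Usage sketch (invl_func is a hypothetical cpusync_func_t):
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, invl_func, data);
 *	lwkt_cpusync_quick(&cs);
 *
 * invl_func runs on the targets and, if the mask includes it, on the
 * current cpu, without first quiescing the targets.
 */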
void
lwkt_cpusync_quick(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * stage-2 cs_mack only.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote2, cs);
	logipiq2(sync_quick, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	DEBUG_POP_INFO();
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
    }
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    crit_exit_id("cpusync");
}
/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our bit in cs_mack to acknowledge stage 1, then drop
 * into the stage-2 poll so we spin on the request.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
    lwkt_cpusync_remote2(cs);
}
/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if (CPUMASK_TESTMASK(cs->cs_mack, gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
	/* cs can be ripped out at this point */
    } else {
	lwkt_ipiq_t ip;
	int wi;

#ifdef _KERNEL_VIRTUAL
	pthread_yield();
#endif

	/*
	 * Requeue our IPI to avoid a deep stack recursion.  If no other
	 * IPIs are pending we can just loop up, which should help VMs
	 * better-detect spin loops.
	 */
	ip = &gd->gd_cpusyncq;

	if (ip->ip_rindex == ip->ip_windex) {
	    __asm __volatile("cli");
	    if (ip->ip_rindex == ip->ip_windex) {
		__asm __volatile("sti; hlt");
	    } else {
		__asm __volatile("sti");
	    }
	}

	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_info[wi].arg1 = cs;
	ip->ip_info[wi].arg2 = 0;
	cpu_sfence();
	++ip->ip_windex;
	KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mask),
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mack),
		    cs->cs_func);
	}
    }
}
#define LWKT_IPIQ_NLATENCY	8
#define LWKT_IPIQ_NLATENCY_MASK	(LWKT_IPIQ_NLATENCY - 1)

struct lwkt_ipiq_latency_log {
    int		idx;	/* unmasked index */
    uint64_t	latency[LWKT_IPIQ_NLATENCY];
};

static struct lwkt_ipiq_latency_log lwkt_ipiq_latency_logs[MAXCPU];
static uint64_t save_tsc;
/*
 * IPI callback (already in a critical section)
 */
static void
lwkt_ipiq_latency_testfunc(void *arg __unused)
{
    uint64_t delta_tsc;
    struct globaldata *gd;
    struct lwkt_ipiq_latency_log *lat;

    /*
     * Get delta TSC (assume TSCs are synchronized) as quickly as
     * possible and then convert to nanoseconds.
     */
    delta_tsc = rdtsc_ordered() - save_tsc;
    delta_tsc = delta_tsc * 1000000000LU / tsc_frequency;

    /*
     * Record in our save array.
     */
    gd = mycpu;
    lat = &lwkt_ipiq_latency_logs[gd->gd_cpuid];
    lat->latency[lat->idx & LWKT_IPIQ_NLATENCY_MASK] = delta_tsc;
    ++lat->idx;
}
/*
 * Send IPI from cpu0 to other cpus
 *
 * NOTE: Machine must be idle for test to run dependably, and also probably
 *	 a good idea not to be running powerd.
 *
 * NOTE: Caller should use 'usched :1 <command>' to lock itself to cpu 0.
 *	 See 'ipitest' script in /usr/src/test/sysperf/ipitest
 */
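/*
 * Example invocation (sketch, assuming the sysctl paths created below):
 *
 *	usched :1 sysctl debug.ipiq.latency_test=1   # IPI cpu0 -> cpu1
 *	sysctl debug.ipiq.latency1                   # dump cpu1's samples
 */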
static int
lwkt_ipiq_latency_test(SYSCTL_HANDLER_ARGS)
{
    struct globaldata *gd;
    int cpu = 0, orig_cpu, error;

    error = sysctl_handle_int(oidp, &cpu, arg2, req);
    if (error || req->newptr == NULL)
	return error;

    if (cpu == 0)
	return 0;
    else if (cpu >= ncpus || cpu < 0)
	return EINVAL;

    orig_cpu = mycpuid;
    lwkt_migratecpu(0);

    gd = globaldata_find(cpu);

    save_tsc = rdtsc_ordered();
    lwkt_send_ipiq(gd, lwkt_ipiq_latency_testfunc, NULL);

    lwkt_migratecpu(orig_cpu);
    return 0;
}
SYSCTL_NODE(_debug, OID_AUTO, ipiq, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_debug_ipiq, OID_AUTO, latency_test, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, lwkt_ipiq_latency_test, "I",
    "ipi latency test, arg: remote cpuid");
static int
lwkt_ipiq_latency(SYSCTL_HANDLER_ARGS)
{
    struct lwkt_ipiq_latency_log *latency = arg1;
    uint64_t lat[LWKT_IPIQ_NLATENCY];
    int i;

    for (i = 0; i < LWKT_IPIQ_NLATENCY; ++i)
	lat[i] = latency->latency[i];

    return sysctl_handle_opaque(oidp, lat, sizeof(lat), req);
}
static void
lwkt_ipiq_latency_init(void *dummy __unused)
{
    int cpu;

    for (cpu = 0; cpu < ncpus; ++cpu) {
	char name[32];

	ksnprintf(name, sizeof(name), "latency%d", cpu);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_debug_ipiq),
	    OID_AUTO, name, CTLTYPE_OPAQUE | CTLFLAG_RD,
	    &lwkt_ipiq_latency_logs[cpu], 0, lwkt_ipiq_latency,
	    "LU", "8 latest ipi latency measurement results");
    }
}
SYSINIT(lwkt_ipiq_latency, SI_SUB_CONFIGURE, SI_ORDER_ANY,
    lwkt_ipiq_latency_init, NULL);