vkernel - Fix a vkernel lockup on startup
[dragonfly.git] / sys / kern / lwkt_ipiq.c
/*
 * Copyright (c) 2003-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
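
/*
 * Typical usage, as a sketch only ("my_callback" is a placeholder name;
 * the callback signature matches ipifunc3_t as used by lwkt_send_ipiq3()
 * below):
 *
 *	static void
 *	my_callback(void *arg1, int arg2, struct intrframe *frame)
 *	{
 *		... runs on the target cpu, in a critical section ...
 *	}
 *
 *	seq = lwkt_send_ipiq3(target_gd, my_callback, arg1, arg2);
 *	lwkt_wait_ipiq(target_gd, seq);
 *
 * The wait is optional; lwkt_wait_ipiq() must itself be called from a
 * critical section (see its header comment).
 */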

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>
#include <machine/atomic.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct ipiq_stats {
    int64_t ipiq_count;		/* total calls to lwkt_send_ipiq*() */
    int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
    int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
    int64_t ipiq_passive;	/* passive IPI messages */
    int64_t ipiq_cscount;	/* number of cpu synchronizations */
} __cachealign;

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)	ipiq_stats_percpu[(gd)->gd_cpuid]

static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "Set to 1 to enable IPIQ debug output");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS	void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_quick, 9, "cpumask=%08lx", unsigned long mask);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

static void lwkt_process_ipiq_nested(void);
static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame, int limit);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

#define IPIQ_SYSCTL(name)				\
static int						\
sysctl_##name(SYSCTL_HANDLER_ARGS)			\
{							\
    int64_t val = 0;					\
    int cpu, error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	val += ipiq_stats_percpu[cpu].name;		\
							\
    error = sysctl_handle_quad(oidp, &val, 0, req);	\
    if (error || req->newptr == NULL)			\
	return error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	ipiq_stats_percpu[cpu].name = val;		\
							\
    return 0;						\
}

IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPI's sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");

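/*
 * The aggregated counters above are visible from userland; as a usage
 * sketch (writing a value distributes it to every per-cpu slot):
 *
 *	sysctl lwkt.ipiq_count
 *	sysctl lwkt.ipiq_fifofull
 */
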
/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  The FIFO can be written lock-free because only the
 * sending cpu advances the write index and only the target advances the
 * read index.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    int level1;
    int level2;
    long rflags;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * When we are not nested inside a processing loop we allow the FIFO
     * to get 1/2 full.  Once it exceeds 1/2 full we must wait for it to
     * drain, executing any incoming IPIs while we wait.
     *
     * When we are nested we allow the FIFO to get almost completely full.
     * This allows us to queue IPIs sent from IPI callbacks.  The processing
     * code will only process incoming FIFOs that are trying to drain while
     * we wait, and only to the only-slightly-less-full point, to avoid a
     * deadlock.
     *
     * We are guaranteed to make forward progress because the nested path
     * is permitted to queue into FIFO space that the non-nested path
     * refuses to fill.
     */
    if (gd->gd_processing_ipiq == 0) {
	level1 = MAXCPUFIFO / 2;
	level2 = MAXCPUFIFO / 4;
    } else {
	level1 = MAXCPUFIFO - 3;
	level2 = MAXCPUFIFO - 5;
    }
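
    /*
     * For example, if MAXCPUFIFO were 128 (illustration only): a
     * non-nested sender would block once more than 64 messages are
     * queued and process incoming IPIs until the target drained the
     * FIFO to 32; a nested sender would block at 125 and wait for 123.
     */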

    if (ip->ip_windex - ip->ip_rindex > level1) {
#ifndef _KERNEL_VIRTUAL
	uint64_t tsc_base = rdtsc();
#endif
	int repeating = 0;
	int olimit;

	rflags = read_rflags();
	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	olimit = atomic_swap_int(&ip->ip_drain, level2);
	while (ip->ip_windex - ip->ip_rindex > level2) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq_nested();
	    cpu_pause();

	    /*
	     * Check for a target that is not draining.  This should be
	     * fixed properly, but leave the code in place anyway as it
	     * can recover an otherwise dead system.
	     */
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#else
	    if (rdtsc() - tsc_base > tsc_frequency) {
		++repeating;
		if (repeating > 10) {
		    kprintf("send_ipiq %d->%d tgt not draining (%d) sniff=%p,%p\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating,
			    target->gd_sample_pc, target->gd_sample_sp);
		    smp_sniff();
		    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
		    cpu_send_ipiq(target->gd_cpuid);
		} else {
		    kprintf("send_ipiq %d->%d tgt not draining (%d)\n",
			    gd->gd_cpuid, target->gd_cpuid, repeating);
		    smp_sniff();
		}
		tsc_base = rdtsc();
	    }
#endif
	}
	atomic_swap_int(&ip->ip_drain, olimit);
	DEBUG_POP_INFO();
#if defined(__x86_64__)
	write_rflags(rflags);
#else
#error "no write_*flags"
#endif
    }

    /*
     * Queue the new message and signal the target cpu.  For now we need to
     * physically disable interrupts because the target will not get signalled
     * by other cpus once we set target->gd_npoll and we don't want to get
     * interrupted.
     *
     * XXX not sure why this is a problem, the critical section should prevent
     * any stalls (incoming interrupts except Xinvltlb and Xsnoop will
     * just be made pending).
     */
    rflags = read_rflags();
#ifndef _KERNEL_VIRTUAL
    cpu_disable_intr();
#endif

    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_swap_int(&target->gd_npoll, 1) == 0) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    write_rflags(rflags);

    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
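
/*
 * Note on the ring arithmetic above: ip_windex and ip_rindex are
 * free-running counters and only the low bits select an array slot, so
 * the difference is the queue depth even across integer wrap-around.
 * Sketch (MAXCPUFIFO must be a power of 2 for the mask to work):
 *
 *	depth = ip->ip_windex - ip->ip_rindex;
 *	slot  = ip->ip_windex & MAXCPUFIFO_MASK;
 */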

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO is greater than 1/4 full.
 * This function is usually very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter_gd(gd);
    ++gd->gd_intr_nesting_level;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * If the FIFO is too full send the IPI actively.
     *
     * WARNING! This level must be low enough not to trigger a wait loop
     *	        in the active sending code since we are not signalling the
     *	        target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO / 4) {
	--gd->gd_intr_nesting_level;
	crit_exit_gd(gd);
	return lwkt_send_ipiq3(target, func, arg1, arg2);
    }

    /*
     * Else we can do it passively.
     */
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++ipiq_stat(gd).ipiq_count;
    ++ipiq_stat(gd).ipiq_passive;

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
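
/*
 * Usage sketch for the passive path ("free_remote" is a hypothetical
 * callback name used purely for illustration):
 *
 *	lwkt_send_ipiq3_passive(target_gd, free_remote, ptr, 0);
 *
 * No hardware IPI is raised unless the FIFO is already 1/4 full; the
 * target picks the message up at its next poll (typically the next
 * clock interrupt).
 */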

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 *
 * To prevent treating low-numbered cpus as favored sons, the IPIs are
 * issued in order starting at mycpu upward, then from 0 through mycpu.
 * This is particularly important to prevent random scheduler pickups
 * from favoring cpu 0.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;
    cpumask_t amask;

    CPUMASK_NANDMASK(mask, stopped_cpus);

    /*
     * All cpus in mask which are >= mycpu
     */
    CPUMASK_ASSBMASK(amask, mycpu->gd_cpuid);
    CPUMASK_INVMASK(amask);
    CPUMASK_ANDMASK(amask, mask);
    while (CPUMASK_TESTNZERO(amask)) {
	cpuid = BSFCPUMASK(amask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	CPUMASK_NANDBIT(amask, cpuid);
	++count;
    }

    /*
     * All cpus in mask which are < mycpu
     */
    CPUMASK_ASSBMASK(amask, mycpu->gd_cpuid);
    CPUMASK_ANDMASK(amask, mask);
    while (CPUMASK_TESTNZERO(amask)) {
	cpuid = BSFCPUMASK(amask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	CPUMASK_NANDBIT(amask, cpuid);
	++count;
    }
    return(count);
}
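
/*
 * Sketch of a typical mask construction for the above, mirroring what
 * lwkt_synchronize_ipiqs() does below:
 *
 *	cpumask_t mask = mycpu->gd_other_cpus;
 *
 *	CPUMASK_ANDMASK(mask, smp_active_mask);
 *	lwkt_send_ipiq3_mask(mask, func, arg1, arg2);
 */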

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
	    int64_t time_tgt = tsc_get_target(1000000000LL);
	    int time_loops = 10;
	    int benice = 0;
#ifdef _KERNEL_VIRTUAL
	    int repeating = 0;
#endif

	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
#ifdef _KERNEL_VIRTUAL
		if (repeating++ > 10)
		    pthread_yield();
#endif

		/*
		 * IPIQs must be handled within 10 seconds and this code
		 * will warn after one second.
		 */
		if ((benice & 255) == 0 && tsc_test_target(time_tgt) > 0) {
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		    if (--time_loops == 0)
			panic("LWKT_WAIT_IPIQ");
		    time_tgt = tsc_get_target(1000000000LL);
		}
		++benice;

		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_pause();
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__x86_64__)
	    write_rflags(rflags);
#else
#error "no write_*flags"
#endif
	}
    }
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions: one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    ++gd->gd_processing_ipiq;
again:
    mask = gd->gd_ipimask;
    cpu_ccfence();
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		ip += gd->gd_cpuid;
		while (lwkt_process_ipiq_core(sgd, ip, NULL, 0))
		    ;
		ATOMIC_CPUMASK_NANDBIT(gd->gd_ipimask, n);
		if (ip->ip_rindex != ip->ip_windex)
		    ATOMIC_CPUMASK_ORBIT(gd->gd_ipimask, n);
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }

    /*
     * Process pending cpusyncs.  If the current thread has an active
     * cpusync we only run the list once and do not re-flag
     * as the thread itself is processing its interlock.
     */
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL, 0)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }

    /*
     * Interlock to allow more IPI interrupts.
     */
    --gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    ++gd->gd_processing_ipiq;
again:
    mask = gd->gd_ipimask;
    cpu_ccfence();
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		ip += gd->gd_cpuid;
		while (lwkt_process_ipiq_core(sgd, ip, frame, 0))
		    ;
		ATOMIC_CPUMASK_NANDBIT(gd->gd_ipimask, n);
		if (ip->ip_rindex != ip->ip_windex)
		    ATOMIC_CPUMASK_ORBIT(gd->gd_ipimask, n);
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame, 0)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    /* need_ipiq(); do not reflag */
	}
    }
    --gd->gd_processing_ipiq;
}

/*
 * Only process incoming IPIQs from draining senders and only process them
 * to the point where the draining sender is able to continue.  This is
 * necessary to avoid deadlocking the IPI subsystem because we are acting on
 * incoming messages and the callback may queue additional messages.
 *
 * We only want to have to act on senders that are blocked to limit the
 * number of additional messages sent.  At the same time, recipients are
 * trying to drain our own queue.  Theoretically this creates a pipeline
 * that cannot deadlock.
 */
static void
lwkt_process_ipiq_nested(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;
    int limit;

    ++gd->gd_processing_ipiq;
again:
    mask = gd->gd_ipimask;
    cpu_ccfence();
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;

	    /*
	     * NOTE: We do not mess with the cpumask at all, instead we
	     *	     let the top-level ipiq processor deal with it.
	     */
	    if (ip != NULL) {
		ip += gd->gd_cpuid;
		if ((limit = ip->ip_drain) != 0) {
		    lwkt_process_ipiq_core(sgd, ip, NULL, limit);
		    /* no gd_ipimask when doing limited processing */
		}
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }

    /*
     * Process pending cpusyncs.  If the current thread has an active
     * cpusync we only run the list once and do not re-flag
     * as the thread itself is processing its interlock.
     */
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL, 0)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }
    --gd->gd_processing_ipiq;
}

/*
 * Process incoming IPI requests until only <limit> are left (0 to exhaust
 * all incoming IPI requests).
 */
static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame, int limit)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

    /*
     * Clear the originating core from our ipimask, we will process all
     * incoming messages.
     *
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     *
     * NOTE: Single pass only.  Returns non-zero if the queue is not empty
     *	     on return.
     */
    while (wi - (ri = ip->ip_rindex) > limit) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_info[ri].func;
	copy_arg1 = ip->ip_info[ri].arg1;
	copy_arg2 = ip->ip_info[ri].arg2;
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#else
		    NULL);
#endif
	}
#endif
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there is still more in the queue.  Don't worry
     * about fencing, we will get another interrupt if necessary.
     */
    return (ip->ip_rindex != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    ATOMIC_CPUMASK_NANDBIT(*cpumask, mycpu->gd_cpuid);
    if (CPUMASK_TESTZERO(*cpumask))
	wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = smp_active_mask;
    CPUMASK_ANDMASK(other_cpumask, mycpu->gd_other_cpus);
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (CPUMASK_TESTNZERO(other_cpumask)) {
	tsleep_interlock(&other_cpumask, 0);
	if (CPUMASK_TESTNZERO(other_cpumask))
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  the current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
    }
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    CPUMASK_ASSZERO(cs->cs_mack);
    cpu_ccfence();
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, (long)CPUMASK_LOWMASK(mask));
    }
    crit_exit_id("cpusync");
}
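
/*
 * Typical cpusync usage, as a sketch (func/arg are placeholders): remote
 * cpus quiesce inside the interlock, the caller may then modify shared
 * state, and the deinterlock runs func on every specified cpu before
 * returning:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, func, arg);
 *	lwkt_cpusync_interlock(&cs);
 *	... modify state while the target cpus are quiescent ...
 *	lwkt_cpusync_deinterlock(&cs);
 */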

/*
 * The quick version does not quiesce the target cpu(s) but instead executes
 * the function on the target cpu(s) and waits for all to acknowledge.  This
 * avoids spinning on the target cpus.
 *
 * This function is typically only used for kernel_pmap updates.  User pmaps
 * have to be quiesced.
 */
void
lwkt_cpusync_quick(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * stage-2 cs_mack only.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote2, cs);
	logipiq2(sync_quick, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	DEBUG_POP_INFO();
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
    }
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    crit_exit_id("cpusync");
}

/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our bit in cs_mack (the stage 1 acknowledgement) and
 * then fall through to the stage 2 polling function.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
    lwkt_cpusync_remote2(cs);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if (CPUMASK_TESTMASK(cs->cs_mack, gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
	/* cs can be ripped out at this point */
    } else {
	lwkt_ipiq_t ip;
	int wi;

	cpu_pause();
#ifdef _KERNEL_VIRTUAL
	pthread_yield();
#endif
	cpu_lfence();

	/*
	 * Requeue our IPI to avoid a deep stack recursion.  If no other
	 * IPIs are pending we can just loop up, which should help VMs
	 * better-detect spin loops.
	 */
	ip = &gd->gd_cpusyncq;

	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_info[wi].arg1 = cs;
	ip->ip_info[wi].arg2 = 0;
	cpu_sfence();
	KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mask),
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mack),
		    cs->cs_func);
	}
    }
}

#define LWKT_IPIQ_NLATENCY	8
#define LWKT_IPIQ_NLATENCY_MASK	(LWKT_IPIQ_NLATENCY - 1)

struct lwkt_ipiq_latency_log {
    int		idx;	/* unmasked index */
    int		pad;
    uint64_t	latency[LWKT_IPIQ_NLATENCY];
};

static struct lwkt_ipiq_latency_log lwkt_ipiq_latency_logs[MAXCPU];
static uint64_t save_tsc;

/*
 * IPI callback (already in a critical section)
 */
static void
lwkt_ipiq_latency_testfunc(void *arg __unused)
{
    uint64_t delta_tsc;
    struct globaldata *gd;
    struct lwkt_ipiq_latency_log *lat;

    /*
     * Get delta TSC (assume TSCs are synchronized) as quickly as
     * possible and then convert to nanoseconds.
     */
    delta_tsc = rdtsc_ordered() - save_tsc;
    delta_tsc = delta_tsc * 1000000000LU / tsc_frequency;

    /*
     * Record in our save array.
     */
    gd = mycpu;
    lat = &lwkt_ipiq_latency_logs[gd->gd_cpuid];
    lat->latency[lat->idx & LWKT_IPIQ_NLATENCY_MASK] = delta_tsc;
    ++lat->idx;
}

/*
 * Send IPI from cpu0 to other cpus
 *
 * NOTE: Machine must be idle for test to run dependably, and also probably
 *	 a good idea not to be running powerd.
 *
 * NOTE: Caller should use 'usched :1 <command>' to lock itself to cpu 0.
 *	 See 'ipitest' script in /usr/src/test/sysperf/ipitest
 */
static int
lwkt_ipiq_latency_test(SYSCTL_HANDLER_ARGS)
{
    struct globaldata *gd;
    int cpu = 0, orig_cpu, error;

    error = sysctl_handle_int(oidp, &cpu, arg2, req);
    if (error || req->newptr == NULL)
	return error;

    if (cpu == 0)
	return 0;
    else if (cpu >= ncpus || cpu < 0)
	return EINVAL;

    orig_cpu = mycpuid;
    lwkt_migratecpu(0);

    gd = globaldata_find(cpu);

    save_tsc = rdtsc_ordered();
    lwkt_send_ipiq(gd, lwkt_ipiq_latency_testfunc, NULL);

    lwkt_migratecpu(orig_cpu);
    return 0;
}

SYSCTL_NODE(_debug, OID_AUTO, ipiq, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_debug_ipiq, OID_AUTO, latency_test, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, lwkt_ipiq_latency_test, "I",
    "ipi latency test, arg: remote cpuid");

static int
lwkt_ipiq_latency(SYSCTL_HANDLER_ARGS)
{
    struct lwkt_ipiq_latency_log *latency = arg1;
    uint64_t lat[LWKT_IPIQ_NLATENCY];
    int i;

    for (i = 0; i < LWKT_IPIQ_NLATENCY; ++i)
	lat[i] = latency->latency[i];

    return sysctl_handle_opaque(oidp, lat, sizeof(lat), req);
}

static void
lwkt_ipiq_latency_init(void *dummy __unused)
{
    int cpu;

    for (cpu = 0; cpu < ncpus; ++cpu) {
	char name[32];

	ksnprintf(name, sizeof(name), "latency%d", cpu);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_debug_ipiq),
	    OID_AUTO, name, CTLTYPE_OPAQUE | CTLFLAG_RD,
	    &lwkt_ipiq_latency_logs[cpu], 0, lwkt_ipiq_latency,
	    "LU", "8 latest ipi latency measurement results");
    }
}
SYSINIT(lwkt_ipiq_latency, SI_SUB_CONFIGURE, SI_ORDER_ANY,
    lwkt_ipiq_latency_init, NULL);