sys/kern/lwkt_ipiq.c
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
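
/*
 * A rough sketch of one ipiq ring, inferred from the code below: the
 * sending cpu advances ip_windex, the target cpu advances ip_rindex as
 * it dequeues, and ip_xindex trails rindex to mark entries whose
 * functions have completely finished executing:
 *
 *	ip_xindex <= ip_rindex <= ip_windex	(modulo 2^32 wrap)
 *
 * All three indices run freely and are masked with MAXCPUFIFO_MASK only
 * when used to address ip_func[] / ip_arg1[] / ip_arg2[].
 */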

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#ifdef SMP
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
#endif

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0, "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_add, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

#endif	/* SMP */

#ifdef SMP

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t poll);
static void lwkt_cpusync_remote2(lwkt_cpusync_t poll);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  Only the owning (source) cpu writes a given FIFO;
 * the target cpu only reads from it.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
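
/*
 * Usage sketch (illustrative only): queue a function on another cpu and
 * continue.  schedule_remote() is a hypothetical callback matching the
 * ipifunc3_t signature.
 *
 *	static void
 *	schedule_remote(void *arg1, int arg2, struct intrframe *frame)
 *	{
 *		... runs on the target cpu from its IPI processing loop ...
 *	}
 *
 *	lwkt_send_ipiq3(globaldata_find(cpuid), schedule_remote, td, 0);
 */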

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
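
/*
 * Usage sketch (illustrative only): hand memory owned by another cpu back
 * to it without forcing an immediate hardware IPI; the target drains its
 * queue on the next tick.  free_remote() is a hypothetical ipifunc3_t.
 *
 *	lwkt_send_ipiq3_passive(globaldata_find(owner_cpuid),
 *				free_remote, ptr, 0);
 */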

/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
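
/*
 * Usage sketch (illustrative only): the caller must be prepared to fall
 * back when the target FIFO is too full.  notify_remote() is a
 * hypothetical ipifunc3_t.
 *
 *	if (lwkt_send_ipiq3_nowait(target, notify_remote, p, 0) == ENOENT) {
 *		... defer the notification or use the blocking form ...
 *	}
 */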

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(u_int32_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = bsfl(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~(1 << cpuid);
	++count;
    }
    return(count);
}
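
/*
 * Usage sketch (illustrative only): broadcast the hypothetical
 * notify_remote() callback to every other active cpu.
 *
 *	lwkt_send_ipiq3_mask(mycpu->gd_other_cpus & smp_active_mask,
 *			     notify_remote, NULL, 0);
 */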

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0)
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n", mycpu->gd_cpuid, target->gd_cpuid, ip->ip_xindex - seq);
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}
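
/*
 * Usage sketch (illustrative only): the value returned by the send
 * functions (the post-increment ip_windex) acts as a sequence number
 * which can be handed to lwkt_wait_ipiq():
 *
 *	crit_enter();
 *	seq = lwkt_send_ipiq3(target, notify_remote, p, 0);
 *	lwkt_wait_ipiq(target, seq);	... function has now executed ...
 *	crit_exit();
 */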

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    need_ipiq();
	}
    }
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    need_ipiq();
	}
    }
}

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();

    /*
     * Note: xindex is only updated after we are sure the function has
     * finished execution.  Beware lwkt_process_ipiq() reentrancy!  The
     * function may send an IPI which may block/drain.
     *
     * Note: due to additional IPI operations that the callback function
     * may make, it is possible for both rindex and windex to advance and
     * thus for rindex to advance past our cached windex.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) == ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }

    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}
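
/*
 * Note (sketch): the `wi - ri > 0' comparison above works on free-running
 * indices and stays correct across 2^32 wrap as long as fewer than 2^31
 * entries are outstanding.  For example, with wi == 0x00000002 and
 * ri == 0xfffffffe the subtraction yields 4, i.e. four entries pending.
 */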

static void
lwkt_sync_ipiq(void *arg)
{
    cpumask_t *cpumask = arg;

    atomic_clear_int(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq, &other_cpumask);

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}
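
/*
 * Usage sketch (illustrative only): block until every IPI queued before
 * this point has been processed by all other cpus; "ipiqsync" is an
 * arbitrary example wait message.
 *
 *	lwkt_synchronize_ipiqs("ipiqsync");
 */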

#endif

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_simple()
 *
 *	The function is executed synchronously before return on remote
 *	cpus.  A lwkt_cpusync_t pointer is passed as an argument.  The data
 *	can be accessed via arg->cs_data.
 *
 *	XXX should I just pass the data as an argument to be consistent?
 */

void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = func;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
	func(&cmd);
    lwkt_cpusync_finish(&cmd);
}
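
/*
 * Usage sketch (illustrative only): run a function on a set of cpus and
 * return only after all of them have executed it.  set_pte_remote() is a
 * hypothetical cpusync_func_t; it reaches its data via poll->cs_data.
 *
 *	lwkt_cpusync_simple(smp_active_mask, set_pte_remote, &info);
 */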

/*
 * lwkt_cpusync_fastdata()
 *
 *	The function is executed in tandem with return on remote cpus.
 *	The data is directly passed as an argument.  Do not pass pointers
 *	to temporary storage as the storage might have gone poof by the
 *	time the target cpu executes the function.
 *
 *	At the moment lwkt_cpusync is declared on the stack and we must
 *	wait for all remote cpus to ack in lwkt_cpusync_finish(), but as
 *	a future optimization we should be able to put a counter in the
 *	globaldata structure (if it is not otherwise being used) and just
 *	poke it and return without waiting.  XXX
 */
void
lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = func;
    cmd.cs_data = NULL;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
	func(data);
    lwkt_cpusync_finish(&cmd);
}
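
/*
 * Usage sketch (illustrative only): invalidate_page() is a hypothetical
 * cpusync_func2_t; the value is passed directly, so there is no stack
 * lifetime problem.
 *
 *	lwkt_cpusync_fastdata(smp_active_mask, invalidate_page, (void *)va);
 */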

/*
 * lwkt_cpusync_start()
 *
 *	Start synchronization with a set of target cpus, return once they
 *	are known to be in a synchronization loop.  The target cpus will
 *	execute poll->cs_run_func() IN TANDEM WITH THE RETURN.
 *
 *	XXX future: add lwkt_cpusync_start_quick() and require a call to
 *	lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
 *	potentially absorb the IPI latency doing something useful.
 */
void
lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = 0;
    poll->cs_mask = mask;
#ifdef SMP
    logipiq2(sync_start, mask & gd->gd_other_cpus);
    poll->cs_maxcount = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc1_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
#endif
}

void
lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;
#ifdef SMP
    int count;
#endif

    mask &= ~poll->cs_mask;
    poll->cs_mask |= mask;
#ifdef SMP
    logipiq2(sync_add, mask & gd->gd_other_cpus);
    count = lwkt_send_ipiq_mask(
		mask & gd->gd_other_cpus & smp_active_mask,
		(ipifunc1_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
	if (poll->cs_run_func)
	    poll->cs_run_func(poll);
    }
#ifdef SMP
    poll->cs_maxcount += count;
    if (poll->cs_maxcount) {
	if (poll->cs_maxcount == count)
	    ++gd->gd_curthread->td_cscount;
	while (poll->cs_count != poll->cs_maxcount) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
    }
#endif
}

/*
 * Finish synchronization with a set of target cpus.  The target cpus will
 * execute cs_fin1_func(poll) prior to this function returning, and will
 * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
 *
 * If cs_maxcount is non-zero then we are mastering a cpusync with one or
 * more remote cpus and must account for it in our thread structure.
 */
void
lwkt_cpusync_finish(lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = -1;
    if (poll->cs_mask & gd->gd_cpumask) {
	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func)
	    poll->cs_fin2_func(poll->cs_data);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
	while (poll->cs_count != -(poll->cs_maxcount + 1)) {
	    crit_enter();
	    lwkt_process_ipiq();
	    crit_exit();
	}
	--gd->gd_curthread->td_cscount;
    }
#endif
}
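
/*
 * Usage sketch (illustrative only): a start/finish pair brackets an
 * operation that must not race any other cpu, with the remote cpus held
 * spinning in lwkt_cpusync_remote2() in between:
 *
 *	struct lwkt_cpusync cmd;
 *
 *	cmd.cs_run_func = NULL;
 *	cmd.cs_fin1_func = NULL;
 *	cmd.cs_fin2_func = NULL;
 *	cmd.cs_data = NULL;
 *	lwkt_cpusync_start(mycpu->gd_other_cpus & smp_active_mask, &cmd);
 *	... perform the update the other cpus must not interfere with ...
 *	lwkt_cpusync_finish(&cmd);
 */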

#ifdef SMP

/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t poll)
{
    atomic_add_int(&poll->cs_count, 1);
    if (poll->cs_run_func)
	poll->cs_run_func(poll);
    lwkt_cpusync_remote2(poll);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.  When the originator requests that we
 * finish we execute cs_fin1_func(poll) synchronously and cs_fin2_func(data)
 * in tandem with the release.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t poll)
{
    if (poll->cs_count < 0) {
	cpusync_func2_t savef;
	void *saved;

	if (poll->cs_fin1_func)
	    poll->cs_fin1_func(poll);
	if (poll->cs_fin2_func) {
	    savef = poll->cs_fin2_func;
	    saved = poll->cs_data;
	    atomic_add_int(&poll->cs_count, -1);
	    savef(saved);
	} else {
	    atomic_add_int(&poll->cs_count, -1);
	}
    } else {
	globaldata_t gd = mycpu;
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = poll;
	ip->ip_arg2[wi] = 0;
	cpu_sfence();
	++ip->ip_windex;
    }
}

#endif