/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.4 2004/03/01 06:33:17 dillon Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#define THREAD_STACK    (UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/cpufunc.h>
#include <machine/lock.h>

#endif

#ifdef SMP
static __int64_t ipiq_count;
static __int64_t ipiq_fifofull;
static __int64_t ipiq_cscount;
#endif

#ifdef _KERNEL

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0, "");
#endif

#endif

#ifdef SMP

static int lwkt_process_ipiq1(lwkt_ipiq_t ip, struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t poll);
static void lwkt_cpusync_remote2(lwkt_cpusync_t poll);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu, so the FIFO can be written without locking: it is
 * written only by the owning cpu and drained only by the target cpu.
 *
 * YYY If the FIFO fills up we have to enable interrupts and process the
 * IPIQ while waiting for it to empty or we may deadlock with another cpu.
 * Create a CPU_*() function to do this!
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * Must be called from a critical section.
 */
int
lwkt_send_ipiq(globaldata_t target, ipifunc_t func, void *arg)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    if (target == gd) {
        func(arg);
        return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
        panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * We always drain before the FIFO becomes full so it should never
     * become full.  We need to leave enough entries to deal with
     * reentrancy.
     */
    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO);
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = (ipifunc2_t)func;
    ip->ip_arg[windex] = arg;
    cpu_mb1();
    ++ip->ip_windex;
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
        unsigned int eflags = read_eflags();
        cpu_enable_intr();
        ++ipiq_fifofull;
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
        write_eflags(eflags);
    }
    --gd->gd_intr_nesting_level;
    cpu_send_ipiq(target->gd_cpuid);    /* issues mem barrier if appropriate */
    crit_exit();
    return(ip->ip_windex);
}

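/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller queueing a function on another cpu and optionally waiting for it
 * to execute.  The names example_remote_func() and example_send() are made
 * up for the illustration; the calling pattern follows the routine above
 * and lwkt_wait_ipiq() further down.
 */
#if 0
static void
example_remote_func(void *arg)
{
    /* runs on the target cpu from its IPI processing path */
    printf("ipi arg %p on cpu %d\n", arg, mycpu->gd_cpuid);
}

static void
example_send(globaldata_t target)
{
    int seq;

    crit_enter();
    seq = lwkt_send_ipiq(target, example_remote_func, NULL);
    lwkt_wait_ipiq(target, seq);    /* spin until the target has run it */
    crit_exit();
}
#endif
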
/*
 * Send an IPI request passively; return 0 on success and ENOENT on failure.
 * This routine does not recurse through lwkt_process_ipiq() nor does it
 * block trying to queue the actual IPI.  If we successfully queue the
 * message but fail to queue the IPI, we still count it as a success.
 * The occasional small race against a target cpu HLT is recovered at
 * the next clock interrupt.
 */
int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc_t func, void *arg)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    if (target == gd) {
        func(arg);
        return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO - 1) {
        return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = (ipifunc2_t)func;
    ip->ip_arg[windex] = arg;
    cpu_mb1();
    ++ip->ip_windex;
    /*
     * passive mode doesn't work yet :-(
     */
#if 1
    cpu_send_ipiq(target->gd_cpuid);
#else
    cpu_send_ipiq_passive(target->gd_cpuid);
#endif
    return(0);
}

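/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller that tries the passive path first and falls back to the normal
 * (potentially draining) lwkt_send_ipiq() when the FIFO is too full.  The
 * passive routine returns ENOENT without queueing anything in that case,
 * so the fallback does not duplicate the message.
 */
#if 0
static void
example_notify(globaldata_t target, ipifunc_t func, void *arg)
{
    crit_enter();
    if (lwkt_send_ipiq_passive(target, func, arg) != 0)
        lwkt_send_ipiq(target, func, arg);
    crit_exit();
}
#endif
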
/*
 * Deprecated; used only by fast interrupt forwarding.
 */
int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc_t func, void *arg)
{
    return(lwkt_send_ipiq(globaldata_find(dcpu), func, arg));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq_mask(u_int32_t mask, ipifunc_t func, void *arg)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
        cpuid = bsfl(mask);
        lwkt_send_ipiq(globaldata_find(cpuid), func, arg);
        mask &= ~(1 << cpuid);
        ++count;
    }
    return(count);
}

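/*
 * Illustrative sketch only (not part of the original file): poking every
 * cpu named in a mask, e.g. from a scheduler-style notification.  The
 * handler example_poke() is made up; a real handler would typically set a
 * per-cpu flag or a reschedule request on the target cpu.
 */
#if 0
static void
example_poke(void *arg)
{
    /* runs on each target cpu named in the mask */
}

static void
example_kick_cpus(u_int32_t mask)
{
    crit_enter();
    lwkt_send_ipiq_mask(mask, example_poke, NULL);
    crit_exit();
}
#endif
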
/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
        ip = &mycpu->gd_ipiq[target->gd_cpuid];
        if ((int)(ip->ip_xindex - seq) < 0) {
            unsigned int eflags = read_eflags();
            cpu_enable_intr();
            while ((int)(ip->ip_xindex - seq) < 0) {
                crit_enter();
                lwkt_process_ipiq();
                crit_exit();
                if (--maxc == 0)
                    printf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
                        mycpu->gd_cpuid, target->gd_cpuid, ip->ip_xindex - seq);
                if (maxc < -1000000)
                    panic("LWKT_WAIT_IPIQ");
            }
            write_eflags(eflags);
        }
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions: one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
        if (n != gd->gd_cpuid) {
            ip = globaldata_find(n)->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq1(&ip[gd->gd_cpuid], NULL))
                    ;
            }
        }
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
        if (lwkt_process_ipiq1(&gd->gd_cpusyncq, NULL)) {
            if (gd->gd_curthread->td_cscount == 0)
                goto again;
            need_ipiq();
        }
    }
}

#ifdef _KERNEL
void
lwkt_process_ipiq_frame(struct intrframe frame)
{
    globaldata_t gd = mycpu;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
        if (n != gd->gd_cpuid) {
            ip = globaldata_find(n)->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq1(&ip[gd->gd_cpuid], &frame))
                    ;
            }
        }
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
        if (lwkt_process_ipiq1(&gd->gd_cpusyncq, &frame)) {
            if (gd->gd_curthread->td_cscount == 0)
                goto again;
            need_ipiq();
        }
    }
}
#endif

static int
lwkt_process_ipiq1(lwkt_ipiq_t ip, struct intrframe *frame)
{
    int ri;
    int wi = ip->ip_windex;

    /*
     * Note: xindex is only updated after we are sure the function has
     * finished execution.  Beware lwkt_process_ipiq() reentrancy!  The
     * function may send an IPI which may block/drain.
     */
    while ((ri = ip->ip_rindex) != wi) {
        ip->ip_rindex = ri + 1;
        ri &= MAXCPUFIFO_MASK;
        ip->ip_func[ri](ip->ip_arg[ri], frame);
        /* YYY memory barrier */
        ip->ip_xindex = ip->ip_rindex;
    }
    return(wi != ip->ip_windex);
}

#else

/*
 * !SMP dummy routines
 */

int
lwkt_send_ipiq(globaldata_t target, ipifunc_t func, void *arg)
{
    panic("lwkt_send_ipiq: UP box! (%d,%p,%p)", target->gd_cpuid, func, arg);
    return(0); /* NOT REACHED */
}

void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    panic("lwkt_wait_ipiq: UP box! (%d,%d)", target->gd_cpuid, seq);
}

#endif

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_simple()
 *
 *	The function is executed synchronously before return on remote
 *	cpus.  A lwkt_cpusync_t pointer is passed as an argument.  The
 *	data can be accessed via arg->cs_data.
 *
 *	XXX should I just pass the data as an argument to be consistent?
 */

void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = func;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
        func(&cmd);
    lwkt_cpusync_finish(&cmd);
}

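/*
 * Illustrative sketch only (not part of the original file): running a small
 * function on every cpu and not returning until all of them have executed
 * it.  example_sync_func() and example_invalidate_all() are made up; the
 * (cpumask_t)-1 mask simply names all cpus, which the routine above trims
 * down to the cpus that actually participate.
 */
#if 0
static void
example_sync_func(lwkt_cpusync_t info)
{
    /* info->cs_data is the data argument given to lwkt_cpusync_simple() */
    cpu_invltlb();
}

static void
example_invalidate_all(void)
{
    lwkt_cpusync_simple((cpumask_t)-1, example_sync_func, NULL);
}
#endif
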
/*
 * lwkt_cpusync_fastdata()
 *
 *	The function is executed in tandem with return on remote cpus.
 *	The data is directly passed as an argument.  Do not pass pointers
 *	to temporary storage as the storage might have gone poof by the
 *	time the target cpu executes the function.
 *
 *	At the moment lwkt_cpusync is declared on the stack and we must
 *	wait for all remote cpus to ack in lwkt_cpusync_finish(), but as
 *	a future optimization we should be able to put a counter in the
 *	globaldata structure (if it is not otherwise being used) and just
 *	poke it and return without waiting. XXX
 */
void
lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = func;
    cmd.cs_data = data;		/* remote cpus receive it via cs_fin2_func(cs_data) */
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
        func(data);
    lwkt_cpusync_finish(&cmd);
}

/*
 * lwkt_cpusync_start()
 *
 *	Start synchronization with a set of target cpus, return once they are
 *	known to be in a synchronization loop.  The target cpus will execute
 *	poll->cs_run_func() IN TANDEM WITH THE RETURN.
 *
 *	XXX future: add lwkt_cpusync_start_quick() and require a call to
 *	lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
 *	potentially absorb the IPI latency doing something useful.
 */
void
lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = 0;
    poll->cs_mask = mask;
#ifdef SMP
    poll->cs_maxcount = lwkt_send_ipiq_mask(
            mask & gd->gd_other_cpus & smp_active_mask,
            (ipifunc_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & (1 << gd->gd_cpuid)) {
        if (poll->cs_run_func)
            poll->cs_run_func(poll);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
        ++ipiq_cscount;
        ++gd->gd_curthread->td_cscount;
        while (poll->cs_count != poll->cs_maxcount) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
    }
#endif
}

void
lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;
#ifdef SMP
    int count;
#endif

    mask &= ~poll->cs_mask;
    poll->cs_mask |= mask;
#ifdef SMP
    count = lwkt_send_ipiq_mask(
            mask & gd->gd_other_cpus & smp_active_mask,
            (ipifunc_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & (1 << gd->gd_cpuid)) {
        if (poll->cs_run_func)
            poll->cs_run_func(poll);
    }
#ifdef SMP
    poll->cs_maxcount += count;
    if (poll->cs_maxcount) {
        if (poll->cs_maxcount == count)
            ++gd->gd_curthread->td_cscount;
        while (poll->cs_count != poll->cs_maxcount) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
    }
#endif
}

/*
 * Finish synchronization with a set of target cpus.  The target cpus will
 * execute cs_fin1_func(poll) prior to this function returning, and will
 * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
 *
 * If cs_maxcount is non-zero then we are mastering a cpusync with one or
 * more remote cpus and must account for it in our thread structure.
 */
void
lwkt_cpusync_finish(lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = -1;
    if (poll->cs_mask & (1 << gd->gd_cpuid)) {
        if (poll->cs_fin1_func)
            poll->cs_fin1_func(poll);
        if (poll->cs_fin2_func)
            poll->cs_fin2_func(poll->cs_data);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
        while (poll->cs_count != -(poll->cs_maxcount + 1)) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
        --gd->gd_curthread->td_cscount;
    }
#endif
}

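/*
 * Illustrative sketch only (not part of the original file): using the
 * start/finish pair directly so the master cpu can perform an operation
 * while the other cpus are parked in their IPI processing loops.  The
 * function example_patch_global() is made up for the illustration.
 */
#if 0
static void
example_patch_global(void)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;     /* nothing to run on the targets at start */
    cmd.cs_fin1_func = NULL;    /* nothing to run synchronously at finish */
    cmd.cs_fin2_func = NULL;    /* nothing to run in tandem with the release */
    cmd.cs_data = NULL;
    lwkt_cpusync_start(mycpu->gd_other_cpus & smp_active_mask, &cmd);
    /* the other active cpus are now spinning in lwkt_process_ipiq() */
    /* ... perform the operation that requires the other cpus to be held ... */
    lwkt_cpusync_finish(&cmd);  /* release the other cpus */
}
#endif
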
#ifdef SMP

/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t poll)
{
    atomic_add_int(&poll->cs_count, 1);
    if (poll->cs_run_func)
        poll->cs_run_func(poll);
    lwkt_cpusync_remote2(poll);
}

568
569/*
570 * helper IPI remote messaging function.
571 *
572 * Poll for the originator telling us to finish. If it hasn't, requeue
573 * our request so we spin on it. When the originator requests that we
574 * finish we execute cs_fin1_func(poll) synchronously and cs_fin2_func(data)
575 * in tandem with the release.
576 */
577static void
578lwkt_cpusync_remote2(lwkt_cpusync_t poll)
579{
580 if (poll->cs_count < 0) {
581 cpusync_func2_t savef;
582 void *saved;
583
584 if (poll->cs_fin1_func)
585 poll->cs_fin1_func(poll);
586 if (poll->cs_fin2_func) {
587 savef = poll->cs_fin2_func;
588 saved = poll->cs_data;
589 atomic_add_int(&poll->cs_count, -1);
590 savef(saved);
591 } else {
592 atomic_add_int(&poll->cs_count, -1);
593 }
594 } else {
595 globaldata_t gd = mycpu;
596 lwkt_ipiq_t ip;
597 int wi;
598
599 ip = &gd->gd_cpusyncq;
600 wi = ip->ip_windex & MAXCPUFIFO_MASK;
601 ip->ip_func[wi] = (ipifunc2_t)lwkt_cpusync_remote2;
602 ip->ip_arg[wi] = poll;
603 ++ip->ip_windex;
604 }
605}
606
3b6b7bd1 607#endif