ifpoll: Expose kernel time fraction; currently for debugging only.
[dragonfly.git] sys/net/if_poll.c
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>

/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_qpoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_qpoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.{rxX,txX}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_IOPOLL_BURST_MAX <= poll_burst_max <= MAX_IOPOLL_BURST_MAX
 */
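
/*
 * Example (an illustrative sketch only, not part of this file): a
 * hypothetical single-queue driver "foo", registering its handlers on
 * CPU(0) only, could implement if_qpoll along these lines.  foo_softc
 * and the foo_* helpers are assumed names, not a real driver's API.
 *
 *	static void
 *	foo_qpoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {	// register
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->sc_serializer;
 *
 *			info->ifpi_rx[0].poll_func = foo_rxpoll;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->sc_serializer;
 *
 *			info->ifpi_tx[0].poll_func = foo_txpoll;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->sc_serializer;
 *
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_disable_intr(sc);
 *		} else {		// deregister
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_enable_intr(sc);
 *		}
 *	}
 */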

#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000
#define IOPOLL_FREQ_DEFAULT	IFPOLL_FREQ_DEFAULT
#define STPOLL_FREQ_DEFAULT	100

#define IFPOLL_TXFRAC_DEFAULT	1
#define IFPOLL_STFRAC_DEFAULT	20

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2

struct iopoll_rec {
	struct lwkt_serialize *serializer;
	struct ifnet *ifp;
	void *arg;
	ifpoll_iofn_t poll_func;
};

struct iopoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer pollclock;
#endif

	struct timeval prev_t;			/* state */
	uint32_t short_ticks;			/* statistics */
	uint32_t lost_polls;			/* statistics */
	uint32_t suspect;			/* statistics */
	uint32_t stalled;			/* statistics */
	uint32_t pending_polls;			/* state */

	struct netmsg poll_netmsg;

	int poll_cpuid;
#ifdef IFPOLL_MULTI_SYSTIMER
	int pollhz;				/* tunable */
#else
	int poll_type;				/* IFPOLL_{RX,TX} */
#endif
	uint32_t phase;				/* state */
	int residual_burst;			/* state */
	uint32_t poll_each_burst;		/* tunable */
	struct timeval poll_start_t;		/* state */

	uint32_t poll_handlers;			/* next free entry in pr[]. */
	struct iopoll_rec pr[IFPOLL_LIST_LEN];

	struct netmsg poll_more_netmsg;

	uint32_t poll_burst;			/* state */
	uint32_t poll_burst_max;		/* tunable */
	uint32_t user_frac;			/* tunable */
	uint32_t kern_frac;			/* state */

	struct sysctl_ctx_list poll_sysctl_ctx;
	struct sysctl_oid *poll_sysctl_tree;
} __cachealign;

struct stpoll_rec {
	struct lwkt_serialize *serializer;
	struct ifnet *ifp;
	ifpoll_stfn_t status_func;
};

struct stpoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer pollclock;
#endif

	struct netmsg poll_netmsg;

#ifdef IFPOLL_MULTI_SYSTIMER
	int pollhz;				/* tunable */
#endif
	uint32_t poll_handlers;			/* next free entry in pr[]. */
	struct stpoll_rec pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list poll_sysctl_ctx;
	struct sysctl_oid *poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg nmsg;
	struct iopoll_ctx *ctx;
};

#ifndef IFPOLL_MULTI_SYSTIMER

struct ifpoll_data {
	struct systimer clock;
	int txfrac_count;
	int stfrac_count;
	u_int tx_cpumask;
	u_int rx_cpumask;
} __cachealign;

#endif

static struct stpoll_ctx stpoll_context;
static struct iopoll_ctx *rxpoll_context[IFPOLL_CTX_MAX];
static struct iopoll_ctx *txpoll_context[IFPOLL_CTX_MAX];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	ifpoll_ncpus = IFPOLL_CTX_MAX;

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);

#ifdef IFPOLL_MULTI_SYSTIMER

static int	stpoll_hz = STPOLL_FREQ_DEFAULT;
static int	iopoll_hz = IOPOLL_FREQ_DEFAULT;

TUNABLE_INT("net.ifpoll.stpoll_hz", &stpoll_hz);
TUNABLE_INT("net.ifpoll.iopoll_hz", &iopoll_hz);

#else	/* !IFPOLL_MULTI_SYSTIMER */

static struct ifpoll_data ifpoll0;
static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
static int	ifpoll_handlers;

TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);

static void	sysctl_ifpollhz_handler(struct netmsg *);
static int	sysctl_ifpollhz(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_net_ifpoll, OID_AUTO, pollhz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_ifpollhz, "I", "Polling frequency");
SYSCTL_INT(_net_ifpoll, OID_AUTO, tx_frac, CTLFLAG_RW,
	   &ifpoll_txfrac, 0, "Every this many cycles poll transmit");
SYSCTL_INT(_net_ifpoll, OID_AUTO, st_frac, CTLFLAG_RW,
	   &ifpoll_stfrac, 0, "Every this many cycles poll status");
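
/*
 * Example: the master polling frequency can be changed at runtime,
 * e.g.
 *	sysctl net.ifpoll.pollhz=4000
 * The new value is clamped to IFPOLL_FREQ_MAX; the systimer itself is
 * only reprogrammed while at least one handler is registered.
 */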

#endif	/* IFPOLL_MULTI_SYSTIMER */

void		ifpoll_init_pcpu(int);

#ifndef IFPOLL_MULTI_SYSTIMER
static void	ifpoll_start_handler(struct netmsg *);
static void	ifpoll_stop_handler(struct netmsg *);
static void	ifpoll_handler_addevent(void);
static void	ifpoll_handler_delevent(void);
static void	ifpoll_ipi_handler(void *, int);
static void	ifpoll_systimer(systimer_t, struct intrframe *);
#endif

static void	ifpoll_register_handler(struct netmsg *);
static void	ifpoll_deregister_handler(struct netmsg *);

/*
 * Status polling
 */
static void	stpoll_init(void);
static void	stpoll_handler(struct netmsg *);
static void	stpoll_clock(struct stpoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	stpoll_systimer(systimer_t, struct intrframe *);
#endif
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_stpollhz_handler(struct netmsg *);
static int	sysctl_stpollhz(SYSCTL_HANDLER_ARGS);
#endif

/*
 * RX/TX polling
 */
static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	iopoll_handler(struct netmsg *);
static void	iopollmore_handler(struct netmsg *);
static void	iopoll_clock(struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	iopoll_systimer(systimer_t, struct intrframe *);
#endif
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_iopollhz_handler(struct netmsg *);
static int	sysctl_iopollhz(SYSCTL_HANDLER_ARGS);
#endif
static void	sysctl_burstmax_handler(struct netmsg *);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(struct netmsg *);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

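/*
 * Send the poll netmsg to the current CPU's ifnet message port, but
 * only if the previous message has already been processed (MSGF_DONE);
 * an in-flight message is never resent, so a still-busy poller simply
 * absorbs the tick.
 */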
static __inline void
ifpoll_sendmsg_oncpu(struct netmsg *msg)
{
	if (msg->nm_lmsg.ms_flags & MSGF_DONE)
		ifnet_sendmsg(&msg->nm_lmsg, mycpuid);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu(&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_more_netmsg);
}

/*
 * Initialize per-cpu qpolling(4) context.  Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= IFPOLL_CTX_MAX) {
		return;
	} else if (cpuid == 0) {
		if (ifpoll_ncpus > ncpus)
			ifpoll_ncpus = ncpus;
		kprintf("ifpoll_ncpus %d\n", ifpoll_ncpus);

#ifndef IFPOLL_MULTI_SYSTIMER
		systimer_init_periodic_nq(&ifpoll0.clock,
					  ifpoll_systimer, NULL, 1);
#endif

		stpoll_init();
	}
	iopoll_init(cpuid);
}

#ifndef IFPOLL_MULTI_SYSTIMER

static void
ifpoll_ipi_handler(void *arg __unused, int poll)
{
	KKASSERT(mycpuid < ifpoll_ncpus);

	if (poll & IFPOLL_TX)
		iopoll_clock(txpoll_context[mycpuid]);
	if (poll & IFPOLL_RX)
		iopoll_clock(rxpoll_context[mycpuid]);
}

static void
ifpoll_systimer(systimer_t info __unused, struct intrframe *frame __unused)
{
	uint32_t cpumask = 0;

	KKASSERT(mycpuid == 0);

	if (ifpoll0.stfrac_count-- == 0) {
		ifpoll0.stfrac_count = ifpoll_stfrac;
		stpoll_clock(&stpoll_context);
	}

	if (ifpoll0.txfrac_count-- == 0) {
		ifpoll0.txfrac_count = ifpoll_txfrac;

		/* TODO: We may try to piggyback TX on RX */
		cpumask = smp_active_mask & ifpoll0.tx_cpumask;
		if (cpumask != 0) {
			lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
					     NULL, IFPOLL_TX);
		}
	}

	cpumask = smp_active_mask & ifpoll0.rx_cpumask;
	if (cpumask != 0) {
		lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
				     NULL, IFPOLL_RX);
	}
}

static void
ifpoll_start_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: start\n");
	systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
ifpoll_stop_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: stop\n");
	systimer_adjust_periodic(&ifpoll0.clock, 1);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
ifpoll_handler_addevent(void)
{
	if (atomic_fetchadd_int(&ifpoll_handlers, 1) == 0) {
		struct netmsg *nmsg;

		/* Start systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_start_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}

static void
ifpoll_handler_delevent(void)
{
	KKASSERT(ifpoll_handlers > 0);
	if (atomic_fetchadd_int(&ifpoll_handlers, -1) == 1) {
		struct netmsg *nmsg;

		/* Stop systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_stop_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}

static void
sysctl_ifpollhz_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust the polling
	 * systimer frequency; it will be adjusted once a handler is
	 * registered.
	 */
	ifpoll_pollhz = nmsg->nm_lmsg.u.ms_result;
	if (ifpoll_handlers)
		systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_ifpollhz(SYSCTL_HANDLER_ARGS)
{
	struct netmsg nmsg;
	int error, phz;

	phz = ifpoll_pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_ifpollhz_handler);
	nmsg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&nmsg.nm_lmsg, 0);
}

#endif	/* !IFPOLL_MULTI_SYSTIMER */

int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info info;
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		return EBUSY;
	}

	bzero(&info, sizeof(info));
	info.ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_qpoll(ifp, &info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_register_handler);
	nmsg.nm_lmsg.u.ms_resultp = &info;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}
	return error;
}

int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_deregister_handler);
	nmsg.nm_lmsg.u.ms_resultp = ifp;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_qpoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}

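/*
 * The register/deregister handlers below chain across the CPUs: the
 * netmsg starts on CPU0 and is passed along with ifnet_forwardmsg()
 * until every CPU has installed (or removed) its per-cpu RX/TX
 * handlers; only the last CPU replies to the originator.
 */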
static void
ifpoll_register_handler(struct netmsg *nmsg)
{
	const struct ifpoll_info *info = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static void
ifpoll_deregister_handler(struct netmsg *nmsg)
{
	struct ifnet *ifp = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

#ifdef IFPOLL_MULTI_SYSTIMER
	st_ctx->pollhz = stpoll_hz;
#endif

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_STATIC_CHILDREN(_net_ifpoll),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			st_ctx, 0, sysctl_stpollhz, "I",
			"Status polling frequency");
#endif

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    stpoll_handler);

#ifdef IFPOLL_MULTI_SYSTIMER
	systimer_init_periodic_nq(&st_ctx->pollclock,
				  stpoll_systimer, st_ctx, 1);
#endif
}

#ifdef IFPOLL_MULTI_SYSTIMER

static void
sysctl_stpollhz_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust the polling
	 * systimer frequency; it will be adjusted once a handler is
	 * registered.
	 */
	st_ctx->pollhz = msg->nm_lmsg.u.ms_result;
	if (st_ctx->poll_handlers)
		systimer_adjust_periodic(&st_ctx->pollclock, st_ctx->pollhz);

	lwkt_replymsg(&msg->nm_lmsg, 0);
}

static int
sysctl_stpollhz(SYSCTL_HANDLER_ARGS)
{
	struct stpoll_ctx *st_ctx = arg1;
	struct netmsg msg;
	int error, phz;

	phz = st_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&msg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_stpollhz_handler);
	msg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&msg.nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */

/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i, poll_hz;

	KKASSERT(&td->td_msgport == ifnet_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = st_ctx->pollhz;
#else
	poll_hz = ifpoll_pollhz / (ifpoll_stfrac + 1);
#endif

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, poll_hz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}

/*
 * Hook from the status poll systimer.  Tries to schedule a status poll.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	globaldata_t gd = mycpu;

	KKASSERT(gd->gd_cpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;

	crit_enter_gd(gd);
	sched_stpoll(st_ctx);
	crit_exit_gd(gd);
}

#ifdef IFPOLL_MULTI_SYSTIMER
static void
stpoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	stpoll_clock(info->data);
}
#endif

static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 1) {
			systimer_adjust_periodic(&st_ctx->pollclock,
						 st_ctx->pollhz);
		}
#else
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}

static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 0)
			systimer_adjust_periodic(&st_ctx->pollclock, 1);
#else
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}

#ifndef IFPOLL_MULTI_SYSTIMER
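/*
 * Derive the effective polling frequency for an io context from the
 * master systimer frequency.  TX contexts run only once every
 * (ifpoll_txfrac + 1) master ticks; e.g. with the defaults (pollhz
 * 2000, tx_frac 1) RX is polled at 2000Hz and TX at 1000Hz.
 */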
static __inline int
iopoll_hz(struct iopoll_ctx *io_ctx)
{
	int poll_hz;

	poll_hz = ifpoll_pollhz;
	if (io_ctx->poll_type == IFPOLL_TX)
		poll_hz /= ifpoll_txfrac + 1;
	return poll_hz;
}
#endif

static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}

static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < IFPOLL_CTX_MAX);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}

static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	char cpuid_str[16];

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
#ifdef IFPOLL_MULTI_SYSTIMER
	io_ctx->pollhz = iopoll_hz;
#else
	io_ctx->poll_type = poll_type;
#endif
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	netmsg_init(&io_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopoll_handler);
	io_ctx->poll_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopollmore_handler);
	io_ctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";
	ksnprintf(cpuid_str, sizeof(cpuid_str), "%s%d",
		  poll_type_str, io_ctx->poll_cpuid);

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_STATIC_CHILDREN(_net_ifpoll),
				   OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
			  SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);

#ifdef IFPOLL_MULTI_SYSTIMER
	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&io_ctx->pollclock,
				  iopoll_systimer, io_ctx, 1);
#endif

	return io_ctx;
}

/*
 * Hook from the iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because a polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	globaldata_t gd = mycpu;
	struct timeval t;
	int delta, poll_hz;

	KKASSERT(gd->gd_cpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

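	/*
	 * A tick counts as short if less than half of the nominal
	 * polling period elapsed since the previous one: at e.g.
	 * poll_hz == 2000 the period is 500us, so delta < 250us
	 * (i.e. delta * poll_hz < 500000) trips the counter.
	 */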
	microuptime(&t);
	delta = (t.tv_usec - io_ctx->prev_t.tv_usec) +
		(t.tv_sec - io_ctx->prev_t.tv_sec) * 1000000;
	if (delta * poll_hz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		crit_enter_gd(gd);
		sched_iopoll(io_ctx);
		crit_exit_gd(gd);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}

#ifdef IFPOLL_MULTI_SYSTIMER
static void
iopoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	iopoll_clock(info->data);
}
#endif

/*
 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
iopoll_handler(struct netmsg *msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		microuptime(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

/*
 * iopollmore_handler is called after other netisrs, possibly scheduling
 * another iopoll_handler call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisrs between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low-level handling.
 */
static void
iopollmore_handler(struct netmsg *msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	struct timeval t;
	int kern_load, poll_hz;
	uint32_t pending_polls;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
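	/*
	 * kern_load below ends up as the percentage of the polling
	 * period spent in the poll handlers: elapsed_us * poll_hz /
	 * 1000000 would be the fraction of one period, so dividing
	 * by 10000 instead scales it to 0..100.  E.g. at poll_hz
	 * 2000 (500us period), 250us spent polling gives kern_load 50.
	 */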
	microuptime(&t);
	kern_load = (t.tv_usec - io_ctx->poll_start_t.tv_usec) +
		    (t.tv_sec - io_ctx->poll_start_t.tv_sec) * 1000000; /* us */
	kern_load = (kern_load * poll_hz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

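	/*
	 * Simple feedback to balance CPU between polling and userland:
	 * if the kernel consumed more than its share (100 - user_frac),
	 * shrink the burst by one, otherwise grow it by one, staying
	 * within [1, poll_burst_max].
	 */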
	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		  struct iopoll_ctx *io_ctx)
{
#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
			CTLTYPE_INT | CTLFLAG_RW, io_ctx, 0, sysctl_iopollhz,
			"I", "Device polling frequency");
#endif

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
			"IU", "Max Polling burst size");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
			"IU", "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&io_ctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&io_ctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
			&io_ctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&io_ctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
			&io_ctx->kern_frac, 0,
			"Kernel fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&io_ctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&io_ctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
		       &io_ctx->residual_burst, 0,
		       "# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&io_ctx->poll_handlers, 0,
			"Number of registered poll handlers");
}

#ifdef IFPOLL_MULTI_SYSTIMER

static int
sysctl_iopollhz(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	int error, phz;

	phz = io_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_iopollhz_handler);
	nmsg->nm_lmsg.u.ms_result = phz;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static void
sysctl_iopollhz_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	/*
	 * If polling is disabled or there is no polling handler
	 * registered, don't adjust polling systimer frequency.
	 * Polling systimer frequency will be adjusted once there
	 * are registered handlers.
	 */
	io_ctx->pollhz = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_handlers)
		systimer_adjust_periodic(&io_ctx->pollclock, io_ctx->pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */

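/*
 * The per-cpu tunables below are only modified on their owning CPU:
 * each sysctl handler marshals the new value into a netmsg and
 * dispatches it to that CPU's ifnet message port via ifnet_domsg(),
 * so the poll handlers can read the tunables without extra locking.
 */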
static void
sysctl_burstmax_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_burstmax_handler);
	nmsg->nm_lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static void
sysctl_eachburst_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->nm_lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_eachburst_handler);
	nmsg->nm_lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
		const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */
		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		if (io_ctx->poll_handlers == 1) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock,
						 io_ctx->pollhz);
#else
			u_int *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT((*mask & mycpu->gd_cpumask) == 0);
			atomic_set_int(mask, mycpu->gd_cpumask);
#endif
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}

static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock, 1);
#else
			u_int *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT(*mask & mycpu->gd_cpumask);
			atomic_clear_int(mask, mycpu->gd_cpumask);
#endif
			iopoll_reset_state(io_ctx);
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}