/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if_poll.h>
#include <net/netmsg2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If an interface's if_qpoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *	ifpoll_info.ifpi_status.status_func == NULL
 *		No status polling handler will be installed on CPU(0)
 *	ifpoll_info.ifpi_rx[n].poll_func == NULL
 *		No RX polling handler will be installed on CPU(n)
 *	ifpoll_info.ifpi_tx[n].poll_func == NULL
 *		No TX polling handler will be installed on CPU(n)
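 *
 * As an illustration (a sketch, not part of this file), a hypothetical
 * single-queue driver's if_qpoll method could look like the following;
 * the xx_* names and softc layout are assumptions:
 *
 *	static void
 *	xx_qpoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {	/- register -/
 *			info->ifpi_status.status_func = xx_npoll_status;
 *			info->ifpi_status.serializer = &sc->xx_serialize;
 *
 *			/- entry n installs on CPU(n); use entry 0 only -/
 *			info->ifpi_rx[0].poll_func = xx_npoll_rx;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->xx_serialize;
 *
 *			info->ifpi_tx[0].poll_func = xx_npoll_tx;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->xx_serialize;
 *
 *			if (ifp->if_flags & IFF_RUNNING)
 *				xx_disable_intr(sc);
 *		} else {		/- deregister -/
 *			if (ifp->if_flags & IFF_RUNNING)
 *				xx_enable_intr(sc);
 *		}
 *	}
 *
 * (Nested comment delimiters are written as /- -/ above.)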
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling can be done at a lower frequency than RX.
 * To avoid staggering multiple systimers at high frequency, the RX
 * systimer piggybacks TX and status polling on its own ticks (XXX).
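 *
 * For example, with the defaults (pollhz 2000, tx_frac 1, status_frac 19),
 * RX is polled 2000 times per second, TX on every 2nd RX tick
 * (2000 / (1 + 1) = 1000 times per second) and status on every 20th
 * (2000 / (19 + 1) = 100 times per second).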
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_qpoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * TX/RX polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
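 *
 * A minimal RX polling handler obeying the count limit could look like
 * the following sketch (again with hypothetical xx_* names):
 *
 *	static void
 *	xx_npoll_rx(struct ifnet *ifp, void *arg, int count)
 *	{
 *		struct xx_softc *sc = arg;
 *		int i;
 *
 *		for (i = 0; i < count; ++i) {
 *			if (!xx_rxeof(sc))
 *				break;	(RX ring drained early)
 *		}
 *	}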
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
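 *
 * The sysctl code below exports one subtree per CPU, e.g. for CPU 0
 * (the status node exists on CPU 0 only):
 *
 *	net.ifpoll.0.pollhz
 *	net.ifpoll.0.status.handlers
 *	net.ifpoll.0.rx.burst_max
 *	net.ifpoll.0.rx.each_burst
 *	net.ifpoll.0.rx.user_frac
 *	net.ifpoll.0.tx.burst_max
 *	...
 */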
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000

#define IFPOLL_TXFRAC_DEFAULT	1
#define IFPOLL_STFRAC_DEFAULT	19

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2
struct iopoll_rec {
        struct lwkt_serialize *serializer;
        struct ifnet *ifp;
        void *arg;

        ifpoll_iofn_t poll_func;
};

struct iopoll_ctx {
        union ifpoll_time prev_t;
        uint32_t short_ticks;           /* statistics */
        uint32_t lost_polls;            /* statistics */
        uint32_t suspect;               /* statistics */
        uint32_t stalled;               /* statistics */
        uint32_t pending_polls;         /* state */

        struct netmsg poll_netmsg;

        int poll_cpuid;
        int pollhz;
        uint32_t phase;                 /* state */
        int residual_burst;             /* state */
        uint32_t poll_each_burst;       /* tunable */
        union ifpoll_time poll_start_t; /* state */

        uint32_t poll_handlers;         /* next free entry in pr[]. */
        struct iopoll_rec pr[IFPOLL_LIST_LEN];

        struct netmsg poll_more_netmsg;

        uint32_t poll_burst;            /* state */
        uint32_t poll_burst_max;        /* tunable */
        uint32_t user_frac;             /* tunable */
        uint32_t kern_frac;             /* state */

        struct sysctl_ctx_list poll_sysctl_ctx;
        struct sysctl_oid *poll_sysctl_tree;
};

struct poll_comm {
        struct systimer pollclock;

        int poll_cpuid;

        int stfrac_count;               /* state */
        int poll_stfrac;                /* tunable */

        int txfrac_count;               /* state */
        int poll_txfrac;                /* tunable */

        int pollhz;                     /* tunable */

        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *sysctl_tree;
};

struct stpoll_rec {
        struct lwkt_serialize *serializer;
        struct ifnet *ifp;
        ifpoll_stfn_t status_func;
};

struct stpoll_ctx {
        struct netmsg poll_netmsg;

        int pollhz;

        uint32_t poll_handlers;         /* next free entry in pr[]. */
        struct stpoll_rec pr[IFPOLL_LIST_LEN];

        struct sysctl_ctx_list poll_sysctl_ctx;
        struct sysctl_oid *poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
        struct netmsg nmsg;
        struct iopoll_ctx *ctx;
};
void		ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(struct netmsg *);
static void	ifpoll_deregister_handler(struct netmsg *);

static void	stpoll_init(void);
static void	stpoll_handler(struct netmsg *);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	iopoll_handler(struct netmsg *);
static void	iopollmore_handler(struct netmsg *);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *);
static void	sysctl_burstmax_handler(struct netmsg *);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(struct netmsg *);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, struct intrframe *);
static void	poll_comm_systimer(systimer_t, struct intrframe *);
static void	sysctl_pollhz_handler(struct netmsg *);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);

static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[IFPOLL_CTX_MAX];
static struct iopoll_ctx	*rxpoll_context[IFPOLL_CTX_MAX];
static struct iopoll_ctx	*txpoll_context[IFPOLL_CTX_MAX];
SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
            "Network device polling parameters");

static int	ifpoll_ncpus = IFPOLL_CTX_MAX;

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
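
/*
 * The tunables above are picked up from the kernel environment, so the
 * defaults can be overridden at boot time, e.g. in /boot/loader.conf
 * (values purely illustrative):
 *
 *	net.ifpoll.pollhz="4000"
 *	net.ifpoll.burst_max="300"
 *	net.ifpoll.each_burst="10"
 */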
static __inline void
ifpoll_sendmsg_oncpu(struct netmsg *msg)
{
        if (msg->nm_lmsg.ms_flags & MSGF_DONE)
                ifnet_sendmsg(&msg->nm_lmsg, mycpuid);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
        ifpoll_sendmsg_oncpu(&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
        ifpoll_sendmsg_oncpu(&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
        ifpoll_sendmsg_oncpu(&io_ctx->poll_more_netmsg);
}
static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
        if (tsc_present)
                t->tsc = rdtsc();
        else
                microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
        if (tsc_present) {
                return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
        } else {
                return ((e->tv.tv_usec - s->tv.tv_usec) +
                        (e->tv.tv_sec - s->tv.tv_sec) * 1000000);
        }
}
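
/*
 * Usage sketch for the two helpers above (illustrative only):
 *
 *	union ifpoll_time s, e;
 *	int us;
 *
 *	ifpoll_time_get(&s);
 *	... work to be measured ...
 *	ifpoll_time_get(&e);
 *	us = ifpoll_time_diff(&s, &e);	-- elapsed microseconds
 */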
/*
 * Initialize per-cpu qpolling(4) context.  Called from kern_clock.c.
 */
void
ifpoll_init_pcpu(int cpuid)
{
        if (cpuid >= IFPOLL_CTX_MAX)
                return;

        if (cpuid == 0) {
                if (ifpoll_ncpus > ncpus)
                        ifpoll_ncpus = ncpus;
                kprintf("ifpoll_ncpus %d\n", ifpoll_ncpus);
        }

        poll_comm_init(cpuid);

        if (cpuid == 0)
                stpoll_init();
        iopoll_init(cpuid);

        poll_comm_start(cpuid);
}
int
ifpoll_register(struct ifnet *ifp)
{
        struct ifpoll_info info;
        struct netmsg nmsg;
        int error;

        if (ifp->if_qpoll == NULL) {
                /* Device does not support polling */
                return EOPNOTSUPP;
        }

        /*
         * Attempt to register.  Interlock with IFF_NPOLLING.
         */

        ifnet_serialize_all(ifp);

        if (ifp->if_flags & IFF_NPOLLING) {
                /* Already polling */
                ifnet_deserialize_all(ifp);
                return EBUSY;
        }

        bzero(&info, sizeof(info));
        info.ifpi_ifp = ifp;

        ifp->if_flags |= IFF_NPOLLING;
        ifp->if_qpoll(ifp, &info);

        ifnet_deserialize_all(ifp);

        netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
                    ifpoll_register_handler);
        nmsg.nm_lmsg.u.ms_resultp = &info;

        error = ifnet_domsg(&nmsg.nm_lmsg, 0);
        if (error) {
                if (!ifpoll_deregister(ifp)) {
                        if_printf(ifp, "ifpoll_register: "
                                  "ifpoll_deregister failed!\n");
                }
        }
        return error;
}
int
ifpoll_deregister(struct ifnet *ifp)
{
        struct netmsg nmsg;
        int error;

        if (ifp->if_qpoll == NULL)
                return EOPNOTSUPP;

        ifnet_serialize_all(ifp);

        if ((ifp->if_flags & IFF_NPOLLING) == 0) {
                ifnet_deserialize_all(ifp);
                return EINVAL;
        }
        ifp->if_flags &= ~IFF_NPOLLING;

        ifnet_deserialize_all(ifp);

        netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
                    ifpoll_deregister_handler);
        nmsg.nm_lmsg.u.ms_resultp = ifp;

        error = ifnet_domsg(&nmsg.nm_lmsg, 0);
        if (!error) {
                ifnet_serialize_all(ifp);
                ifp->if_qpoll(ifp, NULL);
                ifnet_deserialize_all(ifp);
        }
        return error;
}
static void
ifpoll_register_handler(struct netmsg *nmsg)
{
        const struct ifpoll_info *info = nmsg->nm_lmsg.u.ms_resultp;
        int cpuid = mycpuid, nextcpu;
        int error;

        KKASSERT(cpuid < ifpoll_ncpus);
        KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

        if (cpuid == 0) {
                error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
                if (error)
                        goto failed;
        }

        error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
                                &info->ifpi_rx[cpuid]);
        if (error)
                goto failed;

        error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
                                &info->ifpi_tx[cpuid]);
        if (error)
                goto failed;

        /* Adjust polling frequency, after all registration is done */
        poll_comm_adjust_pollhz(poll_common[cpuid]);

        nextcpu = cpuid + 1;
        if (nextcpu < ifpoll_ncpus)
                ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
        else
                lwkt_replymsg(&nmsg->nm_lmsg, 0);
        return;
failed:
        lwkt_replymsg(&nmsg->nm_lmsg, error);
}
static void
ifpoll_deregister_handler(struct netmsg *nmsg)
{
        struct ifnet *ifp = nmsg->nm_lmsg.u.ms_resultp;
        int cpuid = mycpuid, nextcpu;

        KKASSERT(cpuid < ifpoll_ncpus);
        KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

        /* Ignore errors */
        if (cpuid == 0)
                stpoll_deregister(ifp);
        iopoll_deregister(ifp, rxpoll_context[cpuid]);
        iopoll_deregister(ifp, txpoll_context[cpuid]);

        /* Adjust polling frequency, after all deregistration is done */
        poll_comm_adjust_pollhz(poll_common[cpuid]);

        nextcpu = cpuid + 1;
        if (nextcpu < ifpoll_ncpus)
                ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
        else
                lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static void
stpoll_init(void)
{
        struct stpoll_ctx *st_ctx = &stpoll_context;
        const struct poll_comm *comm = poll_common[0];

        st_ctx->pollhz = comm->pollhz / (comm->poll_stfrac + 1);

        sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
        st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
                                   SYSCTL_CHILDREN(comm->sysctl_tree),
                                   OID_AUTO, "status", CTLFLAG_RD, 0, "");

        SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
                        SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
                        OID_AUTO, "handlers", CTLFLAG_RD,
                        &st_ctx->poll_handlers, 0,
                        "Number of registered status poll handlers");

        netmsg_init(&st_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
                    stpoll_handler);
}
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(struct netmsg *msg)
{
        struct stpoll_ctx *st_ctx = &stpoll_context;
        struct thread *td = curthread;
        int i;

        KKASSERT(&td->td_msgport == ifnet_portfn(0));

        crit_enter_quick(td);

        /* Reply ASAP */
        lwkt_replymsg(&msg->nm_lmsg, 0);

        if (st_ctx->poll_handlers == 0) {
                crit_exit_quick(td);
                return;
        }

        for (i = 0; i < st_ctx->poll_handlers; ++i) {
                const struct stpoll_rec *rec = &st_ctx->pr[i];
                struct ifnet *ifp = rec->ifp;

                if (!lwkt_serialize_try(rec->serializer))
                        continue;

                if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
                    (IFF_RUNNING | IFF_NPOLLING))
                        rec->status_func(ifp, st_ctx->pollhz);

                lwkt_serialize_exit(rec->serializer);
        }

        crit_exit_quick(td);
}
/*
 * Hook from status poll systimer.  Tries to schedule a status poll.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
        globaldata_t gd = mycpu;

        KKASSERT(gd->gd_cpuid == 0);

        if (st_ctx->poll_handlers == 0)
                return;

        sched_stpoll(st_ctx);
}
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
        struct stpoll_ctx *st_ctx = &stpoll_context;
        int error;

        KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

        if (st_rec->status_func == NULL)
                return 0;

        /*
         * Check if there is room.
         */
        if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
                /*
                 * List full, cannot register more entries.
                 * This should never happen; if it does, it is probably a
                 * broken driver trying to register multiple times.  Checking
                 * this at runtime is expensive, and won't solve the problem
                 * anyway, so just report a few times and then give up.
                 */
                static int verbose = 10; /* XXX */

                if (verbose > 0) {
                        kprintf("status poll handlers list full, "
                                "maybe a broken driver?\n");
                        verbose--;
                }
                error = ENOENT;
        } else {
                struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

                rec->ifp = ifp;
                rec->serializer = st_rec->serializer;
                rec->status_func = st_rec->status_func;

                st_ctx->poll_handlers++;
                error = 0;
        }
        return error;
}
static int
stpoll_deregister(struct ifnet *ifp)
{
        struct stpoll_ctx *st_ctx = &stpoll_context;
        int i, error;

        KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

        for (i = 0; i < st_ctx->poll_handlers; ++i) {
                if (st_ctx->pr[i].ifp == ifp) /* Found it */
                        break;
        }
        if (i == st_ctx->poll_handlers) {
                kprintf("stpoll_deregister: ifp not found!!!\n");
                error = ENOENT;
        } else {
                st_ctx->poll_handlers--;
                if (i < st_ctx->poll_handlers) {
                        /* Last entry replaces this one. */
                        st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
                }
                error = 0;
        }
        return error;
}
static void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
        crit_enter();
        io_ctx->poll_burst = 5;
        io_ctx->pending_polls = 0;
        io_ctx->residual_burst = 0;
        io_ctx->phase = 0;
        io_ctx->kern_frac = 0;
        bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
        bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
        crit_exit();
}
static void
iopoll_init(int cpuid)
{
        KKASSERT(cpuid < IFPOLL_CTX_MAX);

        rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
        txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}
static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
        struct poll_comm *comm;
        struct iopoll_ctx *io_ctx;
        const char *poll_type_str;

        KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

        /*
         * Make sure that tunables are in sane state
         */
        if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
                iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
        else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
                iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

        if (iopoll_each_burst > iopoll_burst_max)
                iopoll_each_burst = iopoll_burst_max;

        comm = poll_common[cpuid];

        /*
         * Create the per-cpu polling context
         */
        io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

        io_ctx->poll_each_burst = iopoll_each_burst;
        io_ctx->poll_burst_max = iopoll_burst_max;
        io_ctx->user_frac = 50;
        if (poll_type == IFPOLL_RX)
                io_ctx->pollhz = comm->pollhz;
        else
                io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
        io_ctx->poll_cpuid = cpuid;
        iopoll_reset_state(io_ctx);

        netmsg_init(&io_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
                    iopoll_handler);
        io_ctx->poll_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

        netmsg_init(&io_ctx->poll_more_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
                    iopollmore_handler);
        io_ctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

        /*
         * Initialize per-cpu sysctl nodes
         */
        if (poll_type == IFPOLL_RX)
                poll_type_str = "rx";
        else
                poll_type_str = "tx";

        sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
        io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
                                   SYSCTL_CHILDREN(comm->sysctl_tree),
                                   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
        iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
                          SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);

        return io_ctx;
}
/*
 * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because a polling handler should
 * only run for a short time.  However, in some cases (e.g. when there
 * are changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
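 *
 * The short-tick test below relies on this: a full tick lasts
 * 1000000 / pollhz microseconds, so "delta * pollhz < 500000" is simply
 * "delta is shorter than half the expected tick period", computed
 * without a division.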
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
        globaldata_t gd = mycpu;
        union ifpoll_time t;
        int delta;

        KKASSERT(gd->gd_cpuid == io_ctx->poll_cpuid);

        if (io_ctx->poll_handlers == 0)
                return;

        ifpoll_time_get(&t);
        delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
        if (delta * io_ctx->pollhz < 500000)
                io_ctx->short_ticks++;
        else
                io_ctx->prev_t = t;

        if (io_ctx->pending_polls > 100) {
                /*
                 * Too much, assume it has stalled (not always true,
                 * see comment above).
                 */
                io_ctx->stalled++;
                io_ctx->pending_polls = 0;
                io_ctx->phase = 0;
        }

        if (io_ctx->phase <= 2) {
                if (io_ctx->phase != 0)
                        io_ctx->suspect++;
                io_ctx->phase = 1;
                sched_iopoll(io_ctx);
                io_ctx->phase = 2;
        }
        if (io_ctx->pending_polls++ > 0)
                io_ctx->lost_polls++;
}
/*
 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
iopoll_handler(struct netmsg *msg)
{
        struct iopoll_ctx *io_ctx;
        struct thread *td = curthread;
        int i, cycles;

        io_ctx = msg->nm_lmsg.u.ms_resultp;
        KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        crit_enter_quick(td);

        /* Reply ASAP */
        lwkt_replymsg(&msg->nm_lmsg, 0);

        if (io_ctx->poll_handlers == 0) {
                crit_exit_quick(td);
                return;
        }

        io_ctx->phase = 3;
        if (io_ctx->residual_burst == 0) {
                /* First call in this tick */
                ifpoll_time_get(&io_ctx->poll_start_t);
                io_ctx->residual_burst = io_ctx->poll_burst;
        }
        cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
                 io_ctx->residual_burst : io_ctx->poll_each_burst;
        io_ctx->residual_burst -= cycles;

        for (i = 0; i < io_ctx->poll_handlers; i++) {
                const struct iopoll_rec *rec = &io_ctx->pr[i];
                struct ifnet *ifp = rec->ifp;

                if (!lwkt_serialize_try(rec->serializer))
                        continue;

                if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
                    (IFF_RUNNING | IFF_NPOLLING))
                        rec->poll_func(ifp, rec->arg, cycles);

                lwkt_serialize_exit(rec->serializer);
        }

        /*
         * Do a quick exit/enter to catch any higher-priority
         * interrupt sources.
         */
        crit_exit_quick(td);
        crit_enter_quick(td);

        sched_iopollmore(io_ctx);
        io_ctx->phase = 4;

        crit_exit_quick(td);
}
/*
 * iopollmore_handler is called after the other netisrs have run, possibly
 * scheduling another iopoll_handler call, or adapting the burst size for
 * the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at
 * once, because the burst could take a long time to be completely
 * processed, leading to unfairness.  To reduce the problem, and also to
 * account better for time spent in network-related processing, we split
 * the burst in smaller chunks of fixed size, giving control to the other
 * netisrs between chunks.  This helps in improving the fairness, reducing
 * livelock and accounting for the work performed in low-level handling.
 */
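/*
 * For example, with poll_burst = 150 and poll_each_burst = 5, a full
 * burst is drained in 150 / 5 = 30 iopoll_handler passes, with the
 * other netisrs getting a chance to run between consecutive passes.
 */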
static void
iopollmore_handler(struct netmsg *msg)
{
        struct thread *td = curthread;
        struct iopoll_ctx *io_ctx;
        union ifpoll_time t;
        int kern_load;
        uint32_t pending_polls;

        io_ctx = msg->nm_lmsg.u.ms_resultp;
        KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        crit_enter_quick(td);

        /* Reply ASAP */
        lwkt_replymsg(&msg->nm_lmsg, 0);

        if (io_ctx->poll_handlers == 0) {
                crit_exit_quick(td);
                return;
        }

        io_ctx->phase = 5;
        if (io_ctx->residual_burst > 0) {
                sched_iopoll(io_ctx);
                crit_exit_quick(td);
                /* Will run immediately on return, followed by netisrs */
                return;
        }

        /* Here we can account time spent in iopoll's in this tick */
        ifpoll_time_get(&t);
        kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
        kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
        io_ctx->kern_frac = kern_load;
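
        /*
         * Sanity check of the scaling above: kern_load starts out in
         * microseconds and a tick lasts 1000000 / pollhz microseconds,
         * so the fraction of the tick spent polling is
         * kern_load / (1000000 / pollhz); multiplying by 100 to get a
         * percentage yields kern_load * pollhz / 10000.
         */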
        if (kern_load > (100 - io_ctx->user_frac)) {
                /* Try to decrease the burst */
                if (io_ctx->poll_burst > 1)
                        io_ctx->poll_burst--;
        } else {
                if (io_ctx->poll_burst < io_ctx->poll_burst_max)
                        io_ctx->poll_burst++;
        }

        io_ctx->pending_polls--;
        pending_polls = io_ctx->pending_polls;

        if (pending_polls == 0) {
                /* We are done */
                io_ctx->phase = 0;
        } else {
                /*
                 * Last cycle was long and caused us to miss one or more
                 * hardclock ticks.  Restart processing again, but slightly
                 * reduce the burst size to prevent this from happening
                 * again.
                 */
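                /*
                 * The reduction below is poll_burst / 8, i.e. 12.5%;
                 * e.g. a burst of 150 becomes 150 - 150/8 = 132.
                 */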
                io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
                if (io_ctx->poll_burst < 1)
                        io_ctx->poll_burst = 1;
                sched_iopoll(io_ctx);
                io_ctx->phase = 6;
        }

        crit_exit_quick(td);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
                  struct iopoll_ctx *io_ctx)
{
        SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
                        CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
                        "IU", "Max Polling burst size");

        SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
                        CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
                        "IU", "Max size of each burst");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
                        &io_ctx->phase, 0, "Polling phase");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
                        &io_ctx->suspect, 0, "suspect event");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
                        &io_ctx->stalled, 0, "potential stalls");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
                        &io_ctx->poll_burst, 0, "Current polling burst size");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
                        &io_ctx->user_frac, 0,
                        "Desired user fraction of cpu time");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
                        &io_ctx->kern_frac, 0,
                        "Kernel fraction of cpu time");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
                        &io_ctx->short_ticks, 0,
                        "Hardclock ticks shorter than they should be");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
                        &io_ctx->lost_polls, 0,
                        "How many times we would have lost a poll tick");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
                        &io_ctx->pending_polls, 0, "Do we need to poll again");

        SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
                       &io_ctx->residual_burst, 0,
                       "# of residual cycles in burst");

        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
                        &io_ctx->poll_handlers, 0,
                        "Number of registered poll handlers");
}
static void
sysctl_burstmax_handler(struct netmsg *nmsg)
{
        struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
        struct iopoll_ctx *io_ctx;

        io_ctx = msg->ctx;
        KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        io_ctx->poll_burst_max = nmsg->nm_lmsg.u.ms_result;
        if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
                io_ctx->poll_each_burst = io_ctx->poll_burst_max;
        if (io_ctx->poll_burst > io_ctx->poll_burst_max)
                io_ctx->poll_burst = io_ctx->poll_burst_max;
        if (io_ctx->residual_burst > io_ctx->poll_burst_max)
                io_ctx->residual_burst = io_ctx->poll_burst_max;

        lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
        struct iopoll_ctx *io_ctx = arg1;
        struct iopoll_sysctl_netmsg msg;
        struct netmsg *nmsg;
        uint32_t burst_max;
        int error;

        burst_max = io_ctx->poll_burst_max;
        error = sysctl_handle_int(oidp, &burst_max, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (burst_max < MIN_IOPOLL_BURST_MAX)
                burst_max = MIN_IOPOLL_BURST_MAX;
        else if (burst_max > MAX_IOPOLL_BURST_MAX)
                burst_max = MAX_IOPOLL_BURST_MAX;

        nmsg = &msg.nmsg;
        netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
                    sysctl_burstmax_handler);
        nmsg->nm_lmsg.u.ms_result = burst_max;
        msg.ctx = io_ctx;

        return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}
static void
sysctl_eachburst_handler(struct netmsg *nmsg)
{
        struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
        struct iopoll_ctx *io_ctx;
        uint32_t each_burst;

        io_ctx = msg->ctx;
        KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        each_burst = nmsg->nm_lmsg.u.ms_result;
        if (each_burst > io_ctx->poll_burst_max)
                each_burst = io_ctx->poll_burst_max;
        else if (each_burst < 1)
                each_burst = 1;
        io_ctx->poll_each_burst = each_burst;

        lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
        struct iopoll_ctx *io_ctx = arg1;
        struct iopoll_sysctl_netmsg msg;
        struct netmsg *nmsg;
        uint32_t each_burst;
        int error;

        each_burst = io_ctx->poll_each_burst;
        error = sysctl_handle_int(oidp, &each_burst, 0, req);
        if (error || req->newptr == NULL)
                return error;

        nmsg = &msg.nmsg;
        netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
                    sysctl_eachburst_handler);
        nmsg->nm_lmsg.u.ms_result = each_burst;
        msg.ctx = io_ctx;

        return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
                const struct ifpoll_io *io_rec)
{
        int error;

        KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        if (io_rec->poll_func == NULL)
                return 0;

        /*
         * Check if there is room.
         */
        if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
                /*
                 * List full, cannot register more entries.
                 * This should never happen; if it does, it is probably a
                 * broken driver trying to register multiple times.  Checking
                 * this at runtime is expensive, and won't solve the problem
                 * anyway, so just report a few times and then give up.
                 */
                static int verbose = 10; /* XXX */

                if (verbose > 0) {
                        kprintf("io poll handlers list full, "
                                "maybe a broken driver?\n");
                        verbose--;
                }
                error = ENOENT;
        } else {
                struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

                rec->ifp = ifp;
                rec->serializer = io_rec->serializer;
                rec->arg = io_rec->arg;
                rec->poll_func = io_rec->poll_func;

                io_ctx->poll_handlers++;
                error = 0;
        }
        return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
        int i, error;

        KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

        for (i = 0; i < io_ctx->poll_handlers; ++i) {
                if (io_ctx->pr[i].ifp == ifp) /* Found it */
                        break;
        }
        if (i == io_ctx->poll_handlers) {
                error = ENOENT;
        } else {
                io_ctx->poll_handlers--;
                if (i < io_ctx->poll_handlers) {
                        /* Last entry replaces this one. */
                        io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
                }

                if (io_ctx->poll_handlers == 0)
                        iopoll_reset_state(io_ctx);
                error = 0;
        }
        return error;
}
static void
poll_comm_init(int cpuid)
{
        struct poll_comm *comm;
        char cpuid_str[16];

        comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

        comm->pollhz = ifpoll_pollhz;
        comm->poll_cpuid = cpuid;
        comm->poll_stfrac = ifpoll_stfrac;
        comm->poll_txfrac = ifpoll_txfrac;

        ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

        sysctl_ctx_init(&comm->sysctl_ctx);
        comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
                            SYSCTL_STATIC_CHILDREN(_net_ifpoll),
                            OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

        SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
                        OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
                        comm, 0, sysctl_pollhz,
                        "I", "Device polling frequency");

        poll_common[cpuid] = comm;
}
static void
poll_comm_start(int cpuid)
{
        struct poll_comm *comm = poll_common[cpuid];
        void (*func)(systimer_t, struct intrframe *);

        /*
         * Initialize systimer
         */
        if (cpuid == 0)
                func = poll_comm_systimer0;
        else
                func = poll_comm_systimer;
        systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}
static void
_poll_comm_systimer(struct poll_comm *comm)
{
        if (comm->txfrac_count-- == 0) {
                comm->txfrac_count = comm->poll_txfrac;
                iopoll_clock(txpoll_context[comm->poll_cpuid]);
        }
        iopoll_clock(rxpoll_context[comm->poll_cpuid]);
}
static void
poll_comm_systimer0(systimer_t info, struct intrframe *frame __unused)
{
        struct poll_comm *comm = info->data;
        globaldata_t gd = mycpu;

        KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

        crit_enter_gd(gd);

        if (comm->stfrac_count-- == 0) {
                comm->stfrac_count = comm->poll_stfrac;
                stpoll_clock(&stpoll_context);
        }
        _poll_comm_systimer(comm);

        crit_exit_gd(gd);
}
static void
poll_comm_systimer(systimer_t info, struct intrframe *frame __unused)
{
        struct poll_comm *comm = info->data;
        globaldata_t gd = mycpu;

        KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

        crit_enter_gd(gd);
        _poll_comm_systimer(comm);
        crit_exit_gd(gd);
}
static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
        uint32_t handlers;
        int pollhz = 1;

        KKASSERT(&curthread->td_msgport == ifnet_portfn(comm->poll_cpuid));

        /*
         * If there is no polling handler registered, set systimer
         * frequency to the lowest value.  Polling systimer frequency
         * will be adjusted to the requested value, once there are
         * registered handlers.
         */
        handlers = rxpoll_context[mycpuid]->poll_handlers +
                   txpoll_context[mycpuid]->poll_handlers;
        if (comm->poll_cpuid == 0)
                handlers += stpoll_context.poll_handlers;
        if (handlers)
                pollhz = comm->pollhz;
        systimer_adjust_periodic(&comm->pollclock, pollhz);
}
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
        struct poll_comm *comm = arg1;
        struct netmsg nmsg;
        int error, phz;

        phz = comm->pollhz;
        error = sysctl_handle_int(oidp, &phz, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (phz <= 0)
                return EINVAL;
        else if (phz > IFPOLL_FREQ_MAX)
                phz = IFPOLL_FREQ_MAX;

        netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
                    sysctl_pollhz_handler);
        nmsg.nm_lmsg.u.ms_result = phz;

        return ifnet_domsg(&nmsg.nm_lmsg, comm->poll_cpuid);
}
static void
sysctl_pollhz_handler(struct netmsg *nmsg)
{
        struct poll_comm *comm = poll_common[mycpuid];

        KKASSERT(&curthread->td_msgport == ifnet_portfn(comm->poll_cpuid));

        /* Save polling frequency */
        comm->pollhz = nmsg->nm_lmsg.u.ms_result;

        /*
         * Adjust cached pollhz
         */
        rxpoll_context[mycpuid]->pollhz = comm->pollhz;
        txpoll_context[mycpuid]->pollhz =
            comm->pollhz / (comm->poll_txfrac + 1);
        stpoll_context.pollhz = comm->pollhz / (comm->poll_stfrac + 1);

        /*
         * Adjust polling frequency
         */
        poll_comm_adjust_pollhz(comm);

        lwkt_replymsg(&nmsg->nm_lmsg, 0);
}