/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling could be done at a lower frequency than the RX
 * frequency (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid
 * systimer staggering at high frequency, the RX systimer gives TX and
 * status polling a piggyback.
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_npoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before the related polling
 * handler is called.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
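/*
 * Illustrative sketch, not part of the original file: one way a
 * hypothetical driver could implement the if_npoll contract described
 * above.  Everything prefixed with "xx_" (xx_softc, xx_npoll_status,
 * xx_npoll_rx, xx_npoll_tx, xx_enable_intr, xx_disable_intr, xx_rxeof)
 * is an assumption for illustration only; real drivers differ in detail.
 */
#if 0
static void
xx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct xx_softc *sc = ifp->if_softc;

	if (info != NULL) {
		/* Register: status handler on CPU(0), RX/TX also on CPU(0) */
		info->ifpi_status.status_func = xx_npoll_status;
		info->ifpi_status.serializer = &sc->sc_serializer;

		info->ifpi_rx[0].poll_func = xx_npoll_rx;
		info->ifpi_rx[0].arg = sc;
		info->ifpi_rx[0].serializer = &sc->sc_serializer;

		info->ifpi_tx[0].poll_func = xx_npoll_tx;
		info->ifpi_tx[0].arg = sc;
		info->ifpi_tx[0].serializer = &sc->sc_serializer;

		/*
		 * A valid polling cpuid must be recorded; ifpoll_register()
		 * asserts ifp->if_npoll_cpuid >= 0 after this call returns.
		 */
		ifp->if_npoll_cpuid = 0;

		/* Interrupts stay disabled while polling is active */
		xx_disable_intr(sc);
	} else {
		/* Deregister: fall back to interrupt-driven operation */
		xx_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}

/*
 * An RX handler receives the registered "arg" and a "count" budget and
 * should process at most "count" packets per invocation.
 */
static void
xx_npoll_rx(struct ifnet *ifp __unused, void *arg, int count)
{
	struct xx_softc *sc = arg;

	xx_rxeof(sc, count);
}
#endif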
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/2 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	19	/* 1/20 of the pollhz */
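/*
 * Worked example with the defaults above: at pollhz == 2000, TX is
 * polled every poll_txfrac + 1 == 2 RX ticks (i.e. at 1000Hz) and
 * status is polled every poll_stfrac + 1 == 20 RX ticks (i.e. at 100Hz).
 */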
#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2

struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union ifpoll_time	prev_t;
	u_long			short_ticks;		/* statistics */
	u_long			lost_polls;		/* statistics */
	u_long			suspect;		/* statistics */
	u_long			stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;
	struct netmsg_base	poll_more_netmsg;

	int			poll_cpuid;
	int			pollhz;			/* tunable */
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};
void		ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	rxpoll_handler(netmsg_t);
static void	txpoll_handler(netmsg_t);
static void	rxpollmore_handler(netmsg_t);
static void	txpollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
}
static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (__predict_true(tsc_present))
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (__predict_true(tsc_present)) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}
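/*
 * The TSC branch above scales by 1000000 before dividing by the counter
 * frequency: for instance, a delta of 4000 TSC ticks at tsc_frequency ==
 * 2000000000 (2GHz) yields (4000 * 1000000) / 2000000000 == 2us.
 */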
/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= ncpus2)
		return;

	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);
	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
		ifnet_deserialize_all(ifp);
	}
	return error;
}
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}
static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}
static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, stpoll_handler);
}
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * Hook from the status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}
static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}
static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < ncpus2);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}
static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	netisr_fn_t handler, more_handler;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	if (poll_type == IFPOLL_RX) {
		handler = rxpoll_handler;
		more_handler = rxpollmore_handler;
	} else {
		handler = txpoll_handler;
		more_handler = txpollmore_handler;
	}

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
	    0, more_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);

	return io_ctx;
}
/*
 * Hook from the iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold critical section.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union ifpoll_time t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * rxpollmore_handler and txpollmore_handler are called after other netisrs,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisrs between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low-level handling.
 */
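/*
 * For example, with the defaults defined above (IOPOLL_BURST_MAX 150 and
 * IOPOLL_EACH_BURST 5), a full burst is dispensed as up to 30 chunks of
 * at most 5 packets each, with the other netisrs given a chance to run
 * between chunks.
 */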
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
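	/*
	 * kern_load is in microseconds; one polling tick lasts
	 * 1000000 / pollhz microseconds, so (kern_load * pollhz) / 1000000
	 * is the fraction of the tick spent in the handlers, and dividing
	 * by 10000 instead scales that fraction to a 0..100 percentage.
	 */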
	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;
	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
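		/* Multiplicative decrease: shrink the burst by 1/8 (12.5%). */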
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
    struct iopoll_ctx *io_ctx, int poll_type)
{
	if (poll_type == IFPOLL_RX) {
		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
		    "IU", "Max Polling burst size");

		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
		    "IU", "Max size of each burst");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
		    &io_ctx->poll_burst, 0, "Current polling burst size");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");

		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst",
		    CTLFLAG_RD, &io_ctx->residual_burst, 0,
		    "# of residual cycles in burst");
	}

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
	    &io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
	    &io_ctx->suspect, "Suspected events");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
	    &io_ctx->stalled, "Potential stalls");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
	    &io_ctx->short_ticks,
	    "Hardclock ticks shorter than they should be");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
	    &io_ctx->lost_polls,
	    "How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
	    &io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
}
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}
static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
		const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}
static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 0)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 0)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->pollhz = ifpoll_pollhz;
	comm->poll_cpuid = cpuid;
	comm->poll_stfrac = ifpoll_stfrac;
	comm->poll_txfrac = ifpoll_txfrac;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
				SYSCTL_CHILDREN(comm->sysctl_tree),
				OID_AUTO, "status_frac",
				CTLTYPE_INT | CTLFLAG_RW,
				comm, 0, sysctl_stfrac,
				"I", "# of cycles before status is polled");
	}
	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_txfrac,
			"I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}
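/*
 * Note on poll_comm_start() below: systimer_init_periodic_nq installs
 * the non-queueing periodic variant, which (to our understanding) drops
 * ticks that pile up while a previous tick is still being processed
 * instead of replaying them -- the desired behavior for a poller.
 */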
static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}
static void
_poll_comm_systimer(struct poll_comm *comm)
{
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
}
static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}
static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}
static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
		   txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = comm->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = nmsg->lmsg.u.ms_result;

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}