/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if_poll.h>
#include <net/netmsg2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_qpoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs (see the
 * example sketch following this comment):
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_qpoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.{rxX,txX}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
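/*
 * A minimal driver-side sketch of the contract described above.  The
 * driver name "foo", its softc layout, and the foo_* functions are all
 * hypothetical; only the ifpoll_info fields and the if_qpoll calling
 * convention come from this file and <net/if_poll.h>:
 *
 *	static void
 *	foo_qpoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			// Register: fill in the handlers we support.
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->sc_serializer;
 *
 *			info->ifpi_rx[0].poll_func = foo_npoll_rx;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->sc_serializer;
 *
 *			info->ifpi_tx[0].poll_func = foo_npoll_tx;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->sc_serializer;
 *
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_disable_intr(sc);
 *		} else {
 *			// Deregister: resume interrupt-driven operation.
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_enable_intr(sc);
 *		}
 *	}
 */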
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000
#define IOPOLL_FREQ_DEFAULT	IFPOLL_FREQ_DEFAULT
#define STPOLL_FREQ_DEFAULT	100

#define IFPOLL_TXFRAC_DEFAULT	1
#define IFPOLL_STFRAC_DEFAULT	20

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2
struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;

	ifpoll_iofn_t		poll_func;
};
struct iopoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer		pollclock;
#endif

	union ifpoll_time	prev_t;
	uint32_t		short_ticks;		/* statistics */
	uint32_t		lost_polls;		/* statistics */
	uint32_t		suspect;		/* statistics */
	uint32_t		stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg		poll_netmsg;

	int			poll_cpuid;
#ifdef IFPOLL_MULTI_SYSTIMER
	int			pollhz;			/* tunable */
#else
	int			poll_type;		/* IFPOLL_{RX,TX} */
#endif
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct netmsg		poll_more_netmsg;

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};
struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer		pollclock;
#endif

	struct netmsg		poll_netmsg;

#ifdef IFPOLL_MULTI_SYSTIMER
	int			pollhz;			/* tunable */
#endif
	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};
struct iopoll_sysctl_netmsg {
	struct netmsg		nmsg;
	struct iopoll_ctx	*ctx;
};

#ifndef IFPOLL_MULTI_SYSTIMER
struct ifpoll_data {
	struct systimer		clock;
	int			stfrac_count;
	int			txfrac_count;
	uint32_t		rx_cpumask;
	uint32_t		tx_cpumask;
} __cachealign;
#endif
static struct stpoll_ctx	stpoll_context;
static struct iopoll_ctx	*rxpoll_context[IFPOLL_CTX_MAX];
static struct iopoll_ctx	*txpoll_context[IFPOLL_CTX_MAX];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	ifpoll_ncpus = IFPOLL_CTX_MAX;

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
#ifdef IFPOLL_MULTI_SYSTIMER

static int	stpoll_hz = STPOLL_FREQ_DEFAULT;
static int	iopoll_hz = IOPOLL_FREQ_DEFAULT;

TUNABLE_INT("net.ifpoll.stpoll_hz", &stpoll_hz);
TUNABLE_INT("net.ifpoll.iopoll_hz", &iopoll_hz);

#else	/* !IFPOLL_MULTI_SYSTIMER */

static struct ifpoll_data ifpoll0;
static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
static int	ifpoll_handlers;

TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);

static void	sysctl_ifpollhz_handler(struct netmsg *);
static int	sysctl_ifpollhz(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_net_ifpoll, OID_AUTO, pollhz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_ifpollhz, "I", "Polling frequency");
SYSCTL_INT(_net_ifpoll, OID_AUTO, tx_frac, CTLFLAG_RW,
	   &ifpoll_txfrac, 0, "Every this many cycles poll transmit");
SYSCTL_INT(_net_ifpoll, OID_AUTO, st_frac, CTLFLAG_RW,
	   &ifpoll_stfrac, 0, "Every this many cycles poll status");

#endif	/* IFPOLL_MULTI_SYSTIMER */
void		ifpoll_init_pcpu(int);

#ifndef IFPOLL_MULTI_SYSTIMER
static void	ifpoll_start_handler(struct netmsg *);
static void	ifpoll_stop_handler(struct netmsg *);
static void	ifpoll_handler_addevent(void);
static void	ifpoll_handler_delevent(void);
static void	ifpoll_ipi_handler(void *, int);
static void	ifpoll_systimer(systimer_t, struct intrframe *);
#endif

static void	ifpoll_register_handler(struct netmsg *);
static void	ifpoll_deregister_handler(struct netmsg *);

static void	stpoll_init(void);
static void	stpoll_handler(struct netmsg *);
static void	stpoll_clock(struct stpoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	stpoll_systimer(systimer_t, struct intrframe *);
#endif
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_stpollhz_handler(struct netmsg *);
static int	sysctl_stpollhz(SYSCTL_HANDLER_ARGS);
#endif

static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	iopoll_handler(struct netmsg *);
static void	iopollmore_handler(struct netmsg *);
static void	iopoll_clock(struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	iopoll_systimer(systimer_t, struct intrframe *);
#endif
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_iopollhz_handler(struct netmsg *);
static int	sysctl_iopollhz(SYSCTL_HANDLER_ARGS);
#endif
static void	sysctl_burstmax_handler(struct netmsg *);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(struct netmsg *);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
static __inline void
ifpoll_sendmsg_oncpu(struct netmsg *msg)
{
	if (msg->nm_lmsg.ms_flags & MSGF_DONE)
		ifnet_sendmsg(&msg->nm_lmsg, mycpuid);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu(&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_more_netmsg);
}
static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (tsc_present)
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (tsc_present) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}
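/*
 * Typical usage of the pair above (poll_start_t/kern_frac below follow
 * this pattern):
 *
 *	union ifpoll_time start, end;
 *
 *	ifpoll_time_get(&start);
 *	... do the work to be measured ...
 *	ifpoll_time_get(&end);
 *	elapsed_us = ifpoll_time_diff(&start, &end);
 *
 * Both samples must come from the same clock source, which holds here
 * because tsc_present does not change after boot.
 */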
/*
 * Initialize per-cpu qpolling(4) context.  Called from kern_clock.c:
 * hardclock().
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= IFPOLL_CTX_MAX) {
		return;
	} else if (cpuid == 0) {
		if (ifpoll_ncpus > ncpus)
			ifpoll_ncpus = ncpus;
		kprintf("ifpoll_ncpus %d\n", ifpoll_ncpus);

#ifndef IFPOLL_MULTI_SYSTIMER
		systimer_init_periodic_nq(&ifpoll0.clock,
					  ifpoll_systimer, NULL, 1);
#endif

		stpoll_init();
	}
	iopoll_init(cpuid);
}
#ifndef IFPOLL_MULTI_SYSTIMER

static void
ifpoll_ipi_handler(void *arg __unused, int poll)
{
	KKASSERT(mycpuid < ifpoll_ncpus);

	if (poll & IFPOLL_TX)
		iopoll_clock(txpoll_context[mycpuid]);
	if (poll & IFPOLL_RX)
		iopoll_clock(rxpoll_context[mycpuid]);
}
static void
ifpoll_systimer(systimer_t info __unused, struct intrframe *frame __unused)
{
	uint32_t cpumask = 0;

	KKASSERT(mycpuid == 0);
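	/*
	 * This systimer fires at ifpoll_pollhz; RX is polled on every tick,
	 * while status and TX only run on every (frac + 1)th tick.  With
	 * the defaults (pollhz 2000, status_frac 20, tx_frac 1) that is
	 * 2000Hz RX, 1000Hz TX and ~95Hz (2000 / 21) status polling.
	 */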
	if (ifpoll0.stfrac_count-- == 0) {
		ifpoll0.stfrac_count = ifpoll_stfrac;
		stpoll_clock(&stpoll_context);
	}

	if (ifpoll0.txfrac_count-- == 0) {
		ifpoll0.txfrac_count = ifpoll_txfrac;

		/* TODO: We may try to piggyback TX on RX */
		cpumask = smp_active_mask & ifpoll0.tx_cpumask;
		if (cpumask != 0) {
			lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
					     NULL, IFPOLL_TX);
		}
	}

	cpumask = smp_active_mask & ifpoll0.rx_cpumask;
	if (cpumask != 0) {
		lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
				     NULL, IFPOLL_RX);
	}
}
static void
ifpoll_start_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: start\n");
	systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
ifpoll_stop_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: stop\n");
	systimer_adjust_periodic(&ifpoll0.clock, 1);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static void
ifpoll_handler_addevent(void)
{
	if (atomic_fetchadd_int(&ifpoll_handlers, 1) == 0) {
		struct netmsg *nmsg;

		/* The first registered handler starts the systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_start_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}

static void
ifpoll_handler_delevent(void)
{
	KKASSERT(ifpoll_handlers > 0);
	if (atomic_fetchadd_int(&ifpoll_handlers, -1) == 1) {
		struct netmsg *nmsg;

		/* The last deregistered handler stops the systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_stop_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}
static void
sysctl_ifpollhz_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust polling
	 * systimer frequency; polling systimer frequency will be
	 * adjusted once there is a registered handler.
	 */
	ifpoll_pollhz = nmsg->nm_lmsg.u.ms_result;
	if (ifpoll_handlers)
		systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static int
sysctl_ifpollhz(SYSCTL_HANDLER_ARGS)
{
	struct netmsg nmsg;
	int error, phz;

	phz = ifpoll_pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_ifpollhz_handler);
	nmsg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&nmsg.nm_lmsg, 0);
}

#endif	/* !IFPOLL_MULTI_SYSTIMER */
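/*
 * The polling frequency is changed through sysctl(8), e.g.:
 *
 *	sysctl net.ifpoll.pollhz=4000
 *
 * The clamped value travels by netmsg to CPU0's ifnet thread, and the
 * systimer is only reprogrammed while at least one handler is registered.
 */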
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info info;
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		return EBUSY;
	}

	bzero(&info, sizeof(info));
	info.ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_qpoll(ifp, &info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_register_handler);
	nmsg.nm_lmsg.u.ms_resultp = &info;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (error) {
		if (ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}
	return error;
}
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_deregister_handler);
	nmsg.nm_lmsg.u.ms_resultp = ifp;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_qpoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}
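/*
 * The two handlers below run on each CPU in turn: the netmsg starts on
 * CPU0's ifnet thread and is passed along with ifnet_forwardmsg() until
 * every CPU below ifpoll_ncpus has handled its per-cpu RX/TX
 * registration; only the last CPU replies the message.
 */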
static void
ifpoll_register_handler(struct netmsg *nmsg)
{
	const struct ifpoll_info *info = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}
static void
ifpoll_deregister_handler(struct netmsg *nmsg)
{
	struct ifnet *ifp = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

#ifdef IFPOLL_MULTI_SYSTIMER
	st_ctx->pollhz = stpoll_hz;
#endif

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_STATIC_CHILDREN(_net_ifpoll),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			st_ctx, 0, sysctl_stpollhz, "I",
			"Status polling frequency");
#endif

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    stpoll_handler);

#ifdef IFPOLL_MULTI_SYSTIMER
	systimer_init_periodic_nq(&st_ctx->pollclock,
				  stpoll_systimer, st_ctx, 1);
#endif
}
#ifdef IFPOLL_MULTI_SYSTIMER

static void
sysctl_stpollhz_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust polling
	 * systimer frequency; polling systimer frequency will be
	 * adjusted once there is a registered handler.
	 */
	st_ctx->pollhz = msg->nm_lmsg.u.ms_result;
	if (st_ctx->poll_handlers)
		systimer_adjust_periodic(&st_ctx->pollclock, st_ctx->pollhz);

	lwkt_replymsg(&msg->nm_lmsg, 0);
}

static int
sysctl_stpollhz(SYSCTL_HANDLER_ARGS)
{
	struct stpoll_ctx *st_ctx = arg1;
	struct netmsg msg;
	int error, phz;

	phz = st_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&msg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_stpollhz_handler);
	msg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&msg.nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i, poll_hz;

	KKASSERT(&td->td_msgport == ifnet_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = st_ctx->pollhz;
#else
	poll_hz = ifpoll_pollhz / (ifpoll_stfrac + 1);
#endif

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, poll_hz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * Hook from status poll systimer.  Tries to schedule a status poll.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	globaldata_t gd = mycpu;

	KKASSERT(gd->gd_cpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}

#ifdef IFPOLL_MULTI_SYSTIMER
static void
stpoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	stpoll_clock(info->data);
}
#endif
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 1) {
			systimer_adjust_periodic(&st_ctx->pollclock,
						 st_ctx->pollhz);
		}
#else
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}
static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 0)
			systimer_adjust_periodic(&st_ctx->pollclock, 1);
#else
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}
#ifndef IFPOLL_MULTI_SYSTIMER
static __inline int
iopoll_hz(struct iopoll_ctx *io_ctx)
{
	int poll_hz;

	poll_hz = ifpoll_pollhz;
	if (io_ctx->poll_type == IFPOLL_TX)
		poll_hz /= ifpoll_txfrac + 1;
	return poll_hz;
}
#endif

static void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
}
static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < IFPOLL_CTX_MAX);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}
static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	char cpuid_str[16];

	KKASSERT(cpuid < IFPOLL_CTX_MAX);
	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
#ifdef IFPOLL_MULTI_SYSTIMER
	io_ctx->pollhz = iopoll_hz;
#else
	io_ctx->poll_type = poll_type;
#endif
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	netmsg_init(&io_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopoll_handler);
	io_ctx->poll_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopollmore_handler);
	io_ctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";
	ksnprintf(cpuid_str, sizeof(cpuid_str), "%s%d",
		  poll_type_str, io_ctx->poll_cpuid);

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_STATIC_CHILDREN(_net_ifpoll),
				   OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
			  SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);

#ifdef IFPOLL_MULTI_SYSTIMER
	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&io_ctx->pollclock,
				  iopoll_systimer, io_ctx, 1);
#endif

	return io_ctx;
}
/*
 * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	globaldata_t gd = mycpu;
	union ifpoll_time t;
	int delta, poll_hz;

	KKASSERT(gd->gd_cpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
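	/*
	 * delta is in microseconds and a full tick lasts 1000000 / poll_hz
	 * microseconds, so delta * poll_hz < 500000 flags a tick that
	 * arrived in less than half of the expected period.
	 */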
	if (delta * poll_hz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}
#ifdef IFPOLL_MULTI_SYSTIMER
static void
iopoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	iopoll_clock(info->data);
}
#endif
/*
 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
iopoll_handler(struct netmsg *msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * iopollmore_handler is called after other netisr's, possibly scheduling
 * another iopoll_handler call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisr's between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low level handling.
 */
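/*
 * For example, with poll_each_burst at its default of 5 and poll_burst
 * grown to its default maximum of 150, a single tick is served as up to
 * 30 chunks of at most 5 packets each, yielding to the other netisr's
 * between chunks.
 */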
static void
iopollmore_handler(struct netmsg *msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load, poll_hz;
	uint32_t pending_polls;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
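	/*
	 * kern_load is in microseconds and a tick lasts 1000000 / poll_hz
	 * microseconds, so (kern_load * poll_hz) / 10000 is the percentage
	 * of the tick spent in the handlers, i.e.
	 * kern_load / (1000000 / poll_hz) scaled by 100.
	 */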
	kern_load = (kern_load * poll_hz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try to decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		  struct iopoll_ctx *io_ctx)
{
#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
			CTLTYPE_INT | CTLFLAG_RW, io_ctx, 0, sysctl_iopollhz,
			"I", "Device polling frequency");
#endif

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
			"IU", "Max Polling burst size");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
			"IU", "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&io_ctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&io_ctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
			&io_ctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&io_ctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
			&io_ctx->kern_frac, 0,
			"Kernel fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&io_ctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&io_ctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
		       &io_ctx->residual_burst, 0,
		       "# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&io_ctx->poll_handlers, 0,
			"Number of registered poll handlers");
}
#ifdef IFPOLL_MULTI_SYSTIMER

static int
sysctl_iopollhz(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	int error, phz;

	phz = io_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_iopollhz_handler);
	nmsg->nm_lmsg.u.ms_result = phz;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static void
sysctl_iopollhz_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	/*
	 * If polling is disabled or there is no polling handler
	 * registered, don't adjust polling systimer frequency.
	 * Polling systimer frequency will be adjusted once there
	 * are registered handlers.
	 */
	io_ctx->pollhz = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_handlers)
		systimer_adjust_periodic(&io_ctx->pollclock, io_ctx->pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */
static void
sysctl_burstmax_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_burstmax_handler);
	nmsg->nm_lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}
static void
sysctl_eachburst_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->nm_lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_eachburst_handler);
	nmsg->nm_lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
		const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		if (io_ctx->poll_handlers == 1) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock,
						 io_ctx->pollhz);
#else
			uint32_t *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT((*mask & mycpu->gd_cpumask) == 0);
			atomic_set_int(mask, mycpu->gd_cpumask);
#endif
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock, 1);
#else
			uint32_t *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT(*mask & mycpu->gd_cpumask);
			atomic_clear_int(mask, mycpu->gd_cpumask);
#endif
			iopoll_reset_state(io_ctx);
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}