Add ifpoll, which supports hardware TX/RX queue based polling.
[dragonfly.git] / sys / net / if_poll.c
1/*-
2 * Copyright (c) 2001-2002 Luigi Rizzo
3 *
4 * Supported by: the Xorp Project (www.xorp.org)
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28 */
29
30#include "opt_ifpoll.h"
31
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/ktr.h>
35#include <sys/malloc.h>
36#include <sys/serialize.h>
37#include <sys/socket.h>
38#include <sys/sysctl.h>
39
40#include <sys/thread2.h>
41#include <sys/msgport2.h>
42
43#include <machine/atomic.h>
44#include <machine/smp.h>
45
46#include <net/if.h>
47#include <net/if_poll.h>
48#include <net/netmsg2.h>
49
50/*
51 * Polling support for network device drivers.
52 *
53 * Drivers which support this feature try to register one status polling
54 * handler and several TX/RX polling handlers with the polling code.
55 * If the interface's if_qpoll is called with a non-NULL second argument,
56 * a register operation is requested, else a deregister operation is
57 * requested. If the requested operation is "register", the driver should
58 * set up the ifpoll_info passed in according to its own needs:
59 * ifpoll_info.ifpi_status.status_func == NULL
60 * No status polling handler will be installed on CPU(0)
61 * ifpoll_info.ifpi_rx[n].poll_func == NULL
62 * No RX polling handler will be installed on CPU(n)
63 * ifpoll_info.ifpi_tx[n].poll_func == NULL
64 * No TX polling handler will be installed on CPU(n)
65 *
66 * All of the registered polling handlers are called only if the interface
67 * is marked with both IFF_RUNNING and IFF_NPOLLING. However, the interface's
68 * register and deregister function (ifnet.if_qpoll) will be called even
69 * if the interface is not marked with IFF_RUNNING.
70 *
71 * If registration is successful, the driver must disable interrupts,
72 * and further I/O is performed through the TX/RX polling handlers, which
73 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
74 * passed at register time, a struct ifnet pointer, and a "count" limit.
75 * The registered serializer is held while the related polling handler
76 * is called.
77 *
78 * The count limit specifies how much work the handler can do during the
79 * call -- typically this is the number of packets to be received, or
80 * transmitted, etc. (drivers are free to interpret this number, as long
81 * as the max time spent in the function grows roughly linearly with the
82 * count).
83 *
84 * A second variable controls the sharing of CPU between polling/kernel
85 * network processing, and other activities (typically userlevel tasks):
86 * net.ifpoll.{rxX,txX}.user_frac (between 0 and 100, default 50) sets the
87 * share of CPU allocated to user tasks. CPU is allocated proportionally
88 * to the shares, by dynamically adjusting the "count" (poll_burst).
89 *
90 * Other parameters should be left at their default values.
91 * The following constraints hold:
92 *
93 * 1 <= poll_burst <= poll_burst_max
94 * 1 <= poll_each_burst <= poll_burst_max
95 * MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
96 */
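/*
 * Purely as an illustration of the protocol described above (not part of
 * this file), a hypothetical driver "foo" could implement its if_qpoll
 * method roughly as sketched below.  All foo_* names are made up; only
 * the ifpoll_info/ifpoll_status/ifpoll_io fields and the handler argument
 * lists are taken from this code.
 */
#if 0
static void	foo_npoll_status(struct ifnet *ifp, int pollhz);
static void	foo_rxpoll(struct ifnet *ifp, void *arg, int cycle);
static void	foo_txpoll(struct ifnet *ifp, void *arg, int cycle);

static void
foo_qpoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct foo_softc *sc = ifp->if_softc;

	if (info != NULL) {
		/*
		 * Register: status polling runs on CPU0; a single-queue
		 * driver installs one RX and one TX handler on CPU0.  A
		 * multi-queue driver would fill in ifpi_rx[n]/ifpi_tx[n]
		 * for each queue's target CPU instead.
		 */
		info->ifpi_status.status_func = foo_npoll_status;
		info->ifpi_status.serializer = &sc->foo_main_serialize;

		info->ifpi_rx[0].poll_func = foo_rxpoll;
		info->ifpi_rx[0].arg = &sc->foo_rx_ring;
		info->ifpi_rx[0].serializer = &sc->foo_rx_serialize;

		info->ifpi_tx[0].poll_func = foo_txpoll;
		info->ifpi_tx[0].arg = &sc->foo_tx_ring;
		info->ifpi_tx[0].serializer = &sc->foo_tx_serialize;

		/* Polling takes over; the interrupt must be masked. */
		if (ifp->if_flags & IFF_RUNNING)
			foo_disable_intr(sc);
	} else {
		/* Deregister: hand I/O back to the interrupt handler. */
		if (ifp->if_flags & IFF_RUNNING)
			foo_enable_intr(sc);
	}
}
#endif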
97
98#define IFPOLL_LIST_LEN 128
99#define IFPOLL_FREQ_MAX 30000
100
101#define MIN_IOPOLL_BURST_MAX 10
102#define MAX_IOPOLL_BURST_MAX 1000
103#define IOPOLL_BURST_MAX 150 /* good for 100Mbit net and HZ=1000 */
104
105#define IOPOLL_EACH_BURST 5
106
107#define IFPOLL_FREQ_DEFAULT 2000
108#define IOPOLL_FREQ_DEFAULT IFPOLL_FREQ_DEFAULT
109#define STPOLL_FREQ_DEFAULT 100
110
111#define IFPOLL_TXFRAC_DEFAULT 1
112#define IFPOLL_STFRAC_DEFAULT 20
113
114#define IFPOLL_RX 0x1
115#define IFPOLL_TX 0x2
116
117struct iopoll_rec {
118 struct lwkt_serialize *serializer;
119 struct ifnet *ifp;
120 void *arg;
121 ifpoll_iofn_t poll_func;
122};
123
124struct iopoll_ctx {
125#ifdef IFPOLL_MULTI_SYSTIMER
126 struct systimer pollclock;
127#endif
128
129 struct timeval prev_t; /* state */
130 uint32_t short_ticks; /* statistics */
131 uint32_t lost_polls; /* statistics */
132 uint32_t suspect; /* statistics */
133 uint32_t stalled; /* statistics */
134 uint32_t pending_polls; /* state */
135
136 struct netmsg poll_netmsg;
137
138 int poll_cpuid;
139#ifdef IFPOLL_MULTI_SYSTIMER
140 int pollhz; /* tunable */
141#else
142 int poll_type; /* IFPOLL_{RX,TX} */
143#endif
144 uint32_t phase; /* state */
145 int residual_burst; /* state */
146 uint32_t poll_each_burst; /* tunable */
147 struct timeval poll_start_t; /* state */
148
149 uint32_t poll_handlers; /* next free entry in pr[]. */
150 struct iopoll_rec pr[IFPOLL_LIST_LEN];
151
152 struct netmsg poll_more_netmsg;
153
154 uint32_t poll_burst; /* state */
155 uint32_t poll_burst_max; /* tunable */
156 uint32_t user_frac; /* tunable */
157
158 struct sysctl_ctx_list poll_sysctl_ctx;
159 struct sysctl_oid *poll_sysctl_tree;
160} __cachealign;
161
162struct stpoll_rec {
163 struct lwkt_serialize *serializer;
164 struct ifnet *ifp;
165 ifpoll_stfn_t status_func;
166};
167
168struct stpoll_ctx {
169#ifdef IFPOLL_MULTI_SYSTIMER
170 struct systimer pollclock;
171#endif
172
173 struct netmsg poll_netmsg;
174
175#ifdef IFPOLL_MULTI_SYSTIMER
176 int pollhz; /* tunable */
177#endif
178 uint32_t poll_handlers; /* next free entry in pr[]. */
179 struct stpoll_rec pr[IFPOLL_LIST_LEN];
180
181 struct sysctl_ctx_list poll_sysctl_ctx;
182 struct sysctl_oid *poll_sysctl_tree;
183};
184
185struct iopoll_sysctl_netmsg {
186 struct netmsg nmsg;
187 struct iopoll_ctx *ctx;
188};
189
190#ifndef IFPOLL_MULTI_SYSTIMER
191
192struct ifpoll_data {
193 struct systimer clock;
194 int txfrac_count;
195 int stfrac_count;
196 u_int tx_cpumask;
197 u_int rx_cpumask;
198} __cachealign;
199
200#endif
201
202static struct stpoll_ctx stpoll_context;
203static struct iopoll_ctx *rxpoll_context[IFPOLL_CTX_MAX];
204static struct iopoll_ctx *txpoll_context[IFPOLL_CTX_MAX];
205
206SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
207 "Network device polling parameters");
208
209static int ifpoll_ncpus = IFPOLL_CTX_MAX;
210
211static int iopoll_burst_max = IOPOLL_BURST_MAX;
212static int iopoll_each_burst = IOPOLL_EACH_BURST;
213
214TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
215TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
216
217#ifdef IFPOLL_MULTI_SYSTIMER
218
219static int stpoll_hz = STPOLL_FREQ_DEFAULT;
220static int iopoll_hz = IOPOLL_FREQ_DEFAULT;
221
222TUNABLE_INT("net.ifpoll.stpoll_hz", &stpoll_hz);
223TUNABLE_INT("net.ifpoll.iopoll_hz", &iopoll_hz);
224
225#else /* !IFPOLL_MULTI_SYSTIMER */
226
227static struct ifpoll_data ifpoll0;
228static int ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
229static int ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
230static int ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
231static int ifpoll_handlers;
232
233TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
234TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
235TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
236
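/*
 * The above tunables can be preset from loader.conf(5), e.g. (values are
 * only examples):
 *	net.ifpoll.pollhz="4000"
 *	net.ifpoll.status_frac="40"
 *	net.ifpoll.tx_frac="2"
 * pollhz can additionally be changed at runtime through the sysctl of the
 * same name declared below.
 */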
237static void sysctl_ifpollhz_handler(struct netmsg *);
238static int sysctl_ifpollhz(SYSCTL_HANDLER_ARGS);
239
240SYSCTL_PROC(_net_ifpoll, OID_AUTO, pollhz, CTLTYPE_INT | CTLFLAG_RW,
241 0, 0, sysctl_ifpollhz, "I", "Polling frequency");
242SYSCTL_INT(_net_ifpoll, OID_AUTO, tx_frac, CTLFLAG_RW,
243 &ifpoll_txfrac, 0, "Every this many cycles poll transmit");
244SYSCTL_INT(_net_ifpoll, OID_AUTO, st_frac, CTLFLAG_RW,
245 &ifpoll_stfrac, 0, "Every this many cycles poll status");
246
247#endif /* IFPOLL_MULTI_SYSTIMER */
248
249void ifpoll_init_pcpu(int);
250
251#ifndef IFPOLL_MULTI_SYSTIMER
252static void ifpoll_start_handler(struct netmsg *);
253static void ifpoll_stop_handler(struct netmsg *);
254static void ifpoll_handler_addevent(void);
255static void ifpoll_handler_delevent(void);
256static void ifpoll_ipi_handler(void *, int);
257static void ifpoll_systimer(systimer_t, struct intrframe *);
258#endif
259
260static void ifpoll_register_handler(struct netmsg *);
261static void ifpoll_deregister_handler(struct netmsg *);
262
263/*
264 * Status polling
265 */
266static void stpoll_init(void);
267static void stpoll_handler(struct netmsg *);
268static void stpoll_clock(struct stpoll_ctx *);
269#ifdef IFPOLL_MULTI_SYSTIMER
270static void stpoll_systimer(systimer_t, struct intrframe *);
271#endif
272static int stpoll_register(struct ifnet *, const struct ifpoll_status *);
273static int stpoll_deregister(struct ifnet *);
274
275#ifdef IFPOLL_MULTI_SYSTIMER
276static void sysctl_stpollhz_handler(struct netmsg *);
277static int sysctl_stpollhz(SYSCTL_HANDLER_ARGS);
278#endif
279
280/*
281 * RX/TX polling
282 */
283static struct iopoll_ctx *iopoll_ctx_create(int, int);
284static void iopoll_init(int);
285static void iopoll_handler(struct netmsg *);
286static void iopollmore_handler(struct netmsg *);
287static void iopoll_clock(struct iopoll_ctx *);
288#ifdef IFPOLL_MULTI_SYSTIMER
289static void iopoll_systimer(systimer_t, struct intrframe *);
290#endif
291static int iopoll_register(struct ifnet *, struct iopoll_ctx *,
292 const struct ifpoll_io *);
293static int iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
294
295static void iopoll_add_sysctl(struct sysctl_ctx_list *,
296 struct sysctl_oid_list *, struct iopoll_ctx *);
297#ifdef IFPOLL_MULTI_SYSTIMER
298static void sysctl_iopollhz_handler(struct netmsg *);
299static int sysctl_iopollhz(SYSCTL_HANDLER_ARGS);
300#endif
301static void sysctl_burstmax_handler(struct netmsg *);
302static int sysctl_burstmax(SYSCTL_HANDLER_ARGS);
303static void sysctl_eachburst_handler(struct netmsg *);
304static int sysctl_eachburst(SYSCTL_HANDLER_ARGS);
305
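/*
 * Send the given poll netmsg to the current CPU's ifnet message port, but
 * only if the previous incarnation of the message has already been replied
 * (MSGF_DONE); otherwise a poll is still pending on this CPU and nothing
 * needs to be done.
 */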
306static void
307ifpoll_sendmsg_oncpu(struct netmsg *msg)
308{
309 if (msg->nm_lmsg.ms_flags & MSGF_DONE)
310 ifnet_sendmsg(&msg->nm_lmsg, mycpuid);
311}
312
313static void
314sched_stpoll(struct stpoll_ctx *st_ctx)
315{
316 crit_enter();
317 ifpoll_sendmsg_oncpu(&st_ctx->poll_netmsg);
318 crit_exit();
319}
320
321static void
322sched_iopoll(struct iopoll_ctx *io_ctx)
323{
324 crit_enter();
325 ifpoll_sendmsg_oncpu(&io_ctx->poll_netmsg);
326 crit_exit();
327}
328
329static void
330sched_iopollmore(struct iopoll_ctx *io_ctx)
331{
332 ifpoll_sendmsg_oncpu(&io_ctx->poll_more_netmsg);
333}
334
335/*
336 * Initialize per-cpu qpolling(4) context. Called from kern_clock.c:
337 */
338void
339ifpoll_init_pcpu(int cpuid)
340{
341 if (cpuid >= IFPOLL_CTX_MAX) {
342 return;
343 } else if (cpuid == 0) {
344 if (ifpoll_ncpus > ncpus)
345 ifpoll_ncpus = ncpus;
346 kprintf("ifpoll_ncpus %d\n", ifpoll_ncpus);
347
348#ifndef IFPOLL_MULTI_SYSTIMER
349 systimer_init_periodic_nq(&ifpoll0.clock,
350 ifpoll_systimer, NULL, 1);
351#endif
352
353 stpoll_init();
354 }
355 iopoll_init(cpuid);
356}
357
358#ifndef IFPOLL_MULTI_SYSTIMER
359
360static void
361ifpoll_ipi_handler(void *arg __unused, int poll)
362{
363 KKASSERT(mycpuid < ifpoll_ncpus);
364
365 if (poll & IFPOLL_TX)
366 iopoll_clock(txpoll_context[mycpuid]);
367 if (poll & IFPOLL_RX)
368 iopoll_clock(rxpoll_context[mycpuid]);
369}
370
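/*
 * Single systimer hook, run on CPU0 when IFPOLL_MULTI_SYSTIMER is not
 * defined.  Once handlers are registered it fires ifpoll_pollhz times per
 * second: RX polling is IPI'd to the CPUs in rx_cpumask on every tick,
 * TX polling to the CPUs in tx_cpumask every (ifpoll_txfrac + 1) ticks,
 * and status polling is run on CPU0 every (ifpoll_stfrac + 1) ticks.
 */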
371static void
372ifpoll_systimer(systimer_t info __unused, struct intrframe *frame __unused)
373{
374 uint32_t cpumask = 0;
375
376 KKASSERT(mycpuid == 0);
377
378 if (ifpoll0.stfrac_count-- == 0) {
379 ifpoll0.stfrac_count = ifpoll_stfrac;
380 stpoll_clock(&stpoll_context);
381 }
382
383 if (ifpoll0.txfrac_count-- == 0) {
384 ifpoll0.txfrac_count = ifpoll_txfrac;
385
386 /* TODO: We may try to piggyback TX on RX */
387 cpumask = smp_active_mask & ifpoll0.tx_cpumask;
388 if (cpumask != 0) {
389 lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
390 NULL, IFPOLL_TX);
391 }
392 }
393
394 cpumask = smp_active_mask & ifpoll0.rx_cpumask;
395 if (cpumask != 0) {
396 lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
397 NULL, IFPOLL_RX);
398 }
399}
400
401static void
402ifpoll_start_handler(struct netmsg *nmsg)
403{
404 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
405
406 kprintf("ifpoll: start\n");
407 systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);
408 lwkt_replymsg(&nmsg->nm_lmsg, 0);
409}
410
411static void
412ifpoll_stop_handler(struct netmsg *nmsg)
413{
414 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
415
416 kprintf("ifpoll: stop\n");
417 systimer_adjust_periodic(&ifpoll0.clock, 1);
418 lwkt_replymsg(&nmsg->nm_lmsg, 0);
419}
420
421static void
422ifpoll_handler_addevent(void)
423{
424 if (atomic_fetchadd_int(&ifpoll_handlers, 1) == 0) {
425 struct netmsg *nmsg;
426
427 /* Start systimer */
428 nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
429 netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_start_handler);
430 ifnet_sendmsg(&nmsg->nm_lmsg, 0);
431 }
432}
433
434static void
435ifpoll_handler_delevent(void)
436{
437 KKASSERT(ifpoll_handlers > 0);
438 if (atomic_fetchadd_int(&ifpoll_handlers, -1) == 1) {
439 struct netmsg *nmsg;
440
441 /* Stop systimer */
442 nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
443 netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_stop_handler);
444 ifnet_sendmsg(&nmsg->nm_lmsg, 0);
445 }
446}
447
448static void
449sysctl_ifpollhz_handler(struct netmsg *nmsg)
450{
451 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
452
453 /*
454 * If there is no handler registered, don't adjust polling
455 * systimer frequency; polling systimer frequency will be
456 * adjusted once a handler is registered.
457 */
458 ifpoll_pollhz = nmsg->nm_lmsg.u.ms_result;
459 if (ifpoll_handlers)
460 systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);
461
462 lwkt_replymsg(&nmsg->nm_lmsg, 0);
463}
464
465static int
466sysctl_ifpollhz(SYSCTL_HANDLER_ARGS)
467{
468 struct netmsg nmsg;
469 int error, phz;
470
471 phz = ifpoll_pollhz;
472 error = sysctl_handle_int(oidp, &phz, 0, req);
473 if (error || req->newptr == NULL)
474 return error;
475 if (phz <= 0)
476 return EINVAL;
477 else if (phz > IFPOLL_FREQ_MAX)
478 phz = IFPOLL_FREQ_MAX;
479
480 netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
481 sysctl_ifpollhz_handler);
482 nmsg.nm_lmsg.u.ms_result = phz;
483
484 return ifnet_domsg(&nmsg.nm_lmsg, 0);
485}
486
487#endif /* !IFPOLL_MULTI_SYSTIMER */
488
489int
490ifpoll_register(struct ifnet *ifp)
491{
492 struct ifpoll_info info;
493 struct netmsg nmsg;
494 int error;
495
496 if (ifp->if_qpoll == NULL) {
497 /* Device does not support polling */
498 return EOPNOTSUPP;
499 }
500
501 /*
502 * Attempt to register. Interlock with IFF_NPOLLING.
503 */
504
505 ifnet_serialize_all(ifp);
506
507 if (ifp->if_flags & IFF_NPOLLING) {
508 /* Already polling */
509 ifnet_deserialize_all(ifp);
510 return EBUSY;
511 }
512
513 bzero(&info, sizeof(info));
514 info.ifpi_ifp = ifp;
515
516 ifp->if_flags |= IFF_NPOLLING;
517 ifp->if_qpoll(ifp, &info);
518
519 ifnet_deserialize_all(ifp);
520
521 netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
522 ifpoll_register_handler);
523 nmsg.nm_lmsg.u.ms_resultp = &info;
524
525 error = ifnet_domsg(&nmsg.nm_lmsg, 0);
526 if (error) {
527 if (!ifpoll_deregister(ifp)) {
528 if_printf(ifp, "ifpoll_register: "
529 "ifpoll_deregister failed!\n");
530 }
531 }
532 return error;
533}
534
535int
536ifpoll_deregister(struct ifnet *ifp)
537{
538 struct netmsg nmsg;
539 int error;
540
541 if (ifp->if_qpoll == NULL)
542 return EOPNOTSUPP;
543
544 ifnet_serialize_all(ifp);
545
546 if ((ifp->if_flags & IFF_NPOLLING) == 0) {
547 ifnet_deserialize_all(ifp);
548 return EINVAL;
549 }
550 ifp->if_flags &= ~IFF_NPOLLING;
551
552 ifnet_deserialize_all(ifp);
553
554 netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
555 ifpoll_deregister_handler);
556 nmsg.nm_lmsg.u.ms_resultp = ifp;
557
558 error = ifnet_domsg(&nmsg.nm_lmsg, 0);
559 if (!error) {
560 ifnet_serialize_all(ifp);
561 ifp->if_qpoll(ifp, NULL);
562 ifnet_deserialize_all(ifp);
563 }
564 return error;
565}
566
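/*
 * Runs on each polling CPU in turn; the netmsg is forwarded from CPU n to
 * CPU n + 1 until all ifpoll_ncpus CPUs have been visited.  The status
 * handler is installed on CPU0 only, the RX/TX handlers on every CPU.
 * Any failure aborts the chain and the error is reported back to the
 * original requestor.
 */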
567static void
568ifpoll_register_handler(struct netmsg *nmsg)
569{
570 const struct ifpoll_info *info = nmsg->nm_lmsg.u.ms_resultp;
571 int cpuid = mycpuid, nextcpu;
572 int error;
573
574 KKASSERT(cpuid < ifpoll_ncpus);
575 KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));
576
577 if (cpuid == 0) {
578 error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
579 if (error)
580 goto failed;
581 }
582
583 error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
584 &info->ifpi_rx[cpuid]);
585 if (error)
586 goto failed;
587
588 error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
589 &info->ifpi_tx[cpuid]);
590 if (error)
591 goto failed;
592
593 nextcpu = cpuid + 1;
594 if (nextcpu < ifpoll_ncpus)
595 ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
596 else
597 lwkt_replymsg(&nmsg->nm_lmsg, 0);
598 return;
599failed:
600 lwkt_replymsg(&nmsg->nm_lmsg, error);
601}
602
603static void
604ifpoll_deregister_handler(struct netmsg *nmsg)
605{
606 struct ifnet *ifp = nmsg->nm_lmsg.u.ms_resultp;
607 int cpuid = mycpuid, nextcpu;
608
609 KKASSERT(cpuid < ifpoll_ncpus);
610 KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));
611
612 /* Ignore errors */
613 if (cpuid == 0)
614 stpoll_deregister(ifp);
615 iopoll_deregister(ifp, rxpoll_context[cpuid]);
616 iopoll_deregister(ifp, txpoll_context[cpuid]);
617
618 nextcpu = cpuid + 1;
619 if (nextcpu < ifpoll_ncpus)
620 ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
621 else
622 lwkt_replymsg(&nmsg->nm_lmsg, 0);
623}
624
625static void
626stpoll_init(void)
627{
628 struct stpoll_ctx *st_ctx = &stpoll_context;
629
630#ifdef IFPOLL_MULTI_SYSTIMER
631 st_ctx->pollhz = stpoll_hz;
632#endif
633
634 sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
635 st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
636 SYSCTL_STATIC_CHILDREN(_net_ifpoll),
637 OID_AUTO, "status", CTLFLAG_RD, 0, "");
638
639#ifdef IFPOLL_MULTI_SYSTIMER
640 SYSCTL_ADD_PROC(&st_ctx->poll_sysctl_ctx,
641 SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
642 OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
643 st_ctx, 0, sysctl_stpollhz, "I",
644 "Status polling frequency");
645#endif
646
647 SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
648 SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
649 OID_AUTO, "handlers", CTLFLAG_RD,
650 &st_ctx->poll_handlers, 0,
651 "Number of registered status poll handlers");
652
653 netmsg_init(&st_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
654 stpoll_handler);
655
656#ifdef IFPOLL_MULTI_SYSTIMER
657 systimer_init_periodic_nq(&st_ctx->pollclock,
658 stpoll_systimer, st_ctx, 1);
659#endif
660}
661
662#ifdef IFPOLL_MULTI_SYSTIMER
663
664static void
665sysctl_stpollhz_handler(struct netmsg *msg)
666{
667 struct stpoll_ctx *st_ctx = &stpoll_context;
668
669 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
670
671 /*
672 * If there is no handler registered, don't adjust polling
673 * systimer frequency; polling systimer frequency will be
674 * adjusted once a handler is registered.
675 */
676 st_ctx->pollhz = msg->nm_lmsg.u.ms_result;
677 if (st_ctx->poll_handlers)
678 systimer_adjust_periodic(&st_ctx->pollclock, st_ctx->pollhz);
679
680 lwkt_replymsg(&msg->nm_lmsg, 0);
681}
682
683static int
684sysctl_stpollhz(SYSCTL_HANDLER_ARGS)
685{
686 struct stpoll_ctx *st_ctx = arg1;
687 struct netmsg msg;
688 int error, phz;
689
690 phz = st_ctx->pollhz;
691 error = sysctl_handle_int(oidp, &phz, 0, req);
692 if (error || req->newptr == NULL)
693 return error;
694 if (phz <= 0)
695 return EINVAL;
696 else if (phz > IFPOLL_FREQ_MAX)
697 phz = IFPOLL_FREQ_MAX;
698
699 netmsg_init(&msg, &curthread->td_msgport, MSGF_MPSAFE,
700 sysctl_stpollhz_handler);
701 msg.nm_lmsg.u.ms_result = phz;
702
703 return ifnet_domsg(&msg.nm_lmsg, 0);
704}
705
706#endif /* IFPOLL_MULTI_SYSTIMER */
707
708/*
709 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
710 * once per polling systimer tick.
711 */
712static void
713stpoll_handler(struct netmsg *msg)
714{
715 struct stpoll_ctx *st_ctx = &stpoll_context;
716 int i, poll_hz;
717
718 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
719
720 /* Reply ASAP */
721 crit_enter();
722 lwkt_replymsg(&msg->nm_lmsg, 0);
723 crit_exit();
724
725 if (st_ctx->poll_handlers == 0)
726 return;
727
728#ifdef IFPOLL_MULTI_SYSTIMER
729 poll_hz = st_ctx->pollhz;
730#else
731 poll_hz = ifpoll_pollhz / (ifpoll_stfrac + 1);
732#endif
733
734 for (i = 0; i < st_ctx->poll_handlers; ++i) {
735 const struct stpoll_rec *rec = &st_ctx->pr[i];
736 struct ifnet *ifp = rec->ifp;
737
738 if (!lwkt_serialize_try(rec->serializer))
739 continue;
740
741 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
742 (IFF_RUNNING | IFF_NPOLLING)) {
743 crit_enter();
744 rec->status_func(ifp, poll_hz);
745 crit_exit();
746 }
747
748 lwkt_serialize_exit(rec->serializer);
749 }
750}
751
752/*
753 * Hook from status poll systimer. Tries to schedule a status poll.
754 */
755static void
756stpoll_clock(struct stpoll_ctx *st_ctx)
757{
758 KKASSERT(mycpuid == 0);
759
760 if (st_ctx->poll_handlers == 0)
761 return;
762 sched_stpoll(st_ctx);
763}
764
765#ifdef IFPOLL_MULTI_SYSTIMER
766static void
767stpoll_systimer(systimer_t info, struct intrframe *frame __unused)
768{
769 stpoll_clock(info->data);
770}
771#endif
772
773static int
774stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
775{
776 struct stpoll_ctx *st_ctx = &stpoll_context;
777 int error;
778
779 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
780
781 if (st_rec->status_func == NULL)
782 return 0;
783
784 /*
785 * Check if there is room.
786 */
787 if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
788 /*
789 * List full, cannot register more entries.
790 * This should never happen; if it does, it is probably a
791 * broken driver trying to register multiple times. Checking
792 * this at runtime is expensive, and won't solve the problem
793 * anyway, so just report a few times and then give up.
794 */
795 static int verbose = 10; /* XXX */
796
797 if (verbose > 0) {
798 kprintf("status poll handlers list full, "
799 "maybe a broken driver ?\n");
800 verbose--;
801 }
802 error = ENOENT;
803 } else {
804 struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
805
806 rec->ifp = ifp;
807 rec->serializer = st_rec->serializer;
808 rec->status_func = st_rec->status_func;
809
810 st_ctx->poll_handlers++;
811
812#ifdef IFPOLL_MULTI_SYSTIMER
813 if (st_ctx->poll_handlers == 1) {
814 systimer_adjust_periodic(&st_ctx->pollclock,
815 st_ctx->pollhz);
816 }
817#else
818 ifpoll_handler_addevent();
819#endif
820 error = 0;
821 }
822 return error;
823}
824
825static int
826stpoll_deregister(struct ifnet *ifp)
827{
828 struct stpoll_ctx *st_ctx = &stpoll_context;
829 int i, error;
830
831 KKASSERT(&curthread->td_msgport == ifnet_portfn(0));
832
833 for (i = 0; i < st_ctx->poll_handlers; ++i) {
834 if (st_ctx->pr[i].ifp == ifp) /* Found it */
835 break;
836 }
837 if (i == st_ctx->poll_handlers) {
838 kprintf("stpoll_deregister: ifp not found!!!\n");
839 error = ENOENT;
840 } else {
841 st_ctx->poll_handlers--;
842 if (i < st_ctx->poll_handlers) {
843 /* Last entry replaces this one. */
844 st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
845 }
846
847#ifdef IFPOLL_MULTI_SYSTIMER
848 if (st_ctx->poll_handlers == 0)
849 systimer_adjust_periodic(&st_ctx->pollclock, 1);
850#else
851 ifpoll_handler_delevent();
852#endif
853 error = 0;
854 }
855 return error;
856}
857
858#ifndef IFPOLL_MULTI_SYSTIMER
859static __inline int
860iopoll_hz(struct iopoll_ctx *io_ctx)
861{
862 int poll_hz;
863
864 poll_hz = ifpoll_pollhz;
865 if (io_ctx->poll_type == IFPOLL_TX)
866 poll_hz /= ifpoll_txfrac + 1;
867 return poll_hz;
868}
869#endif
870
871static __inline void
872iopoll_reset_state(struct iopoll_ctx *io_ctx)
873{
874 crit_enter();
875 io_ctx->poll_burst = 5;
876 io_ctx->pending_polls = 0;
877 io_ctx->residual_burst = 0;
878 io_ctx->phase = 0;
879 bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
880 bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
881 crit_exit();
882}
883
884static void
885iopoll_init(int cpuid)
886{
887 KKASSERT(cpuid < IFPOLL_CTX_MAX);
888
889 rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
890 txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
891}
892
893static struct iopoll_ctx *
894iopoll_ctx_create(int cpuid, int poll_type)
895{
896 struct iopoll_ctx *io_ctx;
897 const char *poll_type_str;
898 char cpuid_str[16];
899
900 KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
901
902 /*
903 * Make sure that tunables are in a sane state
904 */
905 if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
906 iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
907 else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
908 iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
909
910 if (iopoll_each_burst > iopoll_burst_max)
911 iopoll_each_burst = iopoll_burst_max;
912
913 /*
914 * Create the per-cpu polling context
915 */
916 io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);
917
918 io_ctx->poll_each_burst = iopoll_each_burst;
919 io_ctx->poll_burst_max = iopoll_burst_max;
920 io_ctx->user_frac = 50;
921#ifdef IFPOLL_MULTI_SYSTIMER
922 io_ctx->pollhz = iopoll_hz;
923#else
924 io_ctx->poll_type = poll_type;
925#endif
926 io_ctx->poll_cpuid = cpuid;
927 iopoll_reset_state(io_ctx);
928
929 netmsg_init(&io_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
930 iopoll_handler);
931 io_ctx->poll_netmsg.nm_lmsg.u.ms_resultp = io_ctx;
932
933 netmsg_init(&io_ctx->poll_more_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
934 iopollmore_handler);
935 io_ctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = io_ctx;
936
937 /*
938 * Initialize per-cpu sysctl nodes
939 */
940 if (poll_type == IFPOLL_RX)
941 poll_type_str = "rx";
942 else
943 poll_type_str = "tx";
944 ksnprintf(cpuid_str, sizeof(cpuid_str), "%s%d",
945 poll_type_str, io_ctx->poll_cpuid);
946
947 sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
948 io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
949 SYSCTL_STATIC_CHILDREN(_net_ifpoll),
950 OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
951 iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
952 SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);
953
954#ifdef IFPOLL_MULTI_SYSTIMER
955 /*
956 * Initialize systimer
957 */
958 systimer_init_periodic_nq(&io_ctx->pollclock,
959 iopoll_systimer, io_ctx, 1);
960#endif
961
962 return io_ctx;
963}
964
965/*
966 * Hook from iopoll systimer. Tries to schedule an iopoll, but keeps
967 * track of lost ticks due to the previous handler taking too long.
968 * Normally, this should not happen, because the polling handler should
969 * run for a short time. However, in some cases (e.g. when there are
970 * changes in link status etc.) the drivers take a very long time
971 * (even in the order of milliseconds) to reset and reconfigure the
972 * device, causing apparent lost polls.
973 *
974 * The first part of the code is just for debugging purposes, and tries
975 * to count how often hardclock ticks are shorter than they should be,
976 * meaning either stray interrupts or delayed events.
977 *
978 * WARNING! called from fastint or IPI, the MP lock might not be held.
979 */
980static void
981iopoll_clock(struct iopoll_ctx *io_ctx)
982{
983 struct timeval t;
984 int delta, poll_hz;
985
986 KKASSERT(mycpuid == io_ctx->poll_cpuid);
987
988 if (io_ctx->poll_handlers == 0)
989 return;
990
991#ifdef IFPOLL_MULTI_SYSTIMER
992 poll_hz = io_ctx->pollhz;
993#else
994 poll_hz = iopoll_hz(io_ctx);
995#endif
996
997 microuptime(&t);
998 delta = (t.tv_usec - io_ctx->prev_t.tv_usec) +
999 (t.tv_sec - io_ctx->prev_t.tv_sec) * 1000000;
1000 if (delta * poll_hz < 500000)
1001 io_ctx->short_ticks++;
1002 else
1003 io_ctx->prev_t = t;
1004
1005 if (io_ctx->pending_polls > 100) {
1006 /*
1007 * Too much, assume it has stalled (not always true,
1008 * see comment above).
1009 */
1010 io_ctx->stalled++;
1011 io_ctx->pending_polls = 0;
1012 io_ctx->phase = 0;
1013 }
1014
1015 if (io_ctx->phase <= 2) {
1016 if (io_ctx->phase != 0)
1017 io_ctx->suspect++;
1018 io_ctx->phase = 1;
1019 sched_iopoll(io_ctx);
1020 io_ctx->phase = 2;
1021 }
1022 if (io_ctx->pending_polls++ > 0)
1023 io_ctx->lost_polls++;
1024}
1025
1026#ifdef IFPOLL_MULTI_SYSTIMER
1027static void
1028iopoll_systimer(systimer_t info, struct intrframe *frame __unused)
1029{
1030 iopoll_clock(info->data);
1031}
1032#endif
1033
1034/*
1035 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
1036 * once per polling systimer tick.
1037 *
1038 * Note that the message is replied immediately in order to allow a new
1039 * ISR to be scheduled in the handler.
1040 */
1041static void
1042iopoll_handler(struct netmsg *msg)
1043{
1044 struct iopoll_ctx *io_ctx;
1045 int i, cycles;
1046
1047 io_ctx = msg->nm_lmsg.u.ms_resultp;
1048 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1049
1050 /* Reply ASAP */
1051 crit_enter();
1052 lwkt_replymsg(&msg->nm_lmsg, 0);
1053 crit_exit();
1054
1055 if (io_ctx->poll_handlers == 0)
1056 return;
1057
1058 io_ctx->phase = 3;
1059 if (io_ctx->residual_burst == 0) {
1060 /* First call in this tick */
1061 microuptime(&io_ctx->poll_start_t);
1062 io_ctx->residual_burst = io_ctx->poll_burst;
1063 }
1064 cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
1065 io_ctx->residual_burst : io_ctx->poll_each_burst;
1066 io_ctx->residual_burst -= cycles;
1067
1068 for (i = 0; i < io_ctx->poll_handlers; i++) {
1069 const struct iopoll_rec *rec = &io_ctx->pr[i];
1070 struct ifnet *ifp = rec->ifp;
1071
1072 if (!lwkt_serialize_try(rec->serializer))
1073 continue;
1074
1075 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
1076 (IFF_RUNNING | IFF_NPOLLING)) {
1077 crit_enter();
1078 rec->poll_func(ifp, rec->arg, cycles);
1079 crit_exit();
1080 }
1081
1082 lwkt_serialize_exit(rec->serializer);
1083 }
1084
1085 sched_iopollmore(io_ctx);
1086 io_ctx->phase = 4;
1087}
1088
1089/*
1090 * iopollmore_handler is called after other netisr's, possibly scheduling
1091 * another iopoll_handler call, or adapting the burst size for the next cycle.
1092 *
1093 * It is very bad to fetch large bursts of packets from a single card at once,
1094 * because the burst could take a long time to be completely processed, leading
1095 * to unfairness. To reduce the problem, and also to account better for time
1096 * spent in network-related processing, we split the burst in smaller chunks
1097 * of fixed size, giving control to the other netisr's between chunks. This
1098 * helps in improving the fairness, reducing livelock and accounting for the
1099 * work performed in low level handling.
1100 */
1101static void
1102iopollmore_handler(struct netmsg *msg)
1103{
1104 struct iopoll_ctx *io_ctx;
1105 struct timeval t;
1106 int kern_load, poll_hz;
1107 uint32_t pending_polls;
1108
1109 io_ctx = msg->nm_lmsg.u.ms_resultp;
1110 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1111
1112 /* Reply ASAP */
1113 lwkt_replymsg(&msg->nm_lmsg, 0);
1114
1115 if (io_ctx->poll_handlers == 0)
1116 return;
1117
1118#ifdef IFPOLL_MULTI_SYSTIMER
1119 poll_hz = io_ctx->pollhz;
1120#else
1121 poll_hz = iopoll_hz(io_ctx);
1122#endif
1123
1124 io_ctx->phase = 5;
1125 if (io_ctx->residual_burst > 0) {
1126 sched_iopoll(io_ctx);
1127 /* Will run immediately on return, followed by netisrs */
1128 return;
1129 }
1130
1131 /* Here we can account for the time spent in iopolls in this tick */
1132 microuptime(&t);
1133 kern_load = (t.tv_usec - io_ctx->poll_start_t.tv_usec) +
1134 (t.tv_sec - io_ctx->poll_start_t.tv_sec) * 1000000; /* us */
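	/*
	 * Scale the elapsed time into a 0..100 percentage of one polling
	 * tick: a tick is 1000000/poll_hz us, so the percentage is
	 * us * poll_hz / 10000.  E.g. with poll_hz == 2000 a tick is
	 * 500us, and having spent 250us here yields kern_load == 50.
	 */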
1135 kern_load = (kern_load * poll_hz) / 10000; /* 0..100 */
1136 if (kern_load > (100 - io_ctx->user_frac)) {
1137 /* Try to decrease the burst */
1138 if (io_ctx->poll_burst > 1)
1139 io_ctx->poll_burst--;
1140 } else {
1141 if (io_ctx->poll_burst < io_ctx->poll_burst_max)
1142 io_ctx->poll_burst++;
1143 }
1144
1145 crit_enter();
1146 io_ctx->pending_polls--;
1147 pending_polls = io_ctx->pending_polls;
1148 crit_exit();
1149
1150 if (pending_polls == 0) {
1151 /* We are done */
1152 io_ctx->phase = 0;
1153 } else {
1154 /*
1155 * Last cycle was long and caused us to miss one or more
1156 * hardclock ticks. Restart processing, but slightly
1157 * reduce the burst size to prevent this from happening again.
1158 */
1159 io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
1160 if (io_ctx->poll_burst < 1)
1161 io_ctx->poll_burst = 1;
1162 sched_iopoll(io_ctx);
1163 io_ctx->phase = 6;
1164 }
1165}
1166
1167static void
1168iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1169 struct iopoll_ctx *io_ctx)
1170{
1171#ifdef IFPOLL_MULTI_SYSTIMER
1172 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
1173 CTLTYPE_INT | CTLFLAG_RW, io_ctx, 0, sysctl_iopollhz,
1174 "I", "Device polling frequency");
1175#endif
1176
1177 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1178 CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1179 "IU", "Max Polling burst size");
1180
1181 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1182 CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1183 "IU", "Max size of each burst");
1184
1185 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1186 &io_ctx->phase, 0, "Polling phase");
1187
1188 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1189 &io_ctx->suspect, 0, "suspect event");
1190
1191 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1192 &io_ctx->stalled, 0, "potential stalls");
1193
1194 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1195 &io_ctx->poll_burst, 0, "Current polling burst size");
1196
1197 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1198 &io_ctx->user_frac, 0,
1199 "Desired user fraction of cpu time");
1200
1201 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1202 &io_ctx->short_ticks, 0,
1203 "Hardclock ticks shorter than they should be");
1204
1205 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1206 &io_ctx->lost_polls, 0,
1207 "How many times we would have lost a poll tick");
1208
1209 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1210 &io_ctx->pending_polls, 0, "Do we need to poll again");
1211
1212 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1213 &io_ctx->residual_burst, 0,
1214 "# of residual cycles in burst");
1215
1216 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1217 &io_ctx->poll_handlers, 0,
1218 "Number of registered poll handlers");
1219}
1220
1221#ifdef IFPOLL_MULTI_SYSTIMER
1222
1223static int
1224sysctl_iopollhz(SYSCTL_HANDLER_ARGS)
1225{
1226 struct iopoll_ctx *io_ctx = arg1;
1227 struct iopoll_sysctl_netmsg msg;
1228 struct netmsg *nmsg;
1229 int error, phz;
1230
1231 phz = io_ctx->pollhz;
1232 error = sysctl_handle_int(oidp, &phz, 0, req);
1233 if (error || req->newptr == NULL)
1234 return error;
1235 if (phz <= 0)
1236 return EINVAL;
1237 else if (phz > IFPOLL_FREQ_MAX)
1238 phz = IFPOLL_FREQ_MAX;
1239
1240 nmsg = &msg.nmsg;
1241 netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
1242 sysctl_iopollhz_handler);
1243 nmsg->nm_lmsg.u.ms_result = phz;
1244 msg.ctx = io_ctx;
1245
1246 return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
1247}
1248
1249static void
1250sysctl_iopollhz_handler(struct netmsg *nmsg)
1251{
1252 struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1253 struct iopoll_ctx *io_ctx;
1254
1255 io_ctx = msg->ctx;
1256 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1257
1258 /*
1259 * If polling is disabled or there is no polling handler
1260 * registered, don't adjust polling systimer frequency.
1261 * Polling systimer frequency will be adjusted once there
1262 * are registered handlers.
1263 */
1264 io_ctx->pollhz = nmsg->nm_lmsg.u.ms_result;
1265 if (io_ctx->poll_handlers)
1266 systimer_adjust_periodic(&io_ctx->pollclock, io_ctx->pollhz);
1267
1268 lwkt_replymsg(&nmsg->nm_lmsg, 0);
1269}
1270
1271#endif /* IFPOLL_MULTI_SYSTIMER */
1272
1273static void
1274sysctl_burstmax_handler(struct netmsg *nmsg)
1275{
1276 struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1277 struct iopoll_ctx *io_ctx;
1278
1279 io_ctx = msg->ctx;
1280 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1281
1282 io_ctx->poll_burst_max = nmsg->nm_lmsg.u.ms_result;
1283 if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1284 io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1285 if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1286 io_ctx->poll_burst = io_ctx->poll_burst_max;
1287 if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1288 io_ctx->residual_burst = io_ctx->poll_burst_max;
1289
1290 lwkt_replymsg(&nmsg->nm_lmsg, 0);
1291}
1292
1293static int
1294sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1295{
1296 struct iopoll_ctx *io_ctx = arg1;
1297 struct iopoll_sysctl_netmsg msg;
1298 struct netmsg *nmsg;
1299 uint32_t burst_max;
1300 int error;
1301
1302 burst_max = io_ctx->poll_burst_max;
1303 error = sysctl_handle_int(oidp, &burst_max, 0, req);
1304 if (error || req->newptr == NULL)
1305 return error;
1306 if (burst_max < MIN_IOPOLL_BURST_MAX)
1307 burst_max = MIN_IOPOLL_BURST_MAX;
1308 else if (burst_max > MAX_IOPOLL_BURST_MAX)
1309 burst_max = MAX_IOPOLL_BURST_MAX;
1310
1311 nmsg = &msg.nmsg;
1312 netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
1313 sysctl_burstmax_handler);
1314 nmsg->nm_lmsg.u.ms_result = burst_max;
1315 msg.ctx = io_ctx;
1316
1317 return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
1318}
1319
1320static void
1321sysctl_eachburst_handler(struct netmsg *nmsg)
1322{
1323 struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1324 struct iopoll_ctx *io_ctx;
1325 uint32_t each_burst;
1326
1327 io_ctx = msg->ctx;
1328 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1329
1330 each_burst = nmsg->nm_lmsg.u.ms_result;
1331 if (each_burst > io_ctx->poll_burst_max)
1332 each_burst = io_ctx->poll_burst_max;
1333 else if (each_burst < 1)
1334 each_burst = 1;
1335 io_ctx->poll_each_burst = each_burst;
1336
1337 lwkt_replymsg(&nmsg->nm_lmsg, 0);
1338}
1339
1340static int
1341sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1342{
1343 struct iopoll_ctx *io_ctx = arg1;
1344 struct iopoll_sysctl_netmsg msg;
1345 struct netmsg *nmsg;
1346 uint32_t each_burst;
1347 int error;
1348
1349 each_burst = io_ctx->poll_each_burst;
1350 error = sysctl_handle_int(oidp, &each_burst, 0, req);
1351 if (error || req->newptr == NULL)
1352 return error;
1353
1354 nmsg = &msg.nmsg;
1355 netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
1356 sysctl_eachburst_handler);
1357 nmsg->nm_lmsg.u.ms_result = each_burst;
1358 msg.ctx = io_ctx;
1359
1360 return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
1361}
1362
1363static int
1364iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1365 const struct ifpoll_io *io_rec)
1366{
1367 int error;
1368
1369 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1370
1371 if (io_rec->poll_func == NULL)
1372 return 0;
1373
1374 /*
1375 * Check if there is room.
1376 */
1377 if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1378 /*
1379 * List full, cannot register more entries.
1380 * This should never happen; if it does, it is probably a
1381 * broken driver trying to register multiple times. Checking
1382 * this at runtime is expensive, and won't solve the problem
1383 * anyway, so just report a few times and then give up.
1384 */
1385 static int verbose = 10; /* XXX */
1386 if (verbose > 0) {
1387 kprintf("io poll handlers list full, "
1388 "maybe a broken driver ?\n");
1389 verbose--;
1390 }
1391 error = ENOENT;
1392 } else {
1393 struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1394
1395 rec->ifp = ifp;
1396 rec->serializer = io_rec->serializer;
1397 rec->arg = io_rec->arg;
1398 rec->poll_func = io_rec->poll_func;
1399
1400 io_ctx->poll_handlers++;
1401 if (io_ctx->poll_handlers == 1) {
1402#ifdef IFPOLL_MULTI_SYSTIMER
1403 systimer_adjust_periodic(&io_ctx->pollclock,
1404 io_ctx->pollhz);
1405#else
1406 u_int *mask;
1407
1408 if (io_ctx->poll_type == IFPOLL_RX)
1409 mask = &ifpoll0.rx_cpumask;
1410 else
1411 mask = &ifpoll0.tx_cpumask;
1412 KKASSERT((*mask & mycpu->gd_cpumask) == 0);
1413 atomic_set_int(mask, mycpu->gd_cpumask);
1414#endif
1415 }
1416#ifndef IFPOLL_MULTI_SYSTIMER
1417 ifpoll_handler_addevent();
1418#endif
1419 error = 0;
1420 }
1421 return error;
1422}
1423
1424static int
1425iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1426{
1427 int i, error;
1428
1429 KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));
1430
1431 for (i = 0; i < io_ctx->poll_handlers; ++i) {
1432 if (io_ctx->pr[i].ifp == ifp) /* Found it */
1433 break;
1434 }
1435 if (i == io_ctx->poll_handlers) {
1436 error = ENOENT;
1437 } else {
1438 io_ctx->poll_handlers--;
1439 if (i < io_ctx->poll_handlers) {
1440 /* Last entry replaces this one. */
1441 io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1442 }
1443
1444 if (io_ctx->poll_handlers == 0) {
1445#ifdef IFPOLL_MULTI_SYSTIMER
1446 systimer_adjust_periodic(&io_ctx->pollclock, 1);
1447#else
1448 u_int *mask;
1449
1450 if (io_ctx->poll_type == IFPOLL_RX)
1451 mask = &ifpoll0.rx_cpumask;
1452 else
1453 mask = &ifpoll0.tx_cpumask;
1454 KKASSERT(*mask & mycpu->gd_cpumask);
1455 atomic_clear_int(mask, mycpu->gd_cpumask);
1456#endif
1457 iopoll_reset_state(io_ctx);
1458 }
1459#ifndef IFPOLL_MULTI_SYSTIMER
1460 ifpoll_handler_delevent();
1461#endif
1462 error = 0;
1463 }
1464 return error;
1465}