/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>

/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling can be done at a lower frequency than RX
 * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid systimer
 * staggering at high frequency, the RX systimer gives TX and status polling
 * a piggyback (XXX).
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_npoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
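
/*
 * Illustrative sketch (kept out of compilation with #if 0) of the driver
 * side of the protocol described above.  The softc layout, ring fields,
 * and helper names are hypothetical; only the ifpoll_info fields,
 * if_npoll_cpuid, and the register/deregister convention come from this
 * file.
 */
#if 0
static void
example_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct example_softc *sc = ifp->if_softc;	/* hypothetical softc */

	if (info != NULL) {
		/* Register: fill in the handlers we want installed. */
		info->ifpi_status.status_func = example_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		info->ifpi_rx[0].poll_func = example_npoll_rx;
		info->ifpi_rx[0].arg = &sc->rx_ring;
		info->ifpi_rx[0].serializer = &sc->rx_serialize;

		info->ifpi_tx[0].poll_func = example_npoll_tx;
		info->ifpi_tx[0].arg = &sc->tx_ring;
		info->ifpi_tx[0].serializer = &sc->tx_serialize;

		/* Polling replaces the device interrupt. */
		if (ifp->if_flags & IFF_RUNNING)
			example_disable_intr(sc);
		ifp->if_npoll_cpuid = 0;
	} else {
		/* Deregister: fall back to interrupt-driven operation. */
		if (ifp->if_flags & IFF_RUNNING)
			example_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}
#endif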

#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/2 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	19	/* 1/20 of the pollhz */

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2

union ifpoll_time {
	struct timeval		tv;
	uint64_t		tsc;
};

struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union ifpoll_time	prev_t;
	uint32_t		short_ticks;		/* statistics */
	uint32_t		lost_polls;		/* statistics */
	uint32_t		suspect;		/* statistics */
	uint32_t		stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;

	int			poll_cpuid;
	int			pollhz;
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct netmsg_base	poll_more_netmsg;

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
} __cachealign;

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	int			pollhz;

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};

void		ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

/*
 * Status polling
 */
static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

/*
 * RX/TX polling
 */
static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	iopoll_handler(netmsg_t);
static void	iopollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

/*
 * Common functions
 */
static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);

static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);

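/*
 * The tunables above can be set from loader.conf(5) at boot time, and the
 * per-CPU knobs show up under net.ifpoll.X at run time.  For example
 * (values are only a plausible starting point, not a recommendation):
 *
 *	# /boot/loader.conf
 *	net.ifpoll.pollhz="4000"
 *	net.ifpoll.burst_max="250"
 *
 *	# run time, CPU0
 *	sysctl net.ifpoll.0.pollhz=4000
 *	sysctl net.ifpoll.0.status_frac=19
 *	sysctl net.ifpoll.0.tx_frac=1
 */
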
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
}

static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (tsc_present)
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (tsc_present) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}

/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= ncpus2)
		return;

	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}

int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);
	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}

int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
		ifnet_deserialize_all(ifp);
	}
	return error;
}

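/*
 * ifpoll_register_handler runs in each CPU's netisr thread in turn: it
 * installs the per-CPU RX/TX handlers (plus, on CPU0, the status handler),
 * then forwards the same message to the next CPU's netisr port.  The
 * message is replied to only from the last CPU, or on the first error
 * encountered.
 */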
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}

static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	st_ctx->pollhz = comm->pollhz / (comm->poll_stfrac + 1);
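	/* e.g. with the defaults (pollhz 2000, poll_stfrac 19): 2000/20 = 100Hz */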

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, stpoll_handler);
}

/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, st_ctx->pollhz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}

/*
 * Hook from status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}

static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}

static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}

static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < ncpus2);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}

static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, iopoll_handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
		    0, iopollmore_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
			  SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);

	return io_ctx;
}

/*
 * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold critical section.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union ifpoll_time t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
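	/*
	 * delta is in microseconds and a full tick lasts 1000000/pollhz
	 * microseconds, so the test below fires when the measured interval
	 * is shorter than half of the expected tick.
	 */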
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}

/*
 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Note that the message is replied to immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
iopoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

/*
 * iopollmore_handler is called after other netisrs, possibly scheduling
 * another iopoll_handler call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisrs between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low level handling.
 */
static void
iopollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopolls in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000;	/* 0..100 */
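	/*
	 * e.g. at pollhz == 2000 a tick lasts 500us; if polling consumed
	 * 250us of it, kern_load = (250 * 2000) / 10000 = 50 (percent).
	 */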
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try to decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		  struct iopoll_ctx *io_ctx)
{
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
			"IU", "Max Polling burst size");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
			"IU", "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&io_ctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&io_ctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
			&io_ctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&io_ctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
			&io_ctx->kern_frac, 0,
			"Kernel fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&io_ctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&io_ctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
		       &io_ctx->residual_burst, 0,
		       "# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&io_ctx->poll_handlers, 0,
			"Number of registered poll handlers");
}

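/*
 * The two writable knobs above follow a common pattern: the SYSCTL_HANDLER
 * runs in the sysctl caller's context, validates the new value, then
 * dispatches a netmsg to the owning CPU's netisr thread, where the handler
 * actually applies it.  This keeps every modification of an iopoll_ctx
 * serialized in its poll_cpuid netisr thread.
 */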
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
		const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */
		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}

static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 0)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 0)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->pollhz = ifpoll_pollhz;
	comm->poll_cpuid = cpuid;
	comm->poll_stfrac = ifpoll_stfrac;
	comm->poll_txfrac = ifpoll_txfrac;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
				SYSCTL_CHILDREN(comm->sysctl_tree),
				OID_AUTO, "status_frac",
				CTLTYPE_INT | CTLFLAG_RW,
				comm, 0, sysctl_stfrac,
				"I", "# of cycles before status is polled");
	}
	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_txfrac,
			"I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}

static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
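	/*
	 * Start at the lowest frequency (1Hz); poll_comm_adjust_pollhz()
	 * raises it to the configured pollhz once handlers are registered.
	 */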
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}

static void
_poll_comm_systimer(struct poll_comm *comm)
{
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
}

static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}

static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}

static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
		   txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}

static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = comm->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = nmsg->lmsg.u.ms_result;

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);
	if (mycpuid == 0)
		stpoll_context.pollhz = comm->pollhz / (comm->poll_stfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}