/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>

/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling could be done at a lower frequency than RX
 * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid systimer
 * staggering at high frequency, the RX systimer gives TX and status
 * polling a piggyback (XXX).
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_npoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_IOPOLL_BURST_MAX <= poll_burst_max <= MAX_IOPOLL_BURST_MAX
 */
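
/*
 * A minimal sketch of the driver side of the above protocol, for a
 * hypothetical driver "foo"; the foo_* names and softc layout are
 * illustrative assumptions only, not part of this file or of any real
 * driver.  The if_npoll_cpuid updates mirror the KASSERTs in
 * ifpoll_register() and ifpoll_deregister() below.
 */
#if 0
static void
foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct foo_softc *sc = ifp->if_softc;	/* hypothetical softc */

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		/* Register: install RX/TX polling handlers on CPU 0. */
		info->ifpi_rx[0].poll_func = foo_npoll_rx;
		info->ifpi_rx[0].arg = sc;
		info->ifpi_rx[0].serializer = &sc->sc_serializer;

		info->ifpi_tx[0].poll_func = foo_npoll_tx;
		info->ifpi_tx[0].arg = sc;
		info->ifpi_tx[0].serializer = &sc->sc_serializer;

		/* Polling takes over from the interrupt. */
		foo_disable_intr(sc);
		ifp->if_npoll_cpuid = 0;
	} else {
		/* Deregister: resume interrupt driven operation. */
		foo_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}
#endif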

#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/2 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	19	/* 1/20 of the pollhz */

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2

union ifpoll_time {
	struct timeval		tv;
	uint64_t		tsc;
};

struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union ifpoll_time	prev_t;
	u_long			short_ticks;		/* statistics */
	u_long			lost_polls;		/* statistics */
	u_long			suspect;		/* statistics */
	u_long			stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;
	struct netmsg_base	poll_more_netmsg;

	int			poll_cpuid;
	int			pollhz;
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
} __cachealign;

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	int			pollhz;

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};

void		ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

/*
 * Status polling
 */
static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

/*
 * RX/TX polling
 */
static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	rxpoll_handler(netmsg_t);
static void	txpoll_handler(netmsg_t);
static void	rxpollmore_handler(netmsg_t);
static void	txpollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

/*
 * Common functions
 */
static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);

static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
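
/*
 * Example tuning (a sketch, not a recommendation): with the defaults
 * above, pollhz=2000 polls RX 2000 times/sec, TX on every 2nd tick
 * (tx_frac=1, i.e. 1000hz) and status on every 20th tick (status_frac=19,
 * i.e. 100hz).  The tunables can be set from /boot/loader.conf, e.g.:
 *
 *	net.ifpoll.pollhz="4000"
 *	net.ifpoll.tx_frac="3"
 *	net.ifpoll.status_frac="39"
 *
 * The per-cpu values are also adjustable at runtime through the
 * corresponding net.ifpoll.X.* sysctl nodes created below.
 */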

static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
}

static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (__predict_true(tsc_present))
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (__predict_true(tsc_present)) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}

/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= ncpus2)
		return;

	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}

int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);
	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}

int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
		ifnet_deserialize_all(ifp);
	}
	return error;
}

static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}

static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	st_ctx->pollhz = comm->pollhz / (comm->poll_stfrac + 1);

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, stpoll_handler);
}

/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, st_ctx->pollhz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}

/*
 * Hook from the status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}

static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}

static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}

static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < ncpus2);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}

static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	netisr_fn_t handler, more_handler;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	if (poll_type == IFPOLL_RX) {
		handler = rxpoll_handler;
		more_handler = rxpollmore_handler;
	} else {
		handler = txpoll_handler;
		more_handler = txpollmore_handler;
	}

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
	    0, more_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);

	return io_ctx;
}

/*
 * Hook from the iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because a polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold critical section.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union ifpoll_time t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}
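
/*
 * Rough map of the io_ctx->phase state machine, inferred from the code
 * above and below (a reading aid, not an authoritative definition):
 *
 *	0 - idle, no poll in flight
 *	1 - iopoll_clock() is about to schedule a poll (transient)
 *	2 - poll message queued, waiting for the netisr to run it
 *	3 - rxpoll_handler()/txpoll_handler() executing
 *	4 - pollmore message scheduled by the poll handler
 *	5 - rxpollmore_handler()/txpollmore_handler() executing
 *	6 - poll rescheduled by pollmore due to pending ticks
 */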

/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

/*
 * rxpollmore_handler and txpollmore_handler are called after other netisr's,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisr's between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low level handling.
 */
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
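	/*
	 * kern_load starts as the time spent polling in this tick, in
	 * microseconds.  One tick lasts 1000000/pollhz us, so scaling by
	 * pollhz/10000 converts it into a percentage of the tick:
	 * delta_us * pollhz / 10^6 * 100 == delta_us * pollhz / 10^4.
	 */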
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
    struct iopoll_ctx *io_ctx, int poll_type)
{
	if (poll_type == IFPOLL_RX) {
		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
		    "IU", "Max Polling burst size");

		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
		    "IU", "Max size of each burst");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
		    &io_ctx->poll_burst, 0, "Current polling burst size");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");

		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst",
		    CTLFLAG_RD, &io_ctx->residual_burst, 0,
		    "# of residual cycles in burst");
	}

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
	    &io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
	    &io_ctx->suspect, "Suspected events");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
	    &io_ctx->stalled, "Potential stalls");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
	    &io_ctx->short_ticks,
	    "Hardclock ticks shorter than they should be");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
	    &io_ctx->lost_polls,
	    "How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
	    &io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
}

static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
    const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */
		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}

static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 0)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 0)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->pollhz = ifpoll_pollhz;
	comm->poll_cpuid = cpuid;
	comm->poll_stfrac = ifpoll_stfrac;
	comm->poll_txfrac = ifpoll_txfrac;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
				SYSCTL_CHILDREN(comm->sysctl_tree),
				OID_AUTO, "status_frac",
				CTLTYPE_INT | CTLFLAG_RW,
				comm, 0, sysctl_stfrac,
				"I", "# of cycles before status is polled");
	}
	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_txfrac,
			"I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}

static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}

static void
_poll_comm_systimer(struct poll_comm *comm)
{
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
}

static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}

static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}

static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
		   txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}

static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = comm->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = nmsg->lmsg.u.ms_result;

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);
	if (mycpuid == 0)
		stpoll_context.pollhz = comm->pollhz / (comm->poll_stfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}