1 /*-
2  * Copyright (c) 2001-2002 Luigi Rizzo
3  *
4  * Supported by: the Xorp Project (www.xorp.org)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28  */
29
30 #include "opt_ifpoll.h"
31
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/malloc.h>
36 #include <sys/serialize.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/microtime_pcpu.h>
40
41 #include <sys/thread2.h>
42 #include <sys/msgport2.h>
43
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_poll.h>
47 #include <net/netmsg2.h>
48 #include <net/netisr2.h>
49
50 /*
51  * Polling support for network device drivers.
52  *
53  * Drivers which support this feature try to register one status polling
54  * handler and several TX/RX polling handlers with the polling code.
55  * If the interface's if_npoll is called with a non-NULL second argument,
56  * then a register operation is requested, else a deregister operation is
57  * requested.  If the requested operation is "register", the driver should
58  * set up the ifpoll_info passed in according to its own needs:
59  *   ifpoll_info.ifpi_status.status_func == NULL
60  *     No status polling handler will be installed on CPU(0)
61  *   ifpoll_info.ifpi_rx[n].poll_func == NULL
62  *     No RX polling handler will be installed on CPU(n)
63  *   ifpoll_info.ifpi_tx[n].poll_func == NULL
64  *     No TX polling handler will be installed on CPU(n)
65  *
66  * The serializer fields of ifpoll_info.ifpi_status and ifpoll_info.ifpi_tx[n]
67  * must _not_ be NULL.  The serializer will be held before status_func
68  * and poll_func are called.  The serializer field of ifpoll_info.ifpi_rx[n]
69  * can be NULL, but the interface's if_flags must have IFF_IDIRECT set,
70  * which indicates that the network processing of the input packets is
71  * running directly instead of being redispatched.
72  *
73  * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
74  * TX and status polling may be done at a lower frequency than the RX
75  * frequency (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid
76  * systimer staggering at high frequency, the RX systimer gives TX and
77  * status polling a piggyback (XXX).
78  *
79  * All of the registered polling handlers are called only if the interface
80  * is marked as IFF_UP, IFF_RUNNING and IFF_NPOLLING.  However, the
81  * interface's register and deregister function (ifnet.if_npoll) will be
82  * called even if the interface is not marked with IFF_RUNNING or IFF_UP.
83  *
84  * If registration is successful, the driver must disable interrupts,
85  * and further I/O is performed through the TX/RX polling handlers, which
86  * are invoked (at least once per clock tick) with 3 arguments: the "arg"
87  * passed at register time, a struct ifnet pointer, and a "count" limit.
88  * The registered serializer will be held before calling the related
89  * polling handler.
90  *
91  * The count limit specifies how much work the handler can do during the
92  * call -- typically this is the number of packets to be received, or
93  * transmitted, etc. (drivers are free to interpret this number, as long
94  * as the max time spent in the function grows roughly linearly with the
95  * count).
96  *
97  * A second variable controls the sharing of CPU time between polling/kernel
98  * network processing and other activities (typically userlevel tasks):
99  * net.ifpoll.X.rx.user_frac (between 0 and 100, default 50) sets the
100  * share of CPU allocated to user tasks.  CPU time is allocated proportionally
101  * to the shares, by dynamically adjusting the "count" (poll_burst).
102  *
103  * Other parameters should be left at their default values.
104  * The following constraints hold:
105  *
106  *      1 <= poll_burst <= poll_burst_max
107  *      1 <= poll_each_burst <= poll_burst_max
108  *      MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
109  */
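
/*
 * Illustrative sketch (not part of this file): a single-queue driver's
 * if_npoll method might fill in the ifpoll_info passed by ifpoll_register()
 * roughly as follows.  The names foo_softc, foo_npoll_status, foo_rxpoll,
 * foo_txpoll and the serializer fields are hypothetical.
 *
 *	static void
 *	foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->sc_main_slz;
 *
 *			info->ifpi_rx[0].poll_func = foo_rxpoll;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->sc_rx_slz;
 *
 *			info->ifpi_tx[0].poll_func = foo_txpoll;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->sc_tx_slz;
 *
 *			... disable device interrupts ...
 *		} else {
 *			... deregister: re-enable device interrupts ...
 *		}
 *	}
 */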
110
111 #define IFPOLL_LIST_LEN         128
112 #define IFPOLL_FREQ_MAX         30000
113
114 #define MIN_IOPOLL_BURST_MAX    10
115 #define MAX_IOPOLL_BURST_MAX    5000
116 #define IOPOLL_BURST_MAX        250     /* good for 1000Mbit net and HZ=6000 */
117
118 #define IOPOLL_EACH_BURST       50
119 #define IOPOLL_USER_FRAC        50
120
121 #define IFPOLL_FREQ_DEFAULT     6000
122
123 #define IFPOLL_TXFRAC_DEFAULT   1       /* 1/1 of the pollhz */
124 #define IFPOLL_STFRAC_DEFAULT   120     /* 1/120 of the pollhz */
125
126 #define IFPOLL_RX               0x1
127 #define IFPOLL_TX               0x2
128
129 struct iopoll_rec {
130         struct lwkt_serialize   *serializer;
131         struct ifnet            *ifp;
132         void                    *arg;
133         ifpoll_iofn_t           poll_func;
134 };
135
136 struct iopoll_ctx {
137         union microtime_pcpu    prev_t;
138         u_long                  short_ticks;            /* statistics */
139         u_long                  lost_polls;             /* statistics */
140         u_long                  suspect;                /* statistics */
141         u_long                  stalled;                /* statistics */
142         uint32_t                pending_polls;          /* state */
143
144         struct netmsg_base      poll_netmsg;
145         struct netmsg_base      poll_more_netmsg;
146
147         int                     poll_cpuid;
148         int                     pollhz;
149         uint32_t                phase;                  /* state */
150         int                     residual_burst;         /* state */
151         uint32_t                poll_each_burst;        /* tunable */
152         union microtime_pcpu    poll_start_t;           /* state */
153
154         uint32_t                poll_burst;             /* state */
155         uint32_t                poll_burst_max;         /* tunable */
156         uint32_t                user_frac;              /* tunable */
157         uint32_t                kern_frac;              /* state */
158
159         uint32_t                poll_handlers; /* next free entry in pr[]. */
160         struct iopoll_rec       pr[IFPOLL_LIST_LEN];
161
162         struct sysctl_ctx_list  poll_sysctl_ctx;
163         struct sysctl_oid       *poll_sysctl_tree;
164 };
165
166 struct poll_comm {
167         struct systimer         pollclock;
168         int                     poll_cpuid;
169
170         int                     stfrac_count;           /* state */
171         int                     poll_stfrac;            /* tunable */
172
173         int                     txfrac_count;           /* state */
174         int                     poll_txfrac;            /* tunable */
175
176         int                     pollhz;                 /* tunable */
177
178         struct sysctl_ctx_list  sysctl_ctx;
179         struct sysctl_oid       *sysctl_tree;
180 };
181
182 struct stpoll_rec {
183         struct lwkt_serialize   *serializer;
184         struct ifnet            *ifp;
185         ifpoll_stfn_t           status_func;
186 };
187
188 struct stpoll_ctx {
189         struct netmsg_base      poll_netmsg;
190
191         uint32_t                poll_handlers; /* next free entry in pr[]. */
192         struct stpoll_rec       pr[IFPOLL_LIST_LEN];
193
194         struct sysctl_ctx_list  poll_sysctl_ctx;
195         struct sysctl_oid       *poll_sysctl_tree;
196 } __cachealign;
197
198 struct iopoll_sysctl_netmsg {
199         struct netmsg_base      base;
200         struct iopoll_ctx       *ctx;
201 };
202
203 static void     ifpoll_init_pcpu(int);
204 static void     ifpoll_register_handler(netmsg_t);
205 static void     ifpoll_deregister_handler(netmsg_t);
206
207 /*
208  * Status polling
209  */
210 static void     stpoll_init(void);
211 static void     stpoll_handler(netmsg_t);
212 static void     stpoll_clock(struct stpoll_ctx *);
213 static int      stpoll_register(struct ifnet *, const struct ifpoll_status *);
214 static int      stpoll_deregister(struct ifnet *);
215
216 /*
217  * RX/TX polling
218  */
219 static struct iopoll_ctx *iopoll_ctx_create(int, int);
220 static void     iopoll_init(int);
221 static void     rxpoll_handler(netmsg_t);
222 static void     txpoll_handler(netmsg_t);
223 static void     rxpollmore_handler(netmsg_t);
224 static void     txpollmore_handler(netmsg_t);
225 static void     iopoll_clock(struct iopoll_ctx *);
226 static int      iopoll_register(struct ifnet *, struct iopoll_ctx *,
227                     const struct ifpoll_io *);
228 static int      iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
229
230 static void     iopoll_add_sysctl(struct sysctl_ctx_list *,
231                     struct sysctl_oid_list *, struct iopoll_ctx *, int);
232 static void     sysctl_burstmax_handler(netmsg_t);
233 static int      sysctl_burstmax(SYSCTL_HANDLER_ARGS);
234 static void     sysctl_eachburst_handler(netmsg_t);
235 static int      sysctl_eachburst(SYSCTL_HANDLER_ARGS);
236
237 /*
238  * Common functions
239  */
240 static void     poll_comm_init(int);
241 static void     poll_comm_start(int);
242 static void     poll_comm_adjust_pollhz(struct poll_comm *);
243 static void     poll_comm_systimer0(systimer_t, int, struct intrframe *);
244 static void     poll_comm_systimer(systimer_t, int, struct intrframe *);
245 static void     sysctl_pollhz_handler(netmsg_t);
246 static void     sysctl_stfrac_handler(netmsg_t);
247 static void     sysctl_txfrac_handler(netmsg_t);
248 static int      sysctl_pollhz(SYSCTL_HANDLER_ARGS);
249 static int      sysctl_stfrac(SYSCTL_HANDLER_ARGS);
250 static int      sysctl_txfrac(SYSCTL_HANDLER_ARGS);
251 static int      sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
252 static int      sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
253
254 static struct stpoll_ctx        stpoll_context;
255 static struct poll_comm         *poll_common[MAXCPU];
256 static struct iopoll_ctx        *rxpoll_context[MAXCPU];
257 static struct iopoll_ctx        *txpoll_context[MAXCPU];
258
259 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
260             "Network device polling parameters");
261
262 static int      iopoll_burst_max = IOPOLL_BURST_MAX;
263 static int      iopoll_each_burst = IOPOLL_EACH_BURST;
264 static int      iopoll_user_frac = IOPOLL_USER_FRAC;
265
266 static int      ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
267 static int      ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
268 static int      ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
269
270 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
271 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
272 TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
273 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
274 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
275 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
276
277 #if !defined(KTR_IF_POLL)
278 #define  KTR_IF_POLL            KTR_ALL
279 #endif
280 KTR_INFO_MASTER(if_poll);
281 KTR_INFO(KTR_IF_POLL, if_poll, rx_start, 0, "rx start");
282 KTR_INFO(KTR_IF_POLL, if_poll, rx_end, 1, "rx end");
283 KTR_INFO(KTR_IF_POLL, if_poll, tx_start, 2, "tx start");
284 KTR_INFO(KTR_IF_POLL, if_poll, tx_end, 3, "tx end");
285 KTR_INFO(KTR_IF_POLL, if_poll, rx_mstart, 4, "rx more start");
286 KTR_INFO(KTR_IF_POLL, if_poll, rx_mend, 5, "rx more end");
287 KTR_INFO(KTR_IF_POLL, if_poll, tx_mstart, 6, "tx more start");
288 KTR_INFO(KTR_IF_POLL, if_poll, tx_mend, 7, "tx more end");
289 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_start, 8, "ioclock start");
290 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_end, 9, "ioclock end");
291 #define logpoll(name)   KTR_LOG(if_poll_ ## name)
292
293 #define IFPOLL_FREQ_ADJ(comm)   (((comm)->poll_cpuid * 3) % 50)
294
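/*
 * IFPOLL_FREQ_ADJ adds a small per-cpu offset ((cpuid * 3) % 50 Hz) to the
 * polling frequency, presumably so that the per-cpu polling systimers do not
 * all run at exactly the same rate.  poll_comm_pollhz_div() applies the
 * offset to a user-supplied frequency, while poll_comm_pollhz_conv() strips
 * it again when the frequency is reported back.
 */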
295 static __inline int
296 poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
297 {
298         return pollhz + IFPOLL_FREQ_ADJ(comm);
299 }
300
301 static __inline int
302 poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
303 {
304         return pollhz - IFPOLL_FREQ_ADJ(comm);
305 }
306
307 static __inline void
308 ifpoll_sendmsg_oncpu(netmsg_t msg)
309 {
310         if (msg->lmsg.ms_flags & MSGF_DONE)
311                 netisr_sendmsg_oncpu(&msg->base);
312 }
313
314 static __inline void
315 sched_stpoll(struct stpoll_ctx *st_ctx)
316 {
317         ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
318 }
319
320 static __inline void
321 sched_iopoll(struct iopoll_ctx *io_ctx)
322 {
323         ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
324 }
325
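/*
 * Schedule the "more work" stage for this iopoll context.  When direct
 * dispatch is requested, the poll_more_netmsg handler is run synchronously
 * in place (the message is marked MSGF_SYNC and dispatched directly)
 * instead of being queued to the local netisr message port.
 */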
326 static __inline void
327 sched_iopollmore(struct iopoll_ctx *io_ctx, boolean_t direct)
328 {
329
330         if (!direct) {
331                 ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
332         } else {
333                 struct netmsg_base *nmsg = &io_ctx->poll_more_netmsg;
334
335                 nmsg->lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
336                 nmsg->lmsg.ms_flags |= MSGF_SYNC;
337                 nmsg->nm_dispatch((netmsg_t)nmsg);
338                 KKASSERT(nmsg->lmsg.ms_flags & MSGF_DONE);
339         }
340 }
341
342 /*
343  * Initialize per-cpu polling(4) context.
344  */
345 static void
346 ifpoll_init_pcpu(int cpuid)
347 {
348
349         poll_comm_init(cpuid);
350
351         if (cpuid == 0)
352                 stpoll_init();
353         iopoll_init(cpuid);
354
355         poll_comm_start(cpuid);
356 }
357
358 static void
359 ifpoll_init_handler(netmsg_t msg)
360 {
361         int cpu = mycpuid;
362
363         ifpoll_init_pcpu(cpu);
364         netisr_forwardmsg(&msg->base, cpu + 1);
365 }
366
367 static void
368 ifpoll_sysinit(void *dummy __unused)
369 {
370         struct netmsg_base msg;
371
372         netmsg_init(&msg, NULL, &curthread->td_msgport, 0, ifpoll_init_handler);
373         netisr_domsg_global(&msg);
374 }
375 SYSINIT(ifpoll, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifpoll_sysinit, NULL);
376
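/*
 * Enable polling(4) on the interface: interlock with IFF_NPOLLING, ask the
 * driver to fill in an ifpoll_info via its if_npoll method, then register
 * the returned status/RX/TX handlers on each netisr cpu.
 */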
377 int
378 ifpoll_register(struct ifnet *ifp)
379 {
380         struct ifpoll_info *info;
381         struct netmsg_base nmsg;
382         int error;
383
384         if (ifp->if_npoll == NULL) {
385                 /* Device does not support polling */
386                 return EOPNOTSUPP;
387         }
388
389         info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);
390
391         /*
392          * Attempt to register.  Interlock with IFF_NPOLLING.
393          */
394
395         ifnet_serialize_all(ifp);
396
397         if (ifp->if_flags & IFF_NPOLLING) {
398                 /* Already polling */
399                 ifnet_deserialize_all(ifp);
400                 kfree(info, M_TEMP);
401                 return EBUSY;
402         }
403
404         info->ifpi_ifp = ifp;
405
406         ifp->if_flags |= IFF_NPOLLING;
407         ifp->if_npoll(ifp, info);
408
409         ifnet_deserialize_all(ifp);
410
411         netmsg_init(&nmsg, NULL, &curthread->td_msgport,
412                     0, ifpoll_register_handler);
413         nmsg.lmsg.u.ms_resultp = info;
414
415         error = netisr_domsg_global(&nmsg);
416         if (error) {
417                 if (!ifpoll_deregister(ifp)) {
418                         if_printf(ifp, "ifpoll_register: "
419                                   "ifpoll_deregister failed!\n");
420                 }
421         }
422
423         kfree(info, M_TEMP);
424         return error;
425 }
426
427 int
428 ifpoll_deregister(struct ifnet *ifp)
429 {
430         struct netmsg_base nmsg;
431         int error;
432
433         if (ifp->if_npoll == NULL)
434                 return EOPNOTSUPP;
435
436         ifnet_serialize_all(ifp);
437
438         if ((ifp->if_flags & IFF_NPOLLING) == 0) {
439                 ifnet_deserialize_all(ifp);
440                 return EINVAL;
441         }
442         ifp->if_flags &= ~IFF_NPOLLING;
443
444         ifnet_deserialize_all(ifp);
445
446         netmsg_init(&nmsg, NULL, &curthread->td_msgport,
447                     0, ifpoll_deregister_handler);
448         nmsg.lmsg.u.ms_resultp = ifp;
449
450         error = netisr_domsg_global(&nmsg);
451         if (!error) {
452                 ifnet_serialize_all(ifp);
453                 ifp->if_npoll(ifp, NULL);
454                 ifnet_deserialize_all(ifp);
455         }
456         return error;
457 }
458
459 static void
460 ifpoll_register_handler(netmsg_t nmsg)
461 {
462         const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
463         int cpuid = mycpuid;
464         int error;
465
466         KKASSERT(cpuid < netisr_ncpus);
467         KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
468
469         if (cpuid == 0) {
470                 error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
471                 if (error)
472                         goto failed;
473         }
474
475         error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
476                                 &info->ifpi_rx[cpuid]);
477         if (error)
478                 goto failed;
479
480         error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
481                                 &info->ifpi_tx[cpuid]);
482         if (error)
483                 goto failed;
484
485         /* Adjust polling frequency, after all registration is done */
486         poll_comm_adjust_pollhz(poll_common[cpuid]);
487
488         netisr_forwardmsg(&nmsg->base, cpuid + 1);
489         return;
490 failed:
491         netisr_replymsg(&nmsg->base, error);
492 }
493
494 static void
495 ifpoll_deregister_handler(netmsg_t nmsg)
496 {
497         struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
498         int cpuid = mycpuid;
499
500         KKASSERT(cpuid < netisr_ncpus);
501         KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
502
503         /* Ignore errors */
504         if (cpuid == 0)
505                 stpoll_deregister(ifp);
506         iopoll_deregister(ifp, rxpoll_context[cpuid]);
507         iopoll_deregister(ifp, txpoll_context[cpuid]);
508
509         /* Adjust polling frequency, after all deregistration is done */
510         poll_comm_adjust_pollhz(poll_common[cpuid]);
511
512         netisr_forwardmsg(&nmsg->base, cpuid + 1);
513 }
514
515 static void
516 stpoll_init(void)
517 {
518         struct stpoll_ctx *st_ctx = &stpoll_context;
519         const struct poll_comm *comm = poll_common[0];
520
521         sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
522         st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
523                                    SYSCTL_CHILDREN(comm->sysctl_tree),
524                                    OID_AUTO, "status", CTLFLAG_RD, 0, "");
525
526         SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
527                         SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
528                         OID_AUTO, "handlers", CTLFLAG_RD,
529                         &st_ctx->poll_handlers, 0,
530                         "Number of registered status poll handlers");
531
532         netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
533                     0, stpoll_handler);
534 }
535
536 /*
537  * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
538  * once per polling systimer tick.
539  */
540 static void
541 stpoll_handler(netmsg_t msg)
542 {
543         struct stpoll_ctx *st_ctx = &stpoll_context;
544         struct thread *td = curthread;
545         int i;
546
547         ASSERT_NETISR0;
548
549         crit_enter_quick(td);
550
551         /* Reply ASAP */
552         netisr_replymsg(&msg->base, 0);
553
554         if (st_ctx->poll_handlers == 0) {
555                 crit_exit_quick(td);
556                 return;
557         }
558
559         for (i = 0; i < st_ctx->poll_handlers; ++i) {
560                 const struct stpoll_rec *rec = &st_ctx->pr[i];
561                 struct ifnet *ifp = rec->ifp;
562
563                 if (!lwkt_serialize_try(rec->serializer))
564                         continue;
565
566                 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
567                     (IFF_RUNNING | IFF_NPOLLING))
568                         rec->status_func(ifp);
569
570                 lwkt_serialize_exit(rec->serializer);
571         }
572
573         crit_exit_quick(td);
574 }
575
576 /*
577  * Hook from the status poll systimer.  Tries to schedule a status poll.
578  * NOTE: Caller should hold critical section.
579  */
580 static void
581 stpoll_clock(struct stpoll_ctx *st_ctx)
582 {
583         KKASSERT(mycpuid == 0);
584
585         if (st_ctx->poll_handlers == 0)
586                 return;
587         sched_stpoll(st_ctx);
588 }
589
590 static int
591 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
592 {
593         struct stpoll_ctx *st_ctx = &stpoll_context;
594         int error;
595
596         ASSERT_NETISR0;
597
598         if (st_rec->status_func == NULL)
599                 return 0;
600
601         /*
602          * Check if there is room.
603          */
604         if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
605                 /*
606                  * List full, cannot register more entries.
607                  * This should never happen; if it does, it is probably a
608                  * broken driver trying to register multiple times. Checking
609                  * this at runtime is expensive, and won't solve the problem
610                  * anyway, so just report a few times and then give up.
611                  */
612                 static int verbose = 10; /* XXX */
613
614                 if (verbose > 0) {
615                         kprintf("status poll handlers list full, "
616                                 "maybe a broken driver?\n");
617                         verbose--;
618                 }
619                 error = ENOENT;
620         } else {
621                 struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
622
623                 rec->ifp = ifp;
624                 rec->serializer = st_rec->serializer;
625                 rec->status_func = st_rec->status_func;
626
627                 st_ctx->poll_handlers++;
628                 error = 0;
629         }
630         return error;
631 }
632
633 static int
634 stpoll_deregister(struct ifnet *ifp)
635 {
636         struct stpoll_ctx *st_ctx = &stpoll_context;
637         int i, error;
638
639         ASSERT_NETISR0;
640
641         for (i = 0; i < st_ctx->poll_handlers; ++i) {
642                 if (st_ctx->pr[i].ifp == ifp) /* Found it */
643                         break;
644         }
645         if (i == st_ctx->poll_handlers) {
646                 error = ENOENT;
647         } else {
648                 st_ctx->poll_handlers--;
649                 if (i < st_ctx->poll_handlers) {
650                         /* Last entry replaces this one. */
651                         st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
652                 }
653                 error = 0;
654         }
655         return error;
656 }
657
658 static __inline void
659 iopoll_reset_state(struct iopoll_ctx *io_ctx)
660 {
661         crit_enter();
662         io_ctx->poll_burst = io_ctx->poll_each_burst;
663         io_ctx->pending_polls = 0;
664         io_ctx->residual_burst = 0;
665         io_ctx->phase = 0;
666         io_ctx->kern_frac = 0;
667         bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
668         bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
669         crit_exit();
670 }
671
672 static void
673 iopoll_init(int cpuid)
674 {
675         KKASSERT(cpuid < netisr_ncpus);
676
677         rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
678         txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
679 }
680
681 static struct iopoll_ctx *
682 iopoll_ctx_create(int cpuid, int poll_type)
683 {
684         struct poll_comm *comm;
685         struct iopoll_ctx *io_ctx;
686         const char *poll_type_str;
687         netisr_fn_t handler, more_handler;
688
689         KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
690
691         /*
692          * Make sure that tunables are in sane state
693          */
694         if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
695                 iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
696         else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
697                 iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
698
699         if (iopoll_each_burst > iopoll_burst_max)
700                 iopoll_each_burst = iopoll_burst_max;
701
702         comm = poll_common[cpuid];
703
704         /*
705          * Create the per-cpu polling context
706          */
707         io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);
708
709         io_ctx->poll_each_burst = iopoll_each_burst;
710         io_ctx->poll_burst_max = iopoll_burst_max;
711         io_ctx->user_frac = iopoll_user_frac;
712         if (poll_type == IFPOLL_RX)
713                 io_ctx->pollhz = comm->pollhz;
714         else
715                 io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
716         io_ctx->poll_cpuid = cpuid;
717         iopoll_reset_state(io_ctx);
718
719         if (poll_type == IFPOLL_RX) {
720                 handler = rxpoll_handler;
721                 more_handler = rxpollmore_handler;
722         } else {
723                 handler = txpoll_handler;
724                 more_handler = txpollmore_handler;
725         }
726
727         netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
728             0, handler);
729         io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;
730
731         netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
732             0, more_handler);
733         io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;
734
735         /*
736          * Initialize per-cpu sysctl nodes
737          */
738         if (poll_type == IFPOLL_RX)
739                 poll_type_str = "rx";
740         else
741                 poll_type_str = "tx";
742
743         sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
744         io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
745                                    SYSCTL_CHILDREN(comm->sysctl_tree),
746                                    OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
747         iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
748             SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);
749
750         return io_ctx;
751 }
752
753 /*
754  * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
755  * track of lost ticks due to the previous handler taking too long.
756  * Normally, this should not happen, because the polling handler should
757  * run for a short time.  However, in some cases (e.g. when there are
758  * changes in link status etc.) the drivers take a very long time
759  * (even in the order of milliseconds) to reset and reconfigure the
760  * device, causing apparent lost polls.
761  *
762  * The first part of the code is just for debugging purposes, and tries
763  * to count how often hardclock ticks are shorter than they should be,
764  * meaning either stray interrupts or delayed events.
765  *
766  * WARNING! called from fastint or IPI, the MP lock might not be held.
767  * NOTE: Caller should hold critical section.
768  */
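/*
 * Concretely: at pollhz=6000 a tick lasts about 166us; the check below
 * (delta * pollhz < 500000, with delta in microseconds) counts a tick as
 * "short" when less than half the nominal tick interval has elapsed since
 * the previous call.
 */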
769 static void
770 iopoll_clock(struct iopoll_ctx *io_ctx)
771 {
772         union microtime_pcpu t;
773         int delta;
774
775         KKASSERT(mycpuid == io_ctx->poll_cpuid);
776
777         if (io_ctx->poll_handlers == 0)
778                 return;
779
780         logpoll(ioclock_start);
781
782         microtime_pcpu_get(&t);
783         delta = microtime_pcpu_diff(&io_ctx->prev_t, &t);
784         if (delta * io_ctx->pollhz < 500000)
785                 io_ctx->short_ticks++;
786         else
787                 io_ctx->prev_t = t;
788
789         if (io_ctx->pending_polls > 100) {
790                 /*
791                  * Too much, assume it has stalled (not always true,
792                  * see comment above).
793                  */
794                 io_ctx->stalled++;
795                 io_ctx->pending_polls = 0;
796                 io_ctx->phase = 0;
797         }
798
799         if (io_ctx->phase <= 2) {
800                 if (io_ctx->phase != 0)
801                         io_ctx->suspect++;
802                 io_ctx->phase = 1;
803                 sched_iopoll(io_ctx);
804                 io_ctx->phase = 2;
805         }
806         if (io_ctx->pending_polls++ > 0)
807                 io_ctx->lost_polls++;
808
809         logpoll(ioclock_end);
810 }
811
812 /*
813  * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
814  * appropriate, typically once per polling systimer tick.
815  *
816  * Note that the message is replied immediately in order to allow a new
817  * ISR to be scheduled in the handler.
818  */
819 static void
820 rxpoll_handler(netmsg_t msg)
821 {
822         struct iopoll_ctx *io_ctx;
823         struct thread *td = curthread;
824         boolean_t direct = TRUE, crit;
825         int i, cycles;
826
827         logpoll(rx_start);
828
829         io_ctx = msg->lmsg.u.ms_resultp;
830         KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
831
832         crit = TRUE;
833         crit_enter_quick(td);
834
835         /* Reply ASAP */
836         netisr_replymsg(&msg->base, 0);
837
838         if (io_ctx->poll_handlers == 0) {
839                 crit_exit_quick(td);
840                 logpoll(rx_end);
841                 return;
842         }
843
844         io_ctx->phase = 3;
845         if (io_ctx->residual_burst == 0) {
846                 /* First call in this tick */
847                 microtime_pcpu_get(&io_ctx->poll_start_t);
848                 io_ctx->residual_burst = io_ctx->poll_burst;
849         }
850         cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
851                  io_ctx->residual_burst : io_ctx->poll_each_burst;
852         io_ctx->residual_burst -= cycles;
853
854         for (i = 0; i < io_ctx->poll_handlers; i++) {
855                 const struct iopoll_rec *rec = &io_ctx->pr[i];
856                 struct ifnet *ifp = rec->ifp;
857
858                 if (rec->serializer != NULL) {
859                         if (!crit) {
860                                 crit = TRUE;
861                                 crit_enter_quick(td);
862                         }
863                         if (__predict_false(
864                             !lwkt_serialize_try(rec->serializer))) {
865                                 /* RX serializer generally will not fail. */
866                                 continue;
867                         }
868                 } else if (crit) {
869                         /*
870                          * Exit critical section, if the RX polling
871                          * handler does not require serialization,
872                          * i.e. RX polling is doing direct input.
873                          */
874                         crit_exit_quick(td);
875                         crit = FALSE;
876                 }
877
878                 if ((ifp->if_flags & IFF_IDIRECT) == 0) {
879                         direct = FALSE;
880                         KASSERT(rec->serializer != NULL,
881                             ("rx polling handler is not serialized"));
882                 }
883 #ifdef INVARIANTS
884                 else {
885                         KASSERT(rec->serializer == NULL,
886                             ("serialized direct input"));
887                 }
888 #endif
889
890                 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
891                     (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
892                         rec->poll_func(ifp, rec->arg, cycles);
893
894                 if (rec->serializer != NULL)
895                         lwkt_serialize_exit(rec->serializer);
896         }
897
898         if (crit) {
899                 /*
900                  * Do a quick exit/enter to catch any higher-priority
901                  * interrupt sources.
902                  */
903                 crit_exit_quick(td);
904         }
905         crit_enter_quick(td);
906
907         io_ctx->phase = 4;
908         sched_iopollmore(io_ctx, direct);
909
910         crit_exit_quick(td);
911
912         logpoll(rx_end);
913 }
914
915 static void
916 txpoll_handler(netmsg_t msg)
917 {
918         struct iopoll_ctx *io_ctx;
919         struct thread *td = curthread;
920         int i;
921
922         logpoll(tx_start);
923
924         io_ctx = msg->lmsg.u.ms_resultp;
925         KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
926
927         crit_enter_quick(td);
928
929         /* Reply ASAP */
930         netisr_replymsg(&msg->base, 0);
931
932         if (io_ctx->poll_handlers == 0) {
933                 crit_exit_quick(td);
934                 logpoll(tx_end);
935                 return;
936         }
937
938         io_ctx->phase = 3;
939
940         for (i = 0; i < io_ctx->poll_handlers; i++) {
941                 const struct iopoll_rec *rec = &io_ctx->pr[i];
942                 struct ifnet *ifp = rec->ifp;
943
944                 if (!lwkt_serialize_try(rec->serializer))
945                         continue;
946
947                 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
948                     (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
949                         rec->poll_func(ifp, rec->arg, -1);
950
951                 lwkt_serialize_exit(rec->serializer);
952         }
953
954         /*
955          * Do a quick exit/enter to catch any higher-priority
956          * interrupt sources.
957          */
958         crit_exit_quick(td);
959         crit_enter_quick(td);
960
961         io_ctx->phase = 4;
962         sched_iopollmore(io_ctx, TRUE);
963
964         crit_exit_quick(td);
965
966         logpoll(tx_end);
967 }
968
969 /*
970  * rxpollmore_handler and txpollmore_handler are called after other netisrs,
971  * possibly scheduling another rxpoll_handler or txpoll_handler call, or
972  * adapting the burst size for the next cycle.
973  *
974  * It is very bad to fetch large bursts of packets from a single card at once,
975  * because the burst could take a long time to be completely processed,
976  * leading to unfairness.  To reduce the problem, and also to better account
977  * for the time spent in network-related processing, we split the burst into
978  * smaller chunks of fixed size, giving control to the other netisrs between
979  * chunks.  This helps improve fairness, reduce livelock, and account for the
980  * work performed in low-level handling.
981  */
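/*
 * For example, if poll_burst has grown to the default maximum of 250
 * (IOPOLL_BURST_MAX) while poll_each_burst is 50, a tick's RX burst is
 * handled as five rxpoll_handler invocations of 50 packets each, with
 * rxpollmore_handler rescheduling the next chunk for as long as
 * residual_burst stays positive.
 */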
982 static void
983 rxpollmore_handler(netmsg_t msg)
984 {
985         struct thread *td = curthread;
986         struct iopoll_ctx *io_ctx;
987         union microtime_pcpu t;
988         int kern_load;
989         uint32_t pending_polls;
990
991         logpoll(rx_mstart);
992
993         io_ctx = msg->lmsg.u.ms_resultp;
994         KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
995
996         crit_enter_quick(td);
997
998         /* Reply ASAP */
999         netisr_replymsg(&msg->base, 0);
1000
1001         if (io_ctx->poll_handlers == 0) {
1002                 crit_exit_quick(td);
1003                 logpoll(rx_mend);
1004                 return;
1005         }
1006
1007         io_ctx->phase = 5;
1008         if (io_ctx->residual_burst > 0) {
1009                 sched_iopoll(io_ctx);
1010                 crit_exit_quick(td);
1011                 /* Will run immediately on return, followed by netisrs */
1012                 logpoll(rx_mend);
1013                 return;
1014         }
1015
1016         /* Here we can account for the time spent in iopolls in this tick */
1017         microtime_pcpu_get(&t);
1018         kern_load = microtime_pcpu_diff(&io_ctx->poll_start_t, &t);
1019         kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
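        /*
         * kern_load becomes the percentage (0..100) of this polling tick
         * spent in the RX poll handlers: the elapsed time is measured in
         * microseconds and one tick lasts 1000000/pollhz microseconds, so
         * elapsed * pollhz / 10000 yields a percentage.  E.g. at pollhz=6000
         * (a tick of roughly 166us), 100us of handler time gives a
         * kern_load of 60.
         */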
1020         io_ctx->kern_frac = kern_load;
1021
1022         if (kern_load > (100 - io_ctx->user_frac)) {
1023                 /* Try to decrease the burst size */
1024                 if (io_ctx->poll_burst > 1)
1025                         io_ctx->poll_burst--;
1026         } else {
1027                 if (io_ctx->poll_burst < io_ctx->poll_burst_max)
1028                         io_ctx->poll_burst++;
1029         }
1030
1031         io_ctx->pending_polls--;
1032         pending_polls = io_ctx->pending_polls;
1033
1034         if (pending_polls == 0) {
1035                 /* We are done */
1036                 io_ctx->phase = 0;
1037         } else {
1038                 /*
1039                  * Last cycle was long and caused us to miss one or more
1040                  * hardclock ticks.  Restart processing again, but slightly
1041                  * reduce the burst size to prevent this from happening again.
1042                  */
1043                 io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
1044                 if (io_ctx->poll_burst < 1)
1045                         io_ctx->poll_burst = 1;
1046                 sched_iopoll(io_ctx);
1047                 io_ctx->phase = 6;
1048         }
1049
1050         crit_exit_quick(td);
1051
1052         logpoll(rx_mend);
1053 }
1054
1055 static void
1056 txpollmore_handler(netmsg_t msg)
1057 {
1058         struct thread *td = curthread;
1059         struct iopoll_ctx *io_ctx;
1060         uint32_t pending_polls;
1061
1062         logpoll(tx_mstart);
1063
1064         io_ctx = msg->lmsg.u.ms_resultp;
1065         KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1066
1067         crit_enter_quick(td);
1068
1069         /* Reply ASAP */
1070         netisr_replymsg(&msg->base, 0);
1071
1072         if (io_ctx->poll_handlers == 0) {
1073                 crit_exit_quick(td);
1074                 logpoll(tx_mend);
1075                 return;
1076         }
1077
1078         io_ctx->phase = 5;
1079
1080         io_ctx->pending_polls--;
1081         pending_polls = io_ctx->pending_polls;
1082
1083         if (pending_polls == 0) {
1084                 /* We are done */
1085                 io_ctx->phase = 0;
1086         } else {
1087                 /*
1088                  * Last cycle was long and caused us to miss one or more
1089                  * hardclock ticks.  Restart processing again.
1090                  */
1091                 sched_iopoll(io_ctx);
1092                 io_ctx->phase = 6;
1093         }
1094
1095         crit_exit_quick(td);
1096
1097         logpoll(tx_mend);
1098 }
1099
1100 static void
1101 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1102     struct iopoll_ctx *io_ctx, int poll_type)
1103 {
1104         if (poll_type == IFPOLL_RX) {
1105                 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1106                     CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1107                     "IU", "Max Polling burst size");
1108
1109                 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1110                     CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1111                     "IU", "Max size of each burst");
1112
1113                 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1114                     &io_ctx->poll_burst, 0, "Current polling burst size");
1115
1116                 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1117                     &io_ctx->user_frac, 0, "Desired user fraction of cpu time");
1118
1119                 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
1120                     &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");
1121
1122                 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1123                     &io_ctx->residual_burst, 0,
1124                     "# of residual cycles in burst");
1125         }
1126
1127         SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1128             &io_ctx->phase, 0, "Polling phase");
1129
1130         SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1131             &io_ctx->suspect, "Suspected events");
1132
1133         SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1134             &io_ctx->stalled, "Potential stalls");
1135
1136         SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1137             &io_ctx->short_ticks,
1138             "Hardclock ticks shorter than they should be");
1139
1140         SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1141             &io_ctx->lost_polls,
1142             "How many times we would have lost a poll tick");
1143
1144         SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1145             &io_ctx->pending_polls, 0, "Do we need to poll again");
1146
1147         SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1148             &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
1149 }
1150
1151 static void
1152 sysctl_burstmax_handler(netmsg_t nmsg)
1153 {
1154         struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1155         struct iopoll_ctx *io_ctx;
1156
1157         io_ctx = msg->ctx;
1158         KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1159
1160         io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
1161         if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1162                 io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1163         if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1164                 io_ctx->poll_burst = io_ctx->poll_burst_max;
1165         if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1166                 io_ctx->residual_burst = io_ctx->poll_burst_max;
1167
1168         netisr_replymsg(&nmsg->base, 0);
1169 }
1170
1171 static int
1172 sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1173 {
1174         struct iopoll_ctx *io_ctx = arg1;
1175         struct iopoll_sysctl_netmsg msg;
1176         uint32_t burst_max;
1177         int error;
1178
1179         burst_max = io_ctx->poll_burst_max;
1180         error = sysctl_handle_int(oidp, &burst_max, 0, req);
1181         if (error || req->newptr == NULL)
1182                 return error;
1183         if (burst_max < MIN_IOPOLL_BURST_MAX)
1184                 burst_max = MIN_IOPOLL_BURST_MAX;
1185         else if (burst_max > MAX_IOPOLL_BURST_MAX)
1186                 burst_max = MAX_IOPOLL_BURST_MAX;
1187
1188         netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1189                     0, sysctl_burstmax_handler);
1190         msg.base.lmsg.u.ms_result = burst_max;
1191         msg.ctx = io_ctx;
1192
1193         return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1194 }
1195
1196 static void
1197 sysctl_eachburst_handler(netmsg_t nmsg)
1198 {
1199         struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1200         struct iopoll_ctx *io_ctx;
1201         uint32_t each_burst;
1202
1203         io_ctx = msg->ctx;
1204         KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1205
1206         each_burst = nmsg->lmsg.u.ms_result;
1207         if (each_burst > io_ctx->poll_burst_max)
1208                 each_burst = io_ctx->poll_burst_max;
1209         else if (each_burst < 1)
1210                 each_burst = 1;
1211         io_ctx->poll_each_burst = each_burst;
1212
1213         netisr_replymsg(&nmsg->base, 0);
1214 }
1215
1216 static int
1217 sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1218 {
1219         struct iopoll_ctx *io_ctx = arg1;
1220         struct iopoll_sysctl_netmsg msg;
1221         uint32_t each_burst;
1222         int error;
1223
1224         each_burst = io_ctx->poll_each_burst;
1225         error = sysctl_handle_int(oidp, &each_burst, 0, req);
1226         if (error || req->newptr == NULL)
1227                 return error;
1228
1229         netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1230                     0, sysctl_eachburst_handler);
1231         msg.base.lmsg.u.ms_result = each_burst;
1232         msg.ctx = io_ctx;
1233
1234         return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1235 }
1236
1237 static int
1238 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1239                 const struct ifpoll_io *io_rec)
1240 {
1241         int error;
1242
1243         KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1244
1245         if (io_rec->poll_func == NULL)
1246                 return 0;
1247
1248         /*
1249          * Check if there is room.
1250          */
1251         if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1252                 /*
1253                  * List full, cannot register more entries.
1254                  * This should never happen; if it does, it is probably a
1255                  * broken driver trying to register multiple times. Checking
1256                  * this at runtime is expensive, and won't solve the problem
1257                  * anyway, so just report a few times and then give up.
1258                  */
1259                 static int verbose = 10; /* XXX */
1260                 if (verbose > 0) {
1261                         kprintf("io poll handlers list full, "
1262                                 "maybe a broken driver?\n");
1263                         verbose--;
1264                 }
1265                 error = ENOENT;
1266         } else {
1267                 struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1268
1269                 rec->ifp = ifp;
1270                 rec->serializer = io_rec->serializer;
1271                 rec->arg = io_rec->arg;
1272                 rec->poll_func = io_rec->poll_func;
1273
1274                 io_ctx->poll_handlers++;
1275                 error = 0;
1276         }
1277         return error;
1278 }
1279
1280 static int
1281 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1282 {
1283         int i, error;
1284
1285         KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1286
1287         for (i = 0; i < io_ctx->poll_handlers; ++i) {
1288                 if (io_ctx->pr[i].ifp == ifp) /* Found it */
1289                         break;
1290         }
1291         if (i == io_ctx->poll_handlers) {
1292                 error = ENOENT;
1293         } else {
1294                 io_ctx->poll_handlers--;
1295                 if (i < io_ctx->poll_handlers) {
1296                         /* Last entry replaces this one. */
1297                         io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1298                 }
1299
1300                 if (io_ctx->poll_handlers == 0)
1301                         iopoll_reset_state(io_ctx);
1302                 error = 0;
1303         }
1304         return error;
1305 }
1306
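/*
 * Create the per-cpu polling(4) common context: sanitize the status/TX
 * fraction tunables, apply the per-cpu pollhz adjustment, and attach the
 * net.ifpoll.<cpuid> sysctl tree.
 */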
1307 static void
1308 poll_comm_init(int cpuid)
1309 {
1310         struct poll_comm *comm;
1311         char cpuid_str[16];
1312
1313         comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);
1314
1315         if (ifpoll_stfrac < 1)
1316                 ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
1317         if (ifpoll_txfrac < 1)
1318                 ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
1319
1320         comm->poll_cpuid = cpuid;
1321         comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
1322         comm->poll_stfrac = ifpoll_stfrac - 1;
1323         comm->poll_txfrac = ifpoll_txfrac - 1;
1324
1325         ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);
1326
1327         sysctl_ctx_init(&comm->sysctl_ctx);
1328         comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
1329                             SYSCTL_STATIC_CHILDREN(_net_ifpoll),
1330                             OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
1331
1332         SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1333                         OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
1334                         comm, 0, sysctl_pollhz,
1335                         "I", "Device polling frequency");
1336
1337         if (cpuid == 0) {
1338                 SYSCTL_ADD_PROC(&comm->sysctl_ctx,
1339                                 SYSCTL_CHILDREN(comm->sysctl_tree),
1340                                 OID_AUTO, "status_frac",
1341                                 CTLTYPE_INT | CTLFLAG_RW,
1342                                 comm, 0, sysctl_stfrac,
1343                                 "I", "# of cycles before status is polled");
1344         }
1345         SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1346                         OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
1347                         comm, 0, sysctl_txfrac,
1348                         "I", "# of cycles before TX is polled");
1349
1350         poll_common[cpuid] = comm;
1351 }
1352
1353 static void
1354 poll_comm_start(int cpuid)
1355 {
1356         struct poll_comm *comm = poll_common[cpuid];
1357         systimer_func_t func;
1358
1359         /*
1360          * Initialize systimer
1361          */
1362         if (cpuid == 0)
1363                 func = poll_comm_systimer0;
1364         else
1365                 func = poll_comm_systimer;
1366         systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
1367 }
1368
1369 static void
1370 _poll_comm_systimer(struct poll_comm *comm)
1371 {
1372         iopoll_clock(rxpoll_context[comm->poll_cpuid]);
1373         if (comm->txfrac_count-- == 0) {
1374                 comm->txfrac_count = comm->poll_txfrac;
1375                 iopoll_clock(txpoll_context[comm->poll_cpuid]);
1376         }
1377 }
1378
1379 static void
1380 poll_comm_systimer0(systimer_t info, int in_ipi __unused,
1381     struct intrframe *frame __unused)
1382 {
1383         struct poll_comm *comm = info->data;
1384         globaldata_t gd = mycpu;
1385
1386         KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);
1387
1388         crit_enter_gd(gd);
1389
1390         if (comm->stfrac_count-- == 0) {
1391                 comm->stfrac_count = comm->poll_stfrac;
1392                 stpoll_clock(&stpoll_context);
1393         }
1394         _poll_comm_systimer(comm);
1395
1396         crit_exit_gd(gd);
1397 }
1398
1399 static void
1400 poll_comm_systimer(systimer_t info, int in_ipi __unused,
1401     struct intrframe *frame __unused)
1402 {
1403         struct poll_comm *comm = info->data;
1404         globaldata_t gd = mycpu;
1405
1406         KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);
1407
1408         crit_enter_gd(gd);
1409         _poll_comm_systimer(comm);
1410         crit_exit_gd(gd);
1411 }
1412
1413 static void
1414 poll_comm_adjust_pollhz(struct poll_comm *comm)
1415 {
1416         uint32_t handlers;
1417         int pollhz = 1;
1418
1419         KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1420
1421         /*
1422          * If there is no polling handler registered, set systimer
1423          * frequency to the lowest value.  Polling systimer frequency
1424          * will be adjusted to the requested value, once there are
1425          * registered handlers.
1426          */
1427         handlers = rxpoll_context[mycpuid]->poll_handlers +
1428                    txpoll_context[mycpuid]->poll_handlers;
1429         if (comm->poll_cpuid == 0)
1430                 handlers += stpoll_context.poll_handlers;
1431         if (handlers)
1432                 pollhz = comm->pollhz;
1433         systimer_adjust_periodic(&comm->pollclock, pollhz);
1434 }
1435
1436 static int
1437 sysctl_pollhz(SYSCTL_HANDLER_ARGS)
1438 {
1439         struct poll_comm *comm = arg1;
1440         struct netmsg_base nmsg;
1441         int error, phz;
1442
1443         phz = poll_comm_pollhz_conv(comm, comm->pollhz);
1444         error = sysctl_handle_int(oidp, &phz, 0, req);
1445         if (error || req->newptr == NULL)
1446                 return error;
1447         if (phz <= 0)
1448                 return EINVAL;
1449         else if (phz > IFPOLL_FREQ_MAX)
1450                 phz = IFPOLL_FREQ_MAX;
1451
1452         netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1453                     0, sysctl_pollhz_handler);
1454         nmsg.lmsg.u.ms_result = phz;
1455
1456         return netisr_domsg(&nmsg, comm->poll_cpuid);
1457 }
1458
1459 static void
1460 sysctl_pollhz_handler(netmsg_t nmsg)
1461 {
1462         struct poll_comm *comm = poll_common[mycpuid];
1463
1464         KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1465
1466         /* Save polling frequency */
1467         comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);
1468
1469         /*
1470          * Adjust cached pollhz
1471          */
1472         rxpoll_context[mycpuid]->pollhz = comm->pollhz;
1473         txpoll_context[mycpuid]->pollhz =
1474             comm->pollhz / (comm->poll_txfrac + 1);
1475
1476         /*
1477          * Adjust polling frequency
1478          */
1479         poll_comm_adjust_pollhz(comm);
1480
1481         netisr_replymsg(&nmsg->base, 0);
1482 }
1483
1484 static int
1485 sysctl_stfrac(SYSCTL_HANDLER_ARGS)
1486 {
1487         struct poll_comm *comm = arg1;
1488         struct netmsg_base nmsg;
1489         int error, stfrac;
1490
1491         KKASSERT(comm->poll_cpuid == 0);
1492
1493         stfrac = comm->poll_stfrac + 1;
1494         error = sysctl_handle_int(oidp, &stfrac, 0, req);
1495         if (error || req->newptr == NULL)
1496                 return error;
1497         if (stfrac < 1)
1498                 return EINVAL;
1499
1500         netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1501                     0, sysctl_stfrac_handler);
1502         nmsg.lmsg.u.ms_result = stfrac - 1;
1503
1504         return netisr_domsg(&nmsg, comm->poll_cpuid);
1505 }
1506
1507 static void
1508 sysctl_stfrac_handler(netmsg_t nmsg)
1509 {
1510         struct poll_comm *comm = poll_common[mycpuid];
1511         int stfrac = nmsg->lmsg.u.ms_result;
1512
1513         KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1514
1515         crit_enter();
1516         comm->poll_stfrac = stfrac;
1517         if (comm->stfrac_count > comm->poll_stfrac)
1518                 comm->stfrac_count = comm->poll_stfrac;
1519         crit_exit();
1520
1521         netisr_replymsg(&nmsg->base, 0);
1522 }
1523
1524 static int
1525 sysctl_txfrac(SYSCTL_HANDLER_ARGS)
1526 {
1527         struct poll_comm *comm = arg1;
1528         struct netmsg_base nmsg;
1529         int error, txfrac;
1530
1531         txfrac = comm->poll_txfrac + 1;
1532         error = sysctl_handle_int(oidp, &txfrac, 0, req);
1533         if (error || req->newptr == NULL)
1534                 return error;
1535         if (txfrac < 1)
1536                 return EINVAL;
1537
1538         netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1539                     0, sysctl_txfrac_handler);
1540         nmsg.lmsg.u.ms_result = txfrac - 1;
1541
1542         return netisr_domsg(&nmsg, comm->poll_cpuid);
1543 }
1544
1545 static void
1546 sysctl_txfrac_handler(netmsg_t nmsg)
1547 {
1548         struct poll_comm *comm = poll_common[mycpuid];
1549         int txfrac = nmsg->lmsg.u.ms_result;
1550
1551         KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1552
1553         crit_enter();
1554         comm->poll_txfrac = txfrac;
1555         if (comm->txfrac_count > comm->poll_txfrac)
1556                 comm->txfrac_count = comm->poll_txfrac;
1557         crit_exit();
1558
1559         netisr_replymsg(&nmsg->base, 0);
1560 }
1561
1562 void
1563 ifpoll_compat_setup(struct ifpoll_compat *cp,
1564     struct sysctl_ctx_list *sysctl_ctx,
1565     struct sysctl_oid *sysctl_tree,
1566     int unit, struct lwkt_serialize *slz)
1567 {
1568         cp->ifpc_stcount = 0;
1569         cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
1570             howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
1571
1572         cp->ifpc_cpuid = unit % netisr_ncpus;
1573         cp->ifpc_serializer = slz;
1574
1575         if (sysctl_ctx != NULL && sysctl_tree != NULL) {
1576                 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1577                     OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
1578                     cp, 0, sysctl_compat_npoll_stfrac, "I",
1579                     "polling status frac");
1580                 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1581                     OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
1582                     cp, 0, sysctl_compat_npoll_cpuid, "I",
1583                     "polling cpuid");
1584         }
1585 }
1586
1587 static int
1588 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
1589 {
1590         struct ifpoll_compat *cp = arg1;
1591         int error = 0, stfrac;
1592
1593         lwkt_serialize_enter(cp->ifpc_serializer);
1594
1595         stfrac = cp->ifpc_stfrac + 1;
1596         error = sysctl_handle_int(oidp, &stfrac, 0, req);
1597         if (!error && req->newptr != NULL) {
1598                 if (stfrac < 1) {
1599                         error = EINVAL;
1600                 } else {
1601                         cp->ifpc_stfrac = stfrac - 1;
1602                         if (cp->ifpc_stcount > cp->ifpc_stfrac)
1603                                 cp->ifpc_stcount = cp->ifpc_stfrac;
1604                 }
1605         }
1606
1607         lwkt_serialize_exit(cp->ifpc_serializer);
1608         return error;
1609 }
1610
1611 static int
1612 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
1613 {
1614         struct ifpoll_compat *cp = arg1;
1615         int error = 0, cpuid;
1616
1617         lwkt_serialize_enter(cp->ifpc_serializer);
1618
1619         cpuid = cp->ifpc_cpuid;
1620         error = sysctl_handle_int(oidp, &cpuid, 0, req);
1621         if (!error && req->newptr != NULL) {
1622                 if (cpuid < 0 || cpuid >= netisr_ncpus)
1623                         error = EINVAL;
1624                 else
1625                         cp->ifpc_cpuid = cpuid;
1626         }
1627
1628         lwkt_serialize_exit(cp->ifpc_serializer);
1629         return error;
1630 }