/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 * $DragonFly: src/sys/kern/kern_poll.c,v 1.35 2007/09/30 05:12:25 sephe Exp $
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netmsg2.h>

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *  POLL_REGISTER: register and disable interrupts.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP, IFF_RUNNING and IFF_POLLING', the last two only if IFF_RUNNING
 * is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be enabled or disabled on a particular CPU_X with the sysctl
 * variable kern.polling.X.enable (default is 1, enabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.X.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks.  CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
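
/*
 * As a concrete illustration (a sketch only, not part of this file), a
 * polling handler for a hypothetical "foo" driver could look roughly
 * like the following; foo_softc, foo_rxeof() and foo_txeof() are
 * made-up names:
 *
 *	static void
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		switch (cmd) {
 *		case POLL_REGISTER:
 *			// mask device interrupts
 *			break;
 *		case POLL_DEREGISTER:
 *			// unmask device interrupts
 *			break;
 *		case POLL_AND_CHECK_STATUS:
 *			// also check status/error registers, then ...
 *			// FALLTHROUGH
 *		case POLL_ONLY:
 *			foo_rxeof(sc, count);	// receive at most "count" packets
 *			foo_txeof(sc);		// reclaim completed transmissions
 *			break;
 *		}
 *	}
 */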

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

#ifndef DEVICE_POLLING_FREQ_MAX
#define DEVICE_POLLING_FREQ_MAX		30000
#endif
#define DEVICE_POLLING_FREQ_DEFAULT	2000

#define POLL_LIST_LEN	128

struct pollrec {
	struct ifnet	*ifp;
};

#define POLLCTX_MAX	32

struct pollctx {
	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;

	uint32_t		poll_burst;
	uint32_t		poll_each_burst;
	uint32_t		poll_burst_max;
	uint32_t		user_frac;
	int			reg_frac_count;
	uint32_t		reg_frac;
	uint32_t		short_ticks;
	uint32_t		lost_polls;
	uint32_t		pending_polls;
	int			residual_burst;
	uint32_t		phase;
	uint32_t		suspect;
	uint32_t		stalled;
	struct timeval		poll_start_t;
	struct timeval		prev_t;

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct pollrec		pr[POLL_LIST_LEN];

	struct systimer		pollclock;

	int			polling_enabled;
	int			pollhz;
	int			poll_cpuid;

	struct netmsg		poll_netmsg;
	struct netmsg		poll_more_netmsg;
};

static struct pollctx	*poll_context[POLLCTX_MAX];

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static int	poll_defcpu = -1;
SYSCTL_INT(_kern_polling, OID_AUTO, defcpu, CTLFLAG_RD,
	&poll_defcpu, 0, "default CPU to run device polling");

static uint32_t	poll_cpumask0 = 0xffffffff;
TUNABLE_INT("kern.polling.cpumask", (int *)&poll_cpumask0);

static uint32_t	poll_cpumask;
SYSCTL_INT(_kern_polling, OID_AUTO, cpumask, CTLFLAG_RD,
	&poll_cpumask, 0, "CPUs that can run device polling");

static int	polling_enabled = 1;	/* global polling enable */
TUNABLE_INT("kern.polling.enable", &polling_enabled);

static int	pollhz = DEVICE_POLLING_FREQ_DEFAULT;
TUNABLE_INT("kern.polling.pollhz", &pollhz);

/* Netisr handlers */
static void	netisr_poll(struct netmsg *);
static void	netisr_pollmore(struct netmsg *);
static void	poll_register(struct netmsg *);
static void	poll_deregister(struct netmsg *);
static void	poll_sysctl_pollhz(struct netmsg *);
static void	poll_sysctl_polling(struct netmsg *);

/* Systimer handler */
static void	pollclock(systimer_t, struct intrframe *);

/* Sysctl handlers */
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_polling(SYSCTL_HANDLER_ARGS);
static void	poll_add_sysctl(struct sysctl_ctx_list *,
				struct sysctl_oid_list *, struct pollctx *);

static void	schedpoll_oncpu(struct pollctx *, struct netmsg *, netisr_fn_t);

void		init_device_poll_pcpu(int);	/* per-cpu init routine */

/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c:
 */
void
init_device_poll_pcpu(int cpuid)
{
	struct pollctx *pctx;
	char cpuid_str[3];

	if (cpuid >= POLLCTX_MAX)
		return;

	if (((1 << cpuid) & poll_cpumask0) == 0)
		return;

	poll_cpumask |= (1 << cpuid);

	pctx = kmalloc(sizeof(*pctx), M_DEVBUF, M_WAITOK | M_ZERO);

	pctx->poll_burst = 5;
	pctx->poll_each_burst = 5;
	pctx->poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
	pctx->user_frac = 50;
	pctx->reg_frac = 20;
	pctx->polling_enabled = polling_enabled;
	pctx->pollhz = pollhz;
	pctx->poll_cpuid = cpuid;
	netmsg_init(&pctx->poll_netmsg, &netisr_adone_rport, 0, NULL);
	netmsg_init(&pctx->poll_more_netmsg, &netisr_adone_rport, 0, NULL);

	KASSERT(cpuid < POLLCTX_MAX, ("cpu id must be < %d", POLLCTX_MAX));
	poll_context[cpuid] = pctx;

	if (poll_defcpu < 0) {
		poll_defcpu = cpuid;

		/*
		 * Initialize global sysctl nodes, for compat
		 */
		poll_add_sysctl(NULL, SYSCTL_STATIC_CHILDREN(_kern_polling),
				pctx);
	}

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", pctx->poll_cpuid);

	sysctl_ctx_init(&pctx->poll_sysctl_ctx);
	pctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&pctx->poll_sysctl_ctx,
				 SYSCTL_STATIC_CHILDREN(_kern_polling),
				 OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
	poll_add_sysctl(&pctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(pctx->poll_sysctl_tree), pctx);

	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&pctx->pollclock, pollclock, pctx, 1);
}

static __inline void
schedpoll(struct pollctx *pctx)
{
	schedpoll_oncpu(pctx, &pctx->poll_netmsg, netisr_poll);
}

static __inline void
schedpollmore(struct pollctx *pctx)
{
	schedpoll_oncpu(pctx, &pctx->poll_more_netmsg, netisr_pollmore);
}

/*
 * Set the polling frequency
 */
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	int error, phz;

	phz = pctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > DEVICE_POLLING_FREQ_MAX)
		phz = DEVICE_POLLING_FREQ_MAX;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_pollhz);
	msg.nm_lmsg.u.ms_result = phz;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}

static int
sysctl_polling(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	int error, enabled;

	enabled = pctx->polling_enabled;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_polling);
	msg.nm_lmsg.u.ms_result = enabled;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}

/*
 * Hook from polling systimer.  Tries to schedule a netisr, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */

static void
pollclock(systimer_t info, struct intrframe *frame __unused)
{
	struct pollctx *pctx = info->data;
	struct timeval t;
	int delta;

	if (pctx->poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - pctx->prev_t.tv_usec) +
		(t.tv_sec - pctx->prev_t.tv_sec)*1000000;
	if (delta * hz < 500000)
		pctx->short_ticks++;
	else
		pctx->prev_t = t;

	if (pctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		pctx->stalled++;
		pctx->pending_polls = 0;
		pctx->phase = 0;
	}

	if (pctx->phase <= 2) {
		if (pctx->phase != 0)
			pctx->suspect++;
		pctx->phase = 1;
		schedpoll(pctx);
		pctx->phase = 2;
	}
	if (pctx->pending_polls++ > 0)
		pctx->lost_polls++;
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness.  To reduce the problem, and also to account better
 * for time spent in network-related processing, we split the burst in
 * smaller chunks of fixed size, giving control to the other netisr's between
 * chunks.  This helps in improving the fairness, reducing livelock (because
 * we emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding.
 */
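
/*
 * A worked example with the defaults used in this file: once poll_burst
 * has grown to its default maximum of 150 and poll_each_burst is 5, a
 * tick's burst is drained in 150/5 = 30 netisr_poll/netisr_pollmore
 * rounds of at most 5 packets per interface each, rather than one
 * 150-packet sweep, giving the other netisr's a chance to run between
 * rounds.
 */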

/* ARGSUSED */
static void
netisr_pollmore(struct netmsg *msg)
{
	struct pollctx *pctx;
	struct timeval t;
	int kern_load, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);
	KKASSERT(pctx == msg->nm_lmsg.u.ms_resultp);

	lwkt_replymsg(&msg->nm_lmsg, 0);

	pctx->phase = 5;
	if (pctx->residual_burst > 0) {
		schedpoll(pctx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - pctx->poll_start_t.tv_usec) +
		(t.tv_sec - pctx->poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - pctx->user_frac)) { /* try decrease ticks */
		if (pctx->poll_burst > 1)
			pctx->poll_burst--;
	} else {
		if (pctx->poll_burst < pctx->poll_burst_max)
			pctx->poll_burst++;
	}

	pctx->pending_polls--;
	if (pctx->pending_polls == 0) {	/* we are done */
		pctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		pctx->poll_burst -= (pctx->poll_burst / 8);
		if (pctx->poll_burst < 1)
			pctx->poll_burst = 1;
		schedpoll(pctx);
		pctx->phase = 6;
	}
}

/*
 * netisr_poll is scheduled by schedpoll when appropriate, typically once
 * per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 *
 * XXX each registration should indicate whether it needs a critical
 * section to operate.
 */

/* ARGSUSED */
static void
netisr_poll(struct netmsg *msg)
{
	struct pollctx *pctx;
	int i, cycles, cpuid;
	enum poll_cmd arg = POLL_ONLY;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);
	KKASSERT(pctx == msg->nm_lmsg.u.ms_resultp);

	lwkt_replymsg(&msg->nm_lmsg, 0);

	pctx->phase = 3;
	if (pctx->residual_burst == 0) {	/* first call in this tick */
		microuptime(&pctx->poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables.  Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst.  So, instead of writing separate sysctl
		 * handlers, we do all here.
		 */
		if (pctx->reg_frac > hz)
			pctx->reg_frac = hz;
		else if (pctx->reg_frac < 1)
			pctx->reg_frac = 1;
		if (pctx->reg_frac_count > pctx->reg_frac)
			pctx->reg_frac_count = pctx->reg_frac - 1;
		if (pctx->reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			pctx->reg_frac_count = pctx->reg_frac - 1;
		}
		if (pctx->poll_burst_max < MIN_POLL_BURST_MAX)
			pctx->poll_burst_max = MIN_POLL_BURST_MAX;
		else if (pctx->poll_burst_max > MAX_POLL_BURST_MAX)
			pctx->poll_burst_max = MAX_POLL_BURST_MAX;

		if (pctx->poll_each_burst < 1)
			pctx->poll_each_burst = 1;
		else if (pctx->poll_each_burst > pctx->poll_burst_max)
			pctx->poll_each_burst = pctx->poll_burst_max;

		pctx->residual_burst = pctx->poll_burst;
	}
	cycles = (pctx->residual_burst < pctx->poll_each_burst) ?
		 pctx->residual_burst : pctx->poll_each_burst;
	pctx->residual_burst -= cycles;

	if (pctx->polling_enabled) {
		for (i = 0 ; i < pctx->poll_handlers ; i++) {
			struct ifnet *ifp = pctx->pr[i].ifp;

			if (!lwkt_serialize_try(ifp->if_serializer))
				continue;

			if ((ifp->if_flags & (IFF_UP|IFF_RUNNING|IFF_POLLING))
			    == (IFF_UP|IFF_RUNNING|IFF_POLLING))
				ifp->if_poll(ifp, arg, cycles);

			lwkt_serialize_exit(ifp->if_serializer);
		}
	} else {	/* unregister */
		for (i = 0 ; i < pctx->poll_handlers ; i++) {
			struct ifnet *ifp = pctx->pr[i].ifp;

			lwkt_serialize_enter(ifp->if_serializer);

			if ((ifp->if_flags & IFF_POLLING) == 0) {
				KKASSERT(ifp->if_poll_cpuid < 0);
				lwkt_serialize_exit(ifp->if_serializer);
				continue;
			}
			ifp->if_flags &= ~IFF_POLLING;
			ifp->if_poll_cpuid = -1;

			/*
			 * Only call the interface deregistration
			 * function if the interface is still
			 * running.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ifp->if_poll(ifp, POLL_DEREGISTER, 1);

			lwkt_serialize_exit(ifp->if_serializer);
		}
		pctx->residual_burst = 0;
		pctx->poll_handlers = 0;
	}
	schedpollmore(pctx);
	pctx->phase = 4;
}

static void
poll_register(struct netmsg *msg)
{
	struct ifnet *ifp = msg->nm_lmsg.u.ms_resultp;
	struct pollctx *pctx;
	int rc, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	if (pctx->polling_enabled == 0) {
		/* Polling disabled, cannot register */
		rc = EOPNOTSUPP;
		goto back;
	}

	/*
	 * Check if there is room.
	 */
	if (pctx->poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		rc = ENOMEM;
	} else {
		pctx->pr[pctx->poll_handlers].ifp = ifp;
		pctx->poll_handlers++;
		rc = 0;

		if (pctx->poll_handlers == 1) {
			KKASSERT(pctx->polling_enabled);
			systimer_adjust_periodic(&pctx->pollclock,
						 pctx->pollhz);
		}
	}
back:
	lwkt_replymsg(&msg->nm_lmsg, rc);
}

/*
 * Try to register routine for polling.  Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 *
 * Called from mainline code only, not called from an interrupt.
 */
int
ether_poll_register(struct ifnet *ifp)
{
	if (poll_defcpu < 0)
		return 0;
	KKASSERT(poll_defcpu < POLLCTX_MAX);

	return ether_pollcpu_register(ifp, poll_defcpu);
}
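
/*
 * For illustration only (a sketch, not code from this file): a caller,
 * e.g. SIOCSIFFLAGS handling, might toggle polling on a running
 * interface roughly as follows, where "oflags" is a hypothetical saved
 * copy of the previous interface flags:
 *
 *	if ((ifp->if_flags ^ oflags) & IFF_POLLING) {
 *		if (ifp->if_flags & IFF_POLLING)
 *			ether_poll_register(ifp);
 *		else
 *			ether_poll_deregister(ifp);
 *	}
 */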

int
ether_pollcpu_register(struct ifnet *ifp, int cpuid)
{
	struct netmsg msg;
	lwkt_port_t port;
	int rc;

	if (ifp->if_poll == NULL) {
		/* Device does not support polling */
		return 0;
	}

	if (cpuid < 0 || cpuid >= POLLCTX_MAX)
		return 0;

	if (((1 << cpuid) & poll_cpumask) == 0) {
		/* Polling is not supported on 'cpuid' */
		return 0;
	}
	KKASSERT(poll_context[cpuid] != NULL);

	/*
	 * Attempt to register.  Interlock with IFF_POLLING.
	 */
	crit_enter();	/* XXX MP - not mp safe */

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_POLLING) {
		/* Already polling */
		KKASSERT(ifp->if_poll_cpuid >= 0);
		lwkt_serialize_exit(ifp->if_serializer);
		crit_exit();
		return 0;
	}
	KKASSERT(ifp->if_poll_cpuid < 0);
	ifp->if_flags |= IFF_POLLING;
	ifp->if_poll_cpuid = cpuid;
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_poll(ifp, POLL_REGISTER, 0);
	lwkt_serialize_exit(ifp->if_serializer);

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_register);
	msg.nm_lmsg.u.ms_resultp = ifp;

	port = cpu_portfn(cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);

	if (msg.nm_lmsg.ms_error) {
		lwkt_serialize_enter(ifp->if_serializer);
		ifp->if_flags &= ~IFF_POLLING;
		ifp->if_poll_cpuid = -1;
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_poll(ifp, POLL_DEREGISTER, 0);
		lwkt_serialize_exit(ifp->if_serializer);
		rc = 0;
	} else {
		rc = 1;
	}

	crit_exit();
	return rc;
}

static void
poll_deregister(struct netmsg *msg)
{
	struct ifnet *ifp = msg->nm_lmsg.u.ms_resultp;
	struct pollctx *pctx;
	int rc, i, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	for (i = 0 ; i < pctx->poll_handlers ; i++) {
		if (pctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == pctx->poll_handlers) {
		kprintf("ether_poll_deregister: ifp not found!!!\n");
		rc = ENOENT;
	} else {
		pctx->poll_handlers--;
		if (i < pctx->poll_handlers) {
			/* Last entry replaces this one. */
			pctx->pr[i].ifp = pctx->pr[pctx->poll_handlers].ifp;
		}

		if (pctx->poll_handlers == 0)
			systimer_adjust_periodic(&pctx->pollclock, 1);
		rc = 0;
	}
	lwkt_replymsg(&msg->nm_lmsg, rc);
}

/*
 * Remove interface from the polling list.  Occurs when polling is turned
 * off.  Called from mainline code only, not called from an interrupt.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	struct netmsg msg;
	lwkt_port_t port;
	int cpuid;

	KKASSERT(ifp != NULL);

	if (ifp->if_poll == NULL)
		return 0;

	crit_enter();

	lwkt_serialize_enter(ifp->if_serializer);
	if ((ifp->if_flags & IFF_POLLING) == 0) {
		KKASSERT(ifp->if_poll_cpuid < 0);
		lwkt_serialize_exit(ifp->if_serializer);
		crit_exit();
		return 0;
	}

	cpuid = ifp->if_poll_cpuid;
	KKASSERT(cpuid >= 0);
	KKASSERT(poll_context[cpuid] != NULL);

	ifp->if_flags &= ~IFF_POLLING;
	ifp->if_poll_cpuid = -1;
	lwkt_serialize_exit(ifp->if_serializer);

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_deregister);
	msg.nm_lmsg.u.ms_resultp = ifp;

	port = cpu_portfn(cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);

	if (!msg.nm_lmsg.ms_error) {
		lwkt_serialize_enter(ifp->if_serializer);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_poll(ifp, POLL_DEREGISTER, 1);
		lwkt_serialize_exit(ifp->if_serializer);
	}

	crit_exit();
	return 1;
}

static void
poll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		struct pollctx *pctx)
{
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "enable",
			CTLTYPE_INT | CTLFLAG_RW, pctx, 0, sysctl_polling,
			"I", "Polling enabled");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
			CTLTYPE_INT | CTLFLAG_RW, pctx, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&pctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&pctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&pctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RW,
			&pctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "each_burst", CTLFLAG_RW,
			&pctx->poll_each_burst, 0, "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst_max", CTLFLAG_RW,
			&pctx->poll_burst_max, 0, "Max Polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&pctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "reg_frac", CTLFLAG_RW,
			&pctx->reg_frac, 0,
			"Every this many cycles poll register");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&pctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&pctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&pctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RW,
			&pctx->residual_burst, 0,
			"# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&pctx->poll_handlers, 0,
			"Number of registered poll handlers");
}

static void
schedpoll_oncpu(struct pollctx *pctx, struct netmsg *msg, netisr_fn_t handler)
{
	if (msg->nm_lmsg.ms_flags & MSGF_DONE) {
		lwkt_port_t port;

		netmsg_init(msg, &netisr_adone_rport, 0, handler);
		msg->nm_lmsg.u.ms_resultp = pctx;

		port = cpu_portfn(mycpu->gd_cpuid);
		lwkt_sendmsg(port, &msg->nm_lmsg);
	}
}

static void
poll_sysctl_pollhz(struct netmsg *msg)
{
	struct pollctx *pctx;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	/*
	 * If polling is disabled or there is no device registered,
	 * don't adjust polling systimer frequency.
	 * Polling systimer frequency will be adjusted once polling
	 * is enabled and there are registered devices.
	 */
	pctx->pollhz = msg->nm_lmsg.u.ms_result;
	if (pctx->polling_enabled && pctx->poll_handlers)
		systimer_adjust_periodic(&pctx->pollclock, pctx->pollhz);
	lwkt_replymsg(&msg->nm_lmsg, 0);
}

static void
poll_sysctl_polling(struct netmsg *msg)
{
	struct pollctx *pctx;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	/*
	 * If polling is disabled or there is no device registered,
	 * cut the polling systimer frequency to 1hz.
	 */
	pctx->polling_enabled = msg->nm_lmsg.u.ms_result;
	if (pctx->polling_enabled && pctx->poll_handlers)
		systimer_adjust_periodic(&pctx->pollclock, pctx->pollhz);
	else
		systimer_adjust_periodic(&pctx->pollclock, 1);
	lwkt_replymsg(&msg->nm_lmsg, 0);
}