2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
3 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
27 * $DragonFly: src/sys/kern/kern_intr.c,v 1.18 2004/06/28 05:02:56 dillon Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/sysctl.h>
36 #include <sys/thread.h>
38 #include <sys/thread2.h>
39 #include <sys/random.h>
41 #include <machine/ipl.h>
43 #include <sys/interrupt.h>
/*
 * NOTE(review): this listing is fragmentary -- the embedded original line
 * numbers show gaps (the body of struct intrec is missing here), so only
 * comments are added; the code text is left untouched.
 */
45 typedef struct intrec {
/*
 * Per-interrupt state.  All arrays are indexed by interrupt number:
 * hardware interrupts occupy [0, NHWI) and software interrupts occupy
 * [NHWI, NHWI+NSWI) (see the range checks in register_swi/register_int).
 */
53 static intrec_t *intlists[NHWI+NSWI];	/* handler record chains */
54 static thread_t ithreads[NHWI+NSWI];	/* interrupt service threads */
55 static struct thread ithread_ary[NHWI+NSWI];	/* static thread storage */
56 static struct random_softc irandom_ary[NHWI+NSWI];	/* entropy hooks */
57 static int irunning[NHWI+NSWI];	/* interrupt-pending interlock flags */
58 static u_int ill_count[NHWI+NSWI]; /* interrupt livelock counter */
59 static u_int ill_ticks[NHWI+NSWI]; /* track elapsed to calculate freq */
60 static u_int ill_delta[NHWI+NSWI]; /* track elapsed to calculate freq */
61 static int ill_state[NHWI+NSWI]; /* current state */
62 static struct systimer ill_timer[NHWI+NSWI]; /* enforced freq. timer */
63 static struct systimer ill_rtimer[NHWI+NSWI]; /* recovery timer */
/*
 * Livelock protection states for ill_state[] (driven by ithread_handler()).
 */
65 #define LIVELOCK_NONE 0
66 #define LIVELOCK_LIMITED 1
/*
 * Tunables: above livelock_limit ints/sec an interrupt is throttled down to
 * livelock_fallback ints/sec.  Both are runtime-writable (CTLFLAG_RW).
 */
68 static int livelock_limit = 50000;
69 static int livelock_fallback = 20000;
70 SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
71 CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
72 SYSCTL_INT(_kern, OID_AUTO, livelock_fallback,
73 CTLFLAG_RW, &livelock_fallback, 0, "Livelock interrupt fallback rate");
/* Forward declaration: main loop run by each interrupt service thread. */
75 static void ithread_handler(void *arg);
/*
 * register_swi() - register a handler for software interrupt `intr'.
 * Valid swi numbers are [NHWI, NHWI+NSWI); anything else panics.
 * Delegates to register_int(), which handles both hw and sw vectors.
 * (Listing is fragmentary: return type and braces are missing here.)
 */
78 register_swi(int intr, inthand2_t *handler, void *arg, const char *name)
80 if (intr < NHWI || intr >= NHWI + NSWI)
81 panic("register_swi: bad intr %d", intr);
82 return(register_int(intr, handler, arg, name));
/*
 * register_int() - append `handler' to the handler chain for `intr' and
 * create the interrupt service thread on first use.
 *
 * NOTE(review): lines are missing from this listing (the NULL check after
 * malloc, the tail of the lwkt_create() call, the list insertion and
 * crit_exit(), the return) -- only comments are added.
 */
86 register_int(int intr, inthand2_t *handler, void *arg, const char *name)
92 if (intr < 0 || intr >= NHWI + NSWI)
93 panic("register_int: bad intr %d", intr);
/* M_NOWAIT allocation: may return NULL; failure path panics below */
95 rec = malloc(sizeof(intrec_t), M_DEVBUF, M_NOWAIT);
/* NOTE(review): copy/paste -- message should say "register_int" */
97 panic("register_swi: malloc failed");
98 rec->handler = handler;
104 list = &intlists[intr];
107 * Create an interrupt thread if necessary, leave it in an unscheduled
108 * state. The kthread restore function exits a critical section before
109 * starting the function so we need *TWO* critical sections in order
110 * for the handler to begin running in one.
112 if ((td = ithreads[intr]) == NULL) {
113 lwkt_create((void *)ithread_handler, (void *)intr, &ithreads[intr],
114 &ithread_ary[intr], TDF_STOPREQ|TDF_INTTHREAD, -1,
/* software interrupt threads run at a lower priority than hardware ones */
117 if (intr >= NHWI && intr < NHWI + NSWI)
118 lwkt_setpri(td, TDPRI_SOFT_NORM + TDPRI_CRIT * 2);
120 lwkt_setpri(td, TDPRI_INT_MED + TDPRI_CRIT * 2);
124 * Add the record to the interrupt list
/* walk to the tail; the critical section stands in for a list token */
126 crit_enter(); /* token */
127 while (*list != NULL)
128 list = &(*list)->next;
/*
 * unregister_swi() - remove a software interrupt handler; range-checks
 * the swi number exactly like register_swi().
 */
135 unregister_swi(int intr, inthand2_t *handler)
137 if (intr < NHWI || intr >= NHWI + NSWI)
/* NOTE(review): copy/paste -- message should say "unregister_swi" */
138 panic("register_swi: bad intr %d", intr);
139 unregister_int(intr, handler);
/*
 * unregister_int() - unlink the record whose handler matches `handler'
 * from intr's chain; warn if no matching record is found.
 *
 * NOTE(review): three apparent defects, left untouched because interior
 * lines are missing from this listing:
 *  - the parameter is `inthand2_t handler' (by value); register_int()
 *    takes `inthand2_t *handler' and unregister_swi() passes a pointer,
 *    so the `*' looks dropped here.
 *  - the range check uses `>' where every sibling uses `>=', accepting
 *    the out-of-range index NHWI+NSWI.
 *  - the panic message says "register_int" instead of "unregister_int".
 */
143 unregister_int(int intr, inthand2_t handler)
148 if (intr < 0 || intr > NHWI + NSWI)
149 panic("register_int: bad intr %d", intr);
150 list = &intlists[intr];
152 while ((rec = *list) != NULL) {
153 if (rec->handler == (void *)handler) {
163 printf("warning: unregister_int: int %d handler %p not found\n",
/*
 * swi_setpriority() - set the LWKT priority of a software interrupt
 * thread, if one has been created for that vector.
 */
169 swi_setpriority(int intr, int pri)
173 if (intr < NHWI || intr >= NHWI + NSWI)
/* NOTE(review): copy/paste -- message should say "swi_setpriority" */
174 panic("register_swi: bad intr %d", intr);
175 if ((td = ithreads[intr]) != NULL)
176 lwkt_setpri(td, pri);
/*
 * register_randintr() - enable entropy harvesting for `intr'.
 * NOTE(review): no range check on intr, unlike the register_* functions
 * above; the function body is truncated in this listing.
 */
180 register_randintr(int intr)
182 struct random_softc *sc = &irandom_ary[intr];
/*
 * unregister_randintr() - disable entropy harvesting for `intr'.
 * Function body is truncated in this listing.
 */
188 unregister_randintr(int intr)
190 struct random_softc *sc = &irandom_ary[intr];
195 * Dispatch an interrupt. If there's nothing to do we have a stray
196 * interrupt and can just return, leaving the interrupt masked.
198 * We need to schedule the interrupt and set its irunning[] bit. If
199 * we are not on the interrupt thread's cpu we have to send a message
200 * to the correct cpu that will issue the desired action (interlocking
201 * with the interrupt thread's critical section).
203 * We are NOT in a critical section, which will allow the scheduled
204 * interrupt to preempt us. The MP lock might *NOT* be held here.
/* IPI target: re-runs sched_ithd() on the interrupt thread's own cpu */
207 sched_ithd_remote(void *arg)
/* NOTE(review): vector smuggled through a pointer; truncating on LP64 */
209 sched_ithd((int)arg);
/* sched_ithd() proper -- its opening lines are missing from this listing */
217 if ((td = ithreads[intr]) != NULL) {
/* a thread with no registered handlers means a stray interrupt */
218 if (intlists[intr] == NULL) {
219 printf("sched_ithd: stray interrupt %d\n", intr);
/* local cpu: schedule directly; remote cpu: bounce through an IPI */
221 if (td->td_gd == mycpu) {
223 lwkt_schedule(td); /* preemption handled internally */
225 lwkt_send_ipiq(td->td_gd, sched_ithd_remote, (void *)intr);
/* no thread exists for this vector at all: also a stray interrupt */
229 printf("sched_ithd: stray interrupt %d\n", intr);
234 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
235 * might not be held).
/* wake the interrupt thread so it re-evaluates its livelock state */
238 ithread_livelock_wakeup(systimer_t info)
/* the interrupt vector was stashed in the systimer's data field */
240 int intr = (int)info->data;
243 if ((td = ithreads[intr]) != NULL)
249 * Interrupt threads run this as their main loop. The handler should be
250 * in a critical section on entry and the BGL is usually left held (for now).
252 * The irunning state starts at 0. When an interrupt occurs, the hardware
253 * interrupt is disabled and sched_ithd() is called to schedule us. The HW
254 * interrupt remains disabled until all routines have run. We then call
255 * ithread_done() to reenable the HW interrupt and deschedule us until the
255 * next interrupt.
/* measurement window over which the interrupt frequency is averaged */
258 #define LIVELOCK_TIMEFRAME(freq) ((freq) >> 2) /* 1/4 second */
261 ithread_handler(void *arg)
267 intrec_t **list = &intlists[intr];
270 struct random_softc *sc = &irandom_ary[intr];
/* lwkt_create() left us inside a critical section (see register_int()) */
272 KKASSERT(curthread->td_pri >= TDPRI_CRIT);
275 * We can get woken up by the livelock periodic code too, run the
276 * handlers only if there is a real interrupt pending. Clear
277 * irunning[] prior to running the handlers to interlock new
/* run every registered handler record in chain order */
281 for (rec = *list; rec; rec = nrec) {
283 rec->handler(rec->argument);
287 * This is our interrupt hook to add rate randomness to the random
291 add_interrupt_randomness(intr);
294 * This is our livelock test. If we hit the rate limit we
295 * limit ourselves to 10000 interrupts/sec until the rate
296 * falls below 50% of that value, then we unlimit again.
/* NOTE(review): the "10000" above is stale -- the code actually limits to
 * livelock_fallback (default 20000) once livelock_limit (50000) is hit. */
298 cputicks = cputimer_count();
/* clamp the elapsed tick delta to at most one second worth of ticks */
300 bticks = cputicks - ill_ticks[intr];
301 ill_ticks[intr] = cputicks;
302 if (bticks > cputimer_freq)
303 bticks = cputimer_freq;
/* two-state machine: LIVELOCK_NONE (unthrottled) <-> LIVELOCK_LIMITED */
305 switch(ill_state[intr]) {
307 ill_delta[intr] += bticks;
/* wait until a full LIVELOCK_TIMEFRAME window has accumulated */
308 if (ill_delta[intr] < LIVELOCK_TIMEFRAME(cputimer_freq))
/* interrupts/sec over the window; 64-bit math avoids overflow */
310 freq = (int64_t)ill_count[intr] * cputimer_freq / ill_delta[intr];
313 if (freq < livelock_limit)
315 printf("intr %d at %d hz, livelocked! limiting at %d hz\n",
316 intr, freq, livelock_fallback);
317 ill_state[intr] = LIVELOCK_LIMITED;
319 /* force periodic check to avoid stale removal (if ints stop) */
320 systimer_init_periodic(&ill_rtimer[intr], ithread_livelock_wakeup,
323 case LIVELOCK_LIMITED:
325 * Delay (us) before rearming the interrupt
/* one-shot wakeup paces this thread at roughly livelock_fallback hz */
327 systimer_init_oneshot(&ill_timer[intr], ithread_livelock_wakeup,
328 (void *)intr, 1 + 1000000 / livelock_fallback);
329 lwkt_deschedule_self(curthread);
332 /* in case we were woken up by something else */
333 systimer_del(&ill_timer[intr]);
336 * Calculate interrupt rate (note that due to our delay it
337 * will not exceed livelock_fallback).
339 ill_delta[intr] += bticks;
340 if (ill_delta[intr] < LIVELOCK_TIMEFRAME(cputimer_freq))
342 freq = (int64_t)ill_count[intr] * cputimer_freq / ill_delta[intr];
/* drop the throttle once the rate falls below half the fallback rate */
345 if (freq < (livelock_fallback >> 1)) {
346 printf("intr %d at %d hz, removing livelock limit\n",
348 ill_state[intr] = LIVELOCK_NONE;
349 systimer_del(&ill_rtimer[intr]);
355 * If another interrupt has not been queued we can reenable the
356 * hardware interrupt and go to sleep.
358 if (irunning[intr] == 0)
364 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
365 * The data for this is machine dependent, and the declarations are in
366 * machine dependent code. The layout of intrnames and intrcnt however is
369 * We do not know the length of intrcnt and intrnames at compile time, so
370 * calculate things at run time.
/* export the raw intrnames[] name table as an opaque sysctl blob */
373 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
375 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
379 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
380 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
/* export the intrcnt[] counter array as an opaque sysctl blob */
383 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
385 return (sysctl_handle_opaque(oidp, intrcnt,
386 (char *)eintrcnt - (char *)intrcnt, req));
389 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
390 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");