/*
 * gitweb capture header: sys/kern/kern_intr.c (dragonfly.git).
 * The "put pthread_timedjoin_np in right order" line is the captured
 * commit subject from the web listing and is unrelated to this file.
 */
1/*
2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
3 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
27 * $DragonFly: src/sys/kern/kern_intr.c,v 1.23 2005/10/12 17:39:49 dillon Exp $
28 *
29 */
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/malloc.h>
34#include <sys/kernel.h>
35#include <sys/sysctl.h>
36#include <sys/thread.h>
37#include <sys/proc.h>
38#include <sys/thread2.h>
39#include <sys/random.h>
40
41#include <machine/ipl.h>
42
43#include <sys/interrupt.h>
44
/*
 * One registered handler on an interrupt's handler chain.  Records are
 * singly linked in registration order; the list head for interrupt N
 * lives in intlists[N].
 */
typedef struct intrec {
        struct intrec *next;            /* next handler on this intr's chain */
        inthand2_t *handler;            /* handler function to invoke */
        void *argument;                 /* opaque argument passed to handler */
        const char *name;               /* registrant name (diagnostics) */
        int intr;                       /* interrupt number this record serves */
} intrec_t;
52
static intrec_t *intlists[NHWI+NSWI];           /* per-intr handler chains */
static thread_t ithreads[NHWI+NSWI];            /* per-intr LWKT threads */
static struct thread ithread_ary[NHWI+NSWI];    /* backing storage for ithreads */
static struct random_softc irandom_ary[NHWI+NSWI]; /* entropy-harvest state */
static int irunning[NHWI+NSWI];                 /* intr-pending flags; written only
                                                 * on the ithread's own cpu (see
                                                 * sched_ithd / ithread_handler) */

/* States of the livelock limiter state machine in ithread_handler(). */
#define LIVELOCK_NONE           0
#define LIVELOCK_LIMITED        1

/*
 * Rate (hz) at which an interrupt is declared livelocked, and the rate
 * enforced while the limiter is engaged.  Both tunable via sysctl.
 */
static int livelock_limit = 50000;
static int livelock_fallback = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
        CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_fallback,
        CTLFLAG_RW, &livelock_fallback, 0, "Livelock interrupt fallback rate");

static void ithread_handler(void *arg);
70
71/*
72 * Register an SWI or INTerrupt handler.
73 */
74thread_t
75register_swi(int intr, inthand2_t *handler, void *arg, const char *name)
76{
77 if (intr < NHWI || intr >= NHWI + NSWI)
78 panic("register_swi: bad intr %d", intr);
79 return(register_int(intr, handler, arg, name));
80}
81
82thread_t
83register_int(int intr, inthand2_t *handler, void *arg, const char *name)
84{
85 intrec_t **list;
86 intrec_t *rec;
87 thread_t td;
88
89 if (intr < 0 || intr >= NHWI + NSWI)
90 panic("register_int: bad intr %d", intr);
91
92 rec = malloc(sizeof(intrec_t), M_DEVBUF, M_NOWAIT);
93 if (rec == NULL)
94 panic("register_swi: malloc failed");
95 rec->handler = handler;
96 rec->argument = arg;
97 rec->name = name;
98 rec->intr = intr;
99 rec->next = NULL;
100
101 list = &intlists[intr];
102
103 /*
104 * Create an interrupt thread if necessary, leave it in an unscheduled
105 * state.
106 */
107 if ((td = ithreads[intr]) == NULL) {
108 lwkt_create((void *)ithread_handler, (void *)intr, &ithreads[intr],
109 &ithread_ary[intr], TDF_STOPREQ|TDF_INTTHREAD, -1,
110 "ithread %d", intr);
111 td = ithreads[intr];
112 if (intr >= NHWI && intr < NHWI + NSWI)
113 lwkt_setpri(td, TDPRI_SOFT_NORM);
114 else
115 lwkt_setpri(td, TDPRI_INT_MED);
116 }
117
118 /*
119 * Add the record to the interrupt list
120 */
121 crit_enter(); /* token */
122 while (*list != NULL)
123 list = &(*list)->next;
124 *list = rec;
125 crit_exit();
126 return(td);
127}
128
129void
130unregister_swi(int intr, inthand2_t *handler)
131{
132 if (intr < NHWI || intr >= NHWI + NSWI)
133 panic("register_swi: bad intr %d", intr);
134 unregister_int(intr, handler);
135}
136
137void
138unregister_int(int intr, inthand2_t handler)
139{
140 intrec_t **list;
141 intrec_t *rec;
142
143 if (intr < 0 || intr > NHWI + NSWI)
144 panic("register_int: bad intr %d", intr);
145 list = &intlists[intr];
146 crit_enter();
147 while ((rec = *list) != NULL) {
148 if (rec->handler == (void *)handler) {
149 *list = rec->next;
150 break;
151 }
152 list = &rec->next;
153 }
154 crit_exit();
155 if (rec != NULL) {
156 free(rec, M_DEVBUF);
157 } else {
158 printf("warning: unregister_int: int %d handler %p not found\n",
159 intr, handler);
160 }
161}
162
163void
164swi_setpriority(int intr, int pri)
165{
166 struct thread *td;
167
168 if (intr < NHWI || intr >= NHWI + NSWI)
169 panic("register_swi: bad intr %d", intr);
170 if ((td = ithreads[intr]) != NULL)
171 lwkt_setpri(td, pri);
172}
173
174void
175register_randintr(int intr)
176{
177 struct random_softc *sc = &irandom_ary[intr];
178 sc->sc_intr = intr;
179 sc->sc_enabled = 1;
180}
181
182void
183unregister_randintr(int intr)
184{
185 struct random_softc *sc = &irandom_ary[intr];
186 sc->sc_enabled = 0;
187}
188
189/*
190 * Dispatch an interrupt. If there's nothing to do we have a stray
191 * interrupt and can just return, leaving the interrupt masked.
192 *
193 * We need to schedule the interrupt and set its irunning[] bit. If
194 * we are not on the interrupt thread's cpu we have to send a message
195 * to the correct cpu that will issue the desired action (interlocking
196 * with the interrupt thread's critical section).
197 *
198 * We are NOT in a critical section, which will allow the scheduled
199 * interrupt to preempt us. The MP lock might *NOT* be held here.
200 */
/*
 * IPI callback: re-dispatch the interrupt on the cpu that owns the
 * interrupt thread.  arg is the interrupt number cast to void *.
 */
static void
sched_ithd_remote(void *arg)
{
        sched_ithd((int)arg);
}
206
207void
208sched_ithd(int intr)
209{
210 thread_t td;
211
212 if ((td = ithreads[intr]) != NULL) {
213 if (intlists[intr] == NULL) {
214 printf("sched_ithd: stray interrupt %d\n", intr);
215 } else {
216 if (td->td_gd == mycpu) {
217 irunning[intr] = 1;
218 lwkt_schedule(td); /* preemption handled internally */
219 } else {
220 lwkt_send_ipiq(td->td_gd, sched_ithd_remote, (void *)intr);
221 }
222 }
223 } else {
224 printf("sched_ithd: stray interrupt %d\n", intr);
225 }
226}
227
228/*
229 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
230 * might not be held).
231 */
232static void
233ithread_livelock_wakeup(systimer_t info)
234{
235 int intr = (int)info->data;
236 thread_t td;
237
238 if ((td = ithreads[intr]) != NULL)
239 lwkt_schedule(td);
240}
241
/*
 * XXX(review): stale/incomplete comment — the original sentence breaks
 * off at "It resets the" and no corresponding function follows.  It
 * presumably described a periodic SYSTIMER reset routine that was
 * removed or never committed; confirm and complete or delete.
 */
247/*
248 * Interrupt threads run this as their main loop.
249 *
250 * The handler begins execution outside a critical section and with the BGL
251 * held.
252 *
 * The irunning state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled
255 * until all routines have run. We then call ithread_done() to reenable
256 * the HW interrupt and deschedule us until the next interrupt.
257 *
258 * We are responsible for atomically checking irunning[] and ithread_done()
259 * is responsible for atomically checking for platform-specific delayed
260 * interrupts. irunning[] for our irq is only set in the context of our cpu,
261 * so a critical section is a sufficient interlock.
262 */
/* Measurement window for the livelock rate calculation. */
#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */

/*
 * Main loop for an interrupt service thread.  arg is the interrupt
 * number cast to void *.  Runs the handler chain for the vector, mixes
 * interrupt timing into the entropy pool when enabled, and applies a
 * two-state (NONE/LIMITED) livelock limiter driven by sys_cputimer.
 * Never returns.
 */
static void
ithread_handler(void *arg)
{
        int intr = (int)arg;            /* interrupt number */
        int freq;                       /* measured interrupt rate, hz */
        u_int bticks;                   /* cputimer ticks since last pass */
        u_int cputicks;                 /* current cputimer count */
        intrec_t **list = &intlists[intr];
        intrec_t *rec;
        intrec_t *nrec;
        struct random_softc *sc = &irandom_ary[intr];
        globaldata_t gd = mycpu;
        struct systimer ill_timer;      /* enforced freq. timer */
        struct systimer ill_rtimer;     /* recovery timer */
        u_int ill_count = 0;            /* interrupt livelock counter */
        u_int ill_ticks = 0;            /* track elapsed to calculate freq */
        u_int ill_delta = 0;            /* track elapsed to calculate freq */
        int ill_state = 0;              /* current state (LIVELOCK_NONE) */

        /*
         * The loop must be entered with one critical section held.
         */
        crit_enter_gd(gd);

        for (;;) {
                /*
                 * We can get woken up by the livelock periodic code too, run the
                 * handlers only if there is a real interrupt pending. XXX
                 *
                 * Clear irunning[] prior to running the handlers to interlock
                 * against new events occurring during processing of existing
                 * events.
                 *
                 * For now run each handler in a critical section.
                 */
                irunning[intr] = 0;
                for (rec = *list; rec; rec = nrec) {
                        /* fetch next first: handler may unregister its own record */
                        nrec = rec->next;
                        rec->handler(rec->argument);
                }

                /*
                 * Do a quick exit/enter to catch any higher-priority
                 * interrupt sources and so user/system/interrupt statistics
                 * work for interrupt threads.
                 */
                crit_exit_gd(gd);
                crit_enter_gd(gd);

                /*
                 * This is our interrupt hook to add rate randomness to the random
                 * number generator.
                 */
                if (sc->sc_enabled)
                        add_interrupt_randomness(intr);

                /*
                 * This is our livelock test.  If we hit the rate limit we
                 * limit ourselves to X interrupts/sec until the rate
                 * falls below 50% of that value, then we unlimit again.
                 *
                 * XXX calling cputimer_count() is expensive but a livelock may
                 * prevent other interrupts from occurring so we cannot use ticks.
                 */
                cputicks = sys_cputimer->count();
                ++ill_count;
                bticks = cputicks - ill_ticks;
                ill_ticks = cputicks;
                /* clamp the delta to one second's worth of ticks */
                if (bticks > sys_cputimer->freq)
                        bticks = sys_cputimer->freq;

                switch(ill_state) {
                case LIVELOCK_NONE:
                        ill_delta += bticks;
                        if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
                                break;
                        /* 64-bit intermediate avoids count*freq overflow */
                        freq = (int64_t)ill_count * sys_cputimer->freq /
                               ill_delta;
                        ill_delta = 0;
                        ill_count = 0;
                        if (freq < livelock_limit)
                                break;
                        printf("intr %d at %d hz, livelocked! limiting at %d hz\n",
                               intr, freq, livelock_fallback);
                        ill_state = LIVELOCK_LIMITED;
                        bticks = 0;
                        /* force periodic check to avoid stale removal (if ints stop) */
                        systimer_init_periodic(&ill_rtimer, ithread_livelock_wakeup,
                                               (void *)intr, 1);
                        /* fall through */
                case LIVELOCK_LIMITED:
                        /*
                         * Delay (us) before rearming the interrupt
                         */
                        systimer_init_oneshot(&ill_timer, ithread_livelock_wakeup,
                                              (void *)intr, 1 + 1000000 / livelock_fallback);
                        lwkt_deschedule_self(curthread);
                        lwkt_switch();

                        /* in case we were woken up by something else */
                        systimer_del(&ill_timer);

                        /*
                         * Calculate interrupt rate (note that due to our delay it
                         * will not exceed livelock_fallback).
                         */
                        ill_delta += bticks;
                        if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
                                break;
                        freq = (int64_t)ill_count * sys_cputimer->freq / ill_delta;
                        ill_delta = 0;
                        ill_count = 0;
                        if (freq < (livelock_fallback >> 1)) {
                                printf("intr %d at %d hz, removing livelock limit\n",
                                       intr, freq);
                                ill_state = LIVELOCK_NONE;
                                systimer_del(&ill_rtimer);
                        }
                        break;
                }

                /*
                 * There are two races here.  irunning[] is set by sched_ithd()
                 * in the context of our cpu and is critical-section safe.  We
                 * are responsible for checking it.  ipending is not critical
                 * section safe and must be handled by the platform specific
                 * ithread_done() routine.
                 */
                if (irunning[intr] == 0)
                        ithread_done(intr);
                /* must be in critical section on loop */
        }
        /* not reached */
}
398
399/*
400 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
401 * The data for this machine dependent, and the declarations are in machine
402 * dependent code. The layout of intrnames and intrcnt however is machine
403 * independent.
404 *
405 * We do not know the length of intrcnt and intrnames at compile time, so
406 * calculate things at run time.
407 */
408static int
409sysctl_intrnames(SYSCTL_HANDLER_ARGS)
410{
411 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
412 req));
413}
414
415SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
416 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
417
418static int
419sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
420{
421 return (sysctl_handle_opaque(oidp, intrcnt,
422 (char *)eintrcnt - (char *)intrcnt, req));
423}
424
425SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
426 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");