/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.24 2005/10/13 00:02:22 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>

#include <machine/ipl.h>
#include <machine/frame.h>

#include <sys/interrupt.h>

typedef struct intrec {
	struct lwkt_serialize *serializer;
	struct thread	i_thread;
	struct random_softc i_random;
} intr_info_ary[NHWI + NSWI];

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

#define LIVELOCK_NONE		0
#define LIVELOCK_LIMITED	1

static int livelock_limit = 50000;
static int livelock_fallback = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
	CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_fallback,
	CTLFLAG_RW, &livelock_fallback, 0, "Livelock interrupt fallback rate");
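
/*
 * Both limits are run-time tunable via sysctl(8).  For example (the values
 * below are purely illustrative, not recommendations):
 *
 *	sysctl kern.livelock_limit=30000
 *	sysctl kern.livelock_fallback=10000
 */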

static void ithread_handler(void *arg);

/*
 * Register an SWI or INTerrupt handler.
 */
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
	    struct lwkt_serialize *serializer)
	if (intr < NHWI || intr >= NHWI + NSWI)
		panic("register_swi: bad intr %d", intr);
	return(register_int(intr, handler, arg, name, serializer, 0));

register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	    struct lwkt_serialize *serializer, int intr_flags)
	struct intr_info *info;
	struct intrec **list;

	if (intr < 0 || intr >= NHWI + NSWI)
		panic("register_int: bad intr %d", intr);

	info = &intr_info_ary[intr];

	rec = malloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
	rec->name = malloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
	strcpy(rec->name, name);

	rec->handler = handler;
	rec->intr_flags = intr_flags;
	rec->serializer = serializer;

	list = &info->i_reclist;

	/*
	 * Keep track of how many fast and slow interrupts we have.
	 */
	if (intr_flags & INTR_FAST)
	/*
	 * Create an interrupt thread if necessary, leave it in an unscheduled
	 * state.
	 */
	if (info->i_valid_thread == 0) {
		info->i_valid_thread = 1;
		lwkt_create((void *)ithread_handler, (void *)intr, NULL,
			&info->i_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
		if (intr >= NHWI && intr < NHWI + NSWI)
			lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
			lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
		info->i_thread.td_preemptable = lwkt_preempt;

	/*
	 * Add the record to the interrupt list
	 */
	crit_enter();	/* token */
	while (*list != NULL)
		list = &(*list)->next;
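
/*
 * Illustrative usage sketch: a hypothetical driver "foo" with an MP-safe
 * FAST handler foo_intr(), a softc pointer sc, and hardware interrupt
 * number irq might register and later unregister itself roughly as follows
 * (all of the "foo" names and irq are assumptions for the example only):
 *
 *	void *id;
 *
 *	id = register_int(irq, foo_intr, sc, "foo", NULL,
 *			  INTR_FAST | INTR_MPSAFE);
 *	...
 *	unregister_int(id);
 *
 * A software interrupt would use register_swi()/unregister_swi() with an
 * intr in the NHWI..NHWI+NSWI-1 range instead.
 */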

unregister_swi(void *id)
	return(unregister_int(id));

unregister_int(void *id)
	struct intr_info *info;
	struct intrec **list;

	intr = ((intrec_t)id)->intr;

	if (intr < 0 || intr >= NHWI + NSWI)
		panic("unregister_int: bad intr %d", intr);

	info = &intr_info_ary[intr];

	/*
	 * Remove the interrupt descriptor
	 */
	list = &info->i_reclist;
	while ((rec = *list) != NULL) {

		/*
		 * Free it, adjust interrupt type counts
		 */
		if (rec->intr_flags & INTR_FAST)
		free(rec->name, M_DEVBUF);

	printf("warning: unregister_int: int %d handler for %s not found\n",
		intr, ((intrec_t)id)->name);

	/*
	 * Return the number of interrupt vectors still registered on this intr
	 */
	return(info->i_fast + info->i_slow);

get_registered_intr(void *id)
	return(((intrec_t)id)->intr);

get_registered_name(int intr)
	if (intr < 0 || intr >= NHWI + NSWI)
		panic("get_registered_name: bad intr %d", intr);

	if ((rec = intr_info_ary[intr].i_reclist) == NULL)

count_registered_ints(int intr)
	struct intr_info *info;
	if (intr < 0 || intr >= NHWI + NSWI)
		panic("count_registered_ints: bad intr %d", intr);
	info = &intr_info_ary[intr];
	return(info->i_fast + info->i_slow);

get_interrupt_counter(int intr)
	struct intr_info *info;
	if (intr < 0 || intr >= NHWI + NSWI)
		panic("get_interrupt_counter: bad intr %d", intr);
	info = &intr_info_ary[intr];
	return(info->i_count);

swi_setpriority(int intr, int pri)
	struct intr_info *info;

	if (intr < NHWI || intr >= NHWI + NSWI)
		panic("swi_setpriority: bad intr %d", intr);
	info = &intr_info_ary[intr];
	if (info->i_valid_thread)
		lwkt_setpri(&info->i_thread, pri);

register_randintr(int intr)
	struct intr_info *info;

	if (intr < NHWI || intr >= NHWI + NSWI)
		panic("register_randintr: bad intr %d", intr);
	info = &intr_info_ary[intr];
	info->i_random.sc_intr = intr;
	info->i_random.sc_enabled = 1;

unregister_randintr(int intr)
	struct intr_info *info;

	if (intr < NHWI || intr >= NHWI + NSWI)
		panic("unregister_randintr: bad intr %d", intr);
	info = &intr_info_ary[intr];
	info->i_random.sc_enabled = 0;
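
/*
 * Illustrative note: a driver whose interrupt timing is a useful entropy
 * source would typically call register_randintr(intr) after registering
 * its handler.  While sc_enabled is set the interrupt thread feeds
 * add_interrupt_randomness() on every interrupt (see ithread_handler()
 * below); unregister_randintr(intr) turns this off again.
 */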

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
 */
sched_ithd_remote(void *arg)
	sched_ithd((int)arg);

	struct intr_info *info;

	info = &intr_info_ary[intr];

	if (info->i_valid_thread) {
		if (info->i_reclist == NULL) {
			printf("sched_ithd: stray interrupt %d\n", intr);
		if (info->i_thread.td_gd == mycpu) {
			/* preemption handled internally */
			lwkt_schedule(&info->i_thread);
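			/*
			 * Not on the ithread's cpu: forward the request to
			 * the owning cpu with an IPI message.
			 */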
			lwkt_send_ipiq(info->i_thread.td_gd,
				sched_ithd_remote, (void *)intr);

		printf("sched_ithd: stray interrupt %d\n", intr);

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
ithread_livelock_wakeup(systimer_t st)
	struct intr_info *info;

	info = &intr_info_ary[(int)st->data];
	if (info->i_valid_thread)
		lwkt_schedule(&info->i_thread);

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe frame);

ithread_fast_handler(struct intrframe frame)
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec, next_rec;

	info = &intr_info_ary[intr];

	/*
	 * If we are not processing any FAST interrupts, just schedule the thing.
	 * (since we aren't in a critical section, this can result in a
	 */
	if (info->i_fast == 0) {

	/*
	 * This should not normally occur since interrupts ought to be
	 * masked if the ithread has been scheduled or is running.
	 */

	/*
	 * Bump the interrupt nesting level to process any FAST interrupts.
	 * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
	 * schedule the interrupt thread to deal with the issue instead.
	 *
	 * To reduce overhead, just leave the MP lock held once it has been
	 */
	++gd->gd_intr_nesting_level;

	must_schedule = info->i_slow;

	list = &info->i_reclist;
	for (rec = *list; rec; rec = next_rec) {
		next_rec = rec->next;	/* rec may be invalid after call */

		if (rec->intr_flags & INTR_FAST) {
			if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
				if (try_mplock() == 0) {
					/*
					 * XXX forward to the cpu holding the MP lock
					 */
			if (rec->serializer) {
				must_schedule += lwkt_serialize_handler_try(
					rec->serializer, rec->handler,
					rec->argument, &frame);
				rec->handler(rec->argument, &frame);

	--gd->gd_intr_nesting_level;

	/*
	 * If we had a problem, schedule the thread to catch the missed
	 * records (it will just re-run all of them).  A return value of 0
	 * indicates that all handlers have been run and the interrupt can
	 * be re-enabled, and a non-zero return indicates that the interrupt
	 * thread controls re-enablement.
	 */
	return(must_schedule);

	/* could not get the MP lock, forward the interrupt */ \
	movl	mp_lock, %eax ;		/* check race */ \
	cmpl	$MP_FREE_LOCK,%eax ; \
	incl	PCPU(cnt)+V_FORWARDED_INTS ; \
	movl	$irq_num,8(%esp) ; \
	movl	$forward_fastint_remote,4(%esp) ; \
	call	lwkt_send_ipiq_bycpu ; \

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is run.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)	((freq) >> 2)	/* 1/4 second */
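
/*
 * Worked example of the rate check below, assuming a sys_cputimer frequency
 * of 1000000 (an assumption for illustration only): the livelock window is
 * LIVELOCK_TIMEFRAME(1000000) = 250000 ticks, i.e. 1/4 second.  If ill_count
 * reaches 15000 interrupts over a window of 250000 ticks the computed rate is
 *
 *	15000 * 1000000 / 250000 = 60000 hz
 *
 * which exceeds the default livelock_limit of 50000 hz, so the thread enters
 * the LIVELOCK_LIMITED state and throttles itself to livelock_fallback
 * (20000 hz by default) until the rate drops below half of that value.
 */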

ithread_handler(void *arg)
	struct intr_info *info;
	struct intrec **list;
	globaldata_t gd = mycpu;
	struct systimer ill_timer;	/* enforced freq. timer */
	struct systimer ill_rtimer;	/* recovery timer */
	u_int ill_count = 0;		/* interrupt livelock counter */
	u_int ill_ticks = 0;		/* track elapsed to calculate freq */
	u_int ill_delta = 0;		/* track elapsed to calculate freq */
	int ill_state = 0;		/* current state */

	info = &intr_info_ary[intr];
	list = &info->i_reclist;

	/*
	 * The loop must be entered with one critical section held.
	 */

		/*
		 * We can get woken up by the livelock periodic code too, run the
		 * handlers only if there is a real interrupt pending.  XXX
		 *
		 * Clear i_running prior to running the handlers to interlock
		 * against new events occurring during processing of existing events.
		 *
		 * Run each handler in a critical section.  Note that we run both
		 * FAST and SLOW designated service routines.
		 */
		for (rec = *list; rec; rec = nrec) {
			if (rec->serializer) {
				lwkt_serialize_handler_call(rec->serializer,
					rec->handler, rec->argument, NULL);
				rec->handler(rec->argument, NULL);

		/*
		 * Do a quick exit/enter to catch any higher-priority
		 * interrupt sources and so user/system/interrupt statistics
		 * work for interrupt threads.
		 */

		/*
		 * This is our interrupt hook to add rate randomness to the random
		 */
		if (info->i_random.sc_enabled)
			add_interrupt_randomness(intr);

		/*
		 * This is our livelock test.  If we hit the rate limit we
		 * limit ourselves to X interrupts/sec until the rate
		 * falls below 50% of that value, then we unlimit again.
		 *
		 * XXX calling cputimer_count() is expensive but a livelock may
		 * prevent other interrupts from occurring so we cannot use ticks.
		 */
		cputicks = sys_cputimer->count();
		bticks = cputicks - ill_ticks;
		ill_ticks = cputicks;
		if (bticks > sys_cputimer->freq)
			bticks = sys_cputimer->freq;
			if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
			freq = (int64_t)ill_count * sys_cputimer->freq /
			if (freq < livelock_limit)
			printf("intr %d at %d hz, livelocked! limiting at %d hz\n",
				intr, freq, livelock_fallback);
			ill_state = LIVELOCK_LIMITED;
			/* force periodic check to avoid stale removal (if ints stop) */
			systimer_init_periodic(&ill_rtimer, ithread_livelock_wakeup,
		case LIVELOCK_LIMITED:
			/*
			 * Delay (us) before rearming the interrupt
			 */
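			/*
			 * Illustrative: with the default livelock_fallback of
			 * 20000 hz the oneshot below fires after
			 * 1 + 1000000 / 20000 = 51 us.
			 */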
			systimer_init_oneshot(&ill_timer, ithread_livelock_wakeup,
				(void *)intr, 1 + 1000000 / livelock_fallback);
			lwkt_deschedule_self(curthread);

			/* in case we were woken up by something else */
			systimer_del(&ill_timer);

			/*
			 * Calculate interrupt rate (note that due to our delay it
			 * will not exceed livelock_fallback).
			 */
			if (ill_delta < LIVELOCK_TIMEFRAME(sys_cputimer->freq))
			freq = (int64_t)ill_count * sys_cputimer->freq / ill_delta;
			if (freq < (livelock_fallback >> 1)) {
				printf("intr %d at %d hz, removing livelock limit\n",
				ill_state = LIVELOCK_NONE;
				systimer_del(&ill_rtimer);

		/*
		 * There are two races here.  i_running is set by sched_ithd()
		 * in the context of our cpu and is critical-section safe.  We
		 * are responsible for checking it.  ipending is not critical
		 * section safe and must be handled by the platform specific
		 * ithread_done() routine.
		 */
		if (info->i_running == 0)
		/* must be in critical section on loop */

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt, however,
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
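
/*
 * For example (illustrative output only): a vector with no handlers reports
 * its name as "irq<N>", while a vector shared by several handlers reports
 * them joined with '/', e.g. "em0/ahc0".
 */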
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
	struct intr_info *info;

	for (intr = 0; error == 0 && intr < NHWI + NSWI; ++intr) {
		info = &intr_info_ary[intr];
		for (rec = info->i_reclist; rec; rec = rec->next) {
			snprintf(buf + len, sizeof(buf) - len, "%s%s",
				(len ? "/" : ""), rec->name);
			len += strlen(buf + len);
		snprintf(buf, sizeof(buf), "irq%d", intr);
		error = SYSCTL_OUT(req, buf, len + 1);

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrnames, "", "Interrupt Names");

sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
	struct intr_info *info;

	for (intr = 0; intr < NHWI + NSWI; ++intr) {
		info = &intr_info_ary[intr];
		error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
	NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");