 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.31 2005/11/02 22:59:48 dillon Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/machintr.h>
#include <machine/ipl.h>
#include <machine/frame.h>
#include <sys/interrupt.h>
typedef struct intrec {
    struct lwkt_serialize *serializer;
    struct thread i_thread;
    struct random_softc i_random;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;
#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD    0
#define ISTATE_NORMAL      1
#define ISTATE_LIVELOCKED  2

static int livelock_limit = 50000;
static int livelock_lowater = 20000;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
    CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
    CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");

static int emergency_intr_enable = 0;   /* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;    /* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");
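/*
 * Illustrative note (added, not from the original source): the emergency
 * polling machinery is normally driven through the loader tunables and
 * sysctls declared above.  A minimal usage sketch, assuming the standard
 * tunable/sysctl mechanisms:
 *
 *      # /boot/loader.conf -- picked up by the TUNABLE_INT() hooks at boot
 *      kern.emergency_intr_enable="1"
 *      kern.emergency_intr_freq="100"
 *
 *      # or at run time, through the SYSCTL_PROC handlers below
 *      sysctl kern.emergency_intr_enable=1
 *      sysctl kern.emergency_intr_freq=100
 *
 * sysctl_emergency_freq() clamps the requested frequency to
 * EMERGENCY_INTR_POLLING_FREQ_MAX (20000) and reprograms
 * emergency_intr_timer; when polling is disabled the timer idles at 1hz.
 */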
 * Sysctl support routines

sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
    emergency_intr_enable = enabled;
    if (emergency_intr_enable) {
        emergency_intr_timer.periodic =
            sys_cputimer->fromhz(emergency_intr_freq);
        emergency_intr_timer.periodic = sys_cputimer->fromhz(1);

sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
        phz = EMERGENCY_INTR_POLLING_FREQ_MAX;
    emergency_intr_freq = phz;
    if (emergency_intr_enable) {
        emergency_intr_timer.periodic =
            sys_cputimer->fromhz(emergency_intr_freq);
        emergency_intr_timer.periodic = sys_cputimer->fromhz(1);
 * Register an SWI or INTerrupt handler.

register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
        struct lwkt_serialize *serializer)
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, serializer, 0));

register_int(int intr, inthand2_t *handler, void *arg, const char *name,
        struct lwkt_serialize *serializer, int intr_flags)
    struct intr_info *info;
    struct intrec **list;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    rec = malloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = malloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);
    rec->handler = handler;
    rec->intr_flags = intr_flags;
    rec->serializer = serializer;

    list = &info->i_reclist;

     * Keep track of how many fast and slow interrupts we have.
    if (intr_flags & INTR_FAST)

     * Create an emergency polling thread and set up a systimer to wake
    if (emergency_intr_thread.td_kstack == NULL) {
        lwkt_create(ithread_emergency, NULL, NULL,
            &emergency_intr_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
        systimer_init_periodic_nq(&emergency_intr_timer,
            emergency_intr_timer_callback, &emergency_intr_thread,
            (emergency_intr_enable ? emergency_intr_freq : 1));

     * Create an interrupt thread if necessary, leave it in an unscheduled
    if (info->i_state == ISTATE_NOTHREAD) {
        info->i_state = ISTATE_NORMAL;
        lwkt_create((void *)ithread_handler, (void *)intr, NULL,
            &info->i_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
        if (intr >= FIRST_SOFTINT)
            lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
            lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
        info->i_thread.td_preemptable = lwkt_preempt;

     * Add the record to the interrupt list
    crit_enter();   /* token */
    while (*list != NULL)
        list = &(*list)->next;

     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
    if (intr < FIRST_SOFTINT) {
        if (max_installed_hard_intr <= intr)
            max_installed_hard_intr = intr + 1;
        if (max_installed_soft_intr <= intr)
            max_installed_soft_intr = intr + 1;
unregister_swi(void *id)
    return(unregister_int(id));

unregister_int(void *id)
    struct intr_info *info;
    struct intrec **list;

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

     * Remove the interrupt descriptor
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {

         * Free it, adjust interrupt type counts
        if (rec->intr_flags & INTR_FAST)
        free(rec->name, M_DEVBUF);

    printf("warning: unregister_int: int %d handler for %s not found\n",
        intr, ((intrec_t)id)->name);

     * Return the number of interrupt vectors still registered on this intr
    return(info->i_fast + info->i_slow);
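/*
 * Illustrative sketch (added, not from the original source): a typical
 * driver attach/detach pair using the registration API above.  The device
 * name, softc fields, and interrupt routine are hypothetical; the handler
 * signature follows inthand2_t as used by the dispatch code in this file.
 *
 *      static void
 *      foo_intr(void *arg, void *frame)
 *      {
 *              struct foo_softc *sc = arg;
 *              // acknowledge and service the (hypothetical) device
 *      }
 *
 *      // attach: register on the device irq, serialized against the driver
 *      sc->sc_irec = register_int(sc->sc_irq, foo_intr, sc, "foo0",
 *                                 &sc->sc_serializer, 0);
 *
 *      // detach: unregister_int() returns how many handlers remain on
 *      // that vector
 *      remaining = unregister_int(sc->sc_irec);
 */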
get_registered_intr(void *id)
    return(((intrec_t)id)->intr);

get_registered_name(int intr)
    if (intr < 0 || intr >= MAX_INTS)
        panic("get_registered_name: bad intr %d", intr);
    if ((rec = intr_info_ary[intr].i_reclist) == NULL)

count_registered_ints(int intr)
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("count_registered_ints: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_fast + info->i_slow);

get_interrupt_counter(int intr)
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_count);

swi_setpriority(int intr, int pri)
    struct intr_info *info;

    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("swi_setpriority: bad intr %d", intr);
    info = &intr_info_ary[intr];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_setpri(&info->i_thread, pri);
register_randintr(int intr)
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_intr = intr;
    info->i_random.sc_enabled = 1;
unregister_randintr(int intr)
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_enabled = 0;

next_registered_randintr(int intr)
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("next_registered_randintr: bad intr %d", intr);
    while (intr < MAX_INTS) {
        info = &intr_info_ary[intr];
        if (info->i_random.sc_enabled)
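/*
 * Illustrative sketch (added, not from the original source): how a consumer
 * such as the entropy harvester might walk the interrupts enabled via
 * register_randintr(), using next_registered_randintr() as a cursor.  The
 * loop shape is an assumption based on the interface above.
 *
 *      int intr;
 *
 *      for (intr = next_registered_randintr(0);
 *           intr < MAX_INTS;
 *           intr = next_registered_randintr(intr + 1)) {
 *              // intr identifies a source with i_random.sc_enabled set
 *      }
 */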
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wake up a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
sched_ithd_remote(void *arg)
    sched_ithd((int)arg);

    struct intr_info *info;

    info = &intr_info_ary[intr];

    if (info->i_state != ISTATE_NOTHREAD) {
        if (info->i_reclist == NULL) {
            printf("sched_ithd: stray interrupt %d\n", intr);
        if (info->i_thread.td_gd == mycpu) {
            if (info->i_running == 0) {
                if (info->i_state != ISTATE_LIVELOCKED)
                    lwkt_schedule(&info->i_thread);     /* MIGHT PREEMPT */
            lwkt_send_ipiq(info->i_thread.td_gd,
                sched_ithd_remote, (void *)intr);
        if (info->i_running == 0) {
            if (info->i_state != ISTATE_LIVELOCKED)
                lwkt_schedule(&info->i_thread);         /* MIGHT PREEMPT */
    printf("sched_ithd: stray interrupt %d\n", intr);
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).

ithread_livelock_wakeup(systimer_t st)
    struct intr_info *info;

    info = &intr_info_ary[(int)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_schedule(&info->i_thread);
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done but the function is entered WITHOUT
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt)

int ithread_fast_handler(struct intrframe frame);
ithread_fast_handler(struct intrframe frame)
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec, next_rec;

    info = &intr_info_ary[intr];

     * If we are not processing any FAST interrupts, just schedule the thing.
     * (since we aren't in a critical section, this can result in a
    if (info->i_fast == 0) {

     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.

     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
    ++gd->gd_intr_nesting_level;
    must_schedule = info->i_slow;

    list = &info->i_reclist;
    for (rec = *list; rec; rec = next_rec) {
        next_rec = rec->next;   /* rec may be invalid after call */

        if (rec->intr_flags & INTR_FAST) {
            if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
                if (try_mplock() == 0) {
                     * XXX forward to the cpu holding the MP lock
            if (rec->serializer) {
                must_schedule += lwkt_serialize_handler_try(
                    rec->serializer, rec->handler,
                    rec->argument, &frame);
                rec->handler(rec->argument, &frame);
    --gd->gd_intr_nesting_level;

     * If we had a problem, schedule the thread to catch the missed
     * records (it will just re-run all of them).  A return value of 0
     * indicates that all handlers have been run and the interrupt can
     * be re-enabled, and a non-zero return indicates that the interrupt
     * thread controls re-enablement.
    return(must_schedule);
    /* could not get the MP lock, forward the interrupt */ \
    movl mp_lock, %eax ;        /* check race */ \
    cmpl $MP_FREE_LOCK,%eax ; \
    incl PCPU(cnt)+V_FORWARDED_INTS ; \
    movl $irq_num,8(%esp) ; \
    movl $forward_fastint_remote,4(%esp) ; \
    call lwkt_send_ipiq_bycpu ; \
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.

#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */
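/*
 * Worked example (added for clarity, not from the original source): with
 * the defaults above, livelock_limit = 50000 and livelock_lowater = 20000.
 * ill_count decays by ill_count / hz once per tick and (in code elided
 * here) is bumped as interrupts are serviced, so it approximates the
 * recent interrupt rate in hz.  A device stuck interrupting faster than
 * roughly 50000/sec therefore pushes the thread into ISTATE_LIVELOCKED,
 * where only the private systimer wakes it; normal scheduling is restored
 * once the measured rate stays below 20000/sec for a full second (hz
 * ticks).  LIVELOCK_TIMEFRAME(freq) is simply freq / 4, i.e. a quarter
 * second worth of events at the given frequency.
 */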
ithread_handler(void *arg)
    struct intr_info *info;
    struct intrec **list;
    struct systimer ill_timer;  /* enforced freq. timer */
    u_int ill_count;            /* interrupt livelock counter */

    info = &intr_info_ary[intr];
    list = &info->i_reclist;

     * The loop must be entered with one critical section held.

         * If an interrupt is pending, clear i_running and execute the
         * handlers.  Note that certain types of interrupts can re-trigger
         * and set i_running again.
         *
         * Each handler is run in a critical section.  Note that we run both
         * FAST and SLOW designated service routines.
        if (info->i_running) {
            for (rec = *list; rec; rec = nrec) {
                if (rec->serializer) {
                    lwkt_serialize_handler_call(rec->serializer, rec->handler,
                        rec->argument, NULL);
                    rec->handler(rec->argument, NULL);
         * This is our interrupt hook to add rate randomness to the random
        if (info->i_random.sc_enabled)
            add_interrupt_randomness(intr);

         * Unmask the interrupt to allow it to trigger again.  This only
         * applies to certain types of interrupts (typically level-triggered
         * interrupts).  This can result in the interrupt retriggering, but
         * the retrigger will not be processed until we cycle our critical
         * section.
         *
         * Only unmask interrupts while handlers are installed.  It is
         * possible to hit a situation where no handlers are installed
         * due to a device driver livelocking and then tearing down its
         * interrupt on close (the parallel bus being a good example).
        machintr_intren(intr);

         * Do a quick exit/enter to catch any higher-priority interrupt
         * sources, such as the statclock, so thread time accounting
         * will still work.  This may also cause an interrupt to re-trigger.
         * LIVELOCK STATE MACHINE
        switch(info->i_state) {

             * Calculate a running average every tick.
            if (lticks != ticks) {
                ill_count -= ill_count / hz;

             * If we did not exceed the frequency limit, we are done.
             * If the interrupt has not retriggered we deschedule ourselves.
            if (ill_count <= livelock_limit) {
                if (info->i_running == 0) {
                    lwkt_deschedule_self(gd->gd_curthread);

             * Otherwise we are livelocked.  Set up a periodic systimer
             * to wake the thread up at the limit frequency.
            printf("intr %d at %d > %d hz, livelocked limit engaged!\n",
                intr, ill_count, livelock_limit);
            info->i_state = ISTATE_LIVELOCKED;
            if ((use_limit = livelock_limit) < 100)
            else if (use_limit > 500000)
            systimer_init_periodic(&ill_timer, ithread_livelock_wakeup,
                (void *)intr, use_limit);
        case ISTATE_LIVELOCKED:
             * Wait for our periodic timer to go off.  Since the interrupt
             * has re-armed it can still set i_running, but it will not
             * reschedule us while we are in a livelocked state.
            lwkt_deschedule_self(gd->gd_curthread);

             * Check to see if the livelock condition no longer applies.
             * The interrupt must be able to operate normally for one
             * full second before we restore normal operation.
            if (lticks != ticks) {
                if (ill_count < livelock_lowater) {
                    if (++lcount >= hz) {
                        info->i_state = ISTATE_NORMAL;
                        systimer_del(&ill_timer);
                        printf("intr %d at %d < %d hz, livelock removed\n",
                            intr, ill_count, livelock_lowater);
                ill_count -= ill_count / hz;
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
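/*
 * Illustrative sketch (added, not from the original source): a driver whose
 * handler must never be run from the polling loop (the clock, for example)
 * would pass INTR_NOPOLL in the intr_flags argument when registering, e.g.
 * hypothetically:
 *
 *      register_int(0, clkintr, NULL, "clk", NULL, INTR_FAST | INTR_NOPOLL);
 *
 * ithread_emergency() below skips any record with that flag set.
 */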
ithread_emergency(void *arg __unused)
    struct intr_info *info;

    for (intr = 0; intr < max_installed_hard_intr; ++intr) {
        info = &intr_info_ary[intr];
        for (rec = info->i_reclist; rec; rec = nrec) {
            if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                if (rec->serializer) {
                    lwkt_serialize_handler_call(rec->serializer,
                        rec->handler, rec->argument, NULL);
                    rec->handler(rec->argument, NULL);
    lwkt_deschedule_self(curthread);
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.

emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
    if (emergency_intr_enable)
        lwkt_schedule(info->data);
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
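/*
 * Illustrative example (added, not from the original source): each
 * hw.intrnames entry produced below is either "irqN" for a vector with no
 * handlers or the registered handler names joined with '/' when a vector
 * is shared, e.g. hypothetically "fxp0/ahc0" for two devices on one irq.
 * hw.intrcnt is the corresponding array of i_count values for the installed
 * hard and soft interrupts, which is what utilities such as systat consume.
 */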
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
    struct intr_info *info;

    for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
        info = &intr_info_ary[intr];

        for (rec = info->i_reclist; rec; rec = rec->next) {
            snprintf(buf + len, sizeof(buf) - len, "%s%s",
                (len ? "/" : ""), rec->name);
            len += strlen(buf + len);
        snprintf(buf, sizeof(buf), "irq%d", intr);
        error = SYSCTL_OUT(req, buf, len + 1);

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
    struct intr_info *info;

    for (intr = 0; intr < max_installed_hard_intr; ++intr) {
        info = &intr_info_ary[intr];
        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
    for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
        info = &intr_info_ary[intr];
        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");