2 * Copyright (c) 1991 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
37 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
38 * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.c,v 1.26 2005/05/23 18:19:53 dillon Exp $
41 * This file contains an aggregated module marked:
42 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
43 * All rights reserved.
44 * See the notice for details.
49 #include "opt_auto_eoi.h"
51 #include <sys/param.h>
53 #include <machine/lock.h>
55 #include <sys/systm.h>
56 #include <sys/syslog.h>
57 #include <sys/malloc.h>
58 #include <sys/errno.h>
59 #include <sys/interrupt.h>
60 #include <machine/ipl.h>
61 #include <machine/md_var.h>
62 #include <machine/segments.h>
64 #include <machine/globaldata.h>
66 #include <sys/thread2.h>
68 #include <machine/smptests.h> /** FAST_HI */
69 #include <machine/smp.h>
70 #include <bus/isa/i386/isa.h>
71 #include <i386/isa/icu.h>
74 #include <bus/isa/isavar.h>
76 #include <i386/isa/intr_machdep.h>
77 #include <bus/isa/isavar.h>
78 #include <sys/interrupt.h>
80 #include <machine/clock.h>
82 #include <machine/cpu.h>
85 #include <bus/mca/i386/mca_machdep.h>
88 /* XXX should be in suitable include files */
89 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
/*
 * (excerpt note) The enclosing comment for the text below is partly
 * elided; it documents an option for boards whose 8254 timer is not
 * wired to the IO APIC.
 */
94 * This is to accommodate "mixed-mode" programming for
95 * motherboards that don't connect the 8254 to the IO APIC.
/*
 * Size of the intrnames string table: one "???" bitbucket slot,
 * ICU_LEN "stray irqN" counters, and room for two names per IRQ
 * (see update_intrname() below, which fills indices 0..ICU_LEN).
 */
100 #define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
/*
 * Forward declarations.  isa_wrongintr exists only when both FAST_HI
 * and APIC_IO are configured (the matching #endif falls in an elided
 * line of this excerpt).
 */
102 static inthand2_t isa_strayintr;
103 #if defined(FAST_HI) && defined(APIC_IO)
104 static inthand2_t isa_wrongintr;
106 static void init_i8259(void);
/*
 * Per-IRQ dispatch state.  Arrays are sized ICU_LEN*2 to cover both
 * the low (8259/slow) and high (APIC/fast) vector ranges.
 *
 *  intr_unit[]    - opaque cookie passed to the installed handler
 *  intr_countp[]  - pointer to this IRQ's counter in intrcnt[]
 *  intr_handler[] - handler function; defaults to isa_strayintr so an
 *                   unclaimed interrupt is logged rather than lost.
 * (The initializer's closing brace falls in an elided line.)
 */
108 void *intr_unit[ICU_LEN*2];
109 u_long *intr_countp[ICU_LEN*2];
110 inthand2_t *intr_handler[ICU_LEN*2] = {
111 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
112 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
113 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
114 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
115 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
116 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
117 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
118 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
/*
 * Machine-dependent per-IRQ bookkeeping.  Additional fields (irq,
 * maskp, mask — all referenced later in this file) fall in elided
 * lines of this excerpt.  mihandler_installed records whether the MI
 * interrupt thread for this IRQ has been registered (see
 * add_intrdesc()).
 */
121 static struct md_intr_info {
124 int mihandler_installed;
126 } intr_info[ICU_LEN*2];
/*
 * Low-level IDT entry points (from *vector.s) used for FAST
 * interrupts, one per IRQ.  Entries 16-23 are presumably guarded by
 * an APIC_IO conditional on the elided line between the two groups —
 * TODO confirm against the full file.
 */
128 static inthand_t *fastintr[ICU_LEN] = {
129 &IDTVEC(fastintr0), &IDTVEC(fastintr1),
130 &IDTVEC(fastintr2), &IDTVEC(fastintr3),
131 &IDTVEC(fastintr4), &IDTVEC(fastintr5),
132 &IDTVEC(fastintr6), &IDTVEC(fastintr7),
133 &IDTVEC(fastintr8), &IDTVEC(fastintr9),
134 &IDTVEC(fastintr10), &IDTVEC(fastintr11),
135 &IDTVEC(fastintr12), &IDTVEC(fastintr13),
136 &IDTVEC(fastintr14), &IDTVEC(fastintr15),
138 &IDTVEC(fastintr16), &IDTVEC(fastintr17),
139 &IDTVEC(fastintr18), &IDTVEC(fastintr19),
140 &IDTVEC(fastintr20), &IDTVEC(fastintr21),
141 &IDTVEC(fastintr22), &IDTVEC(fastintr23),
/*
 * Entry points used to replay ("unpend") a FAST interrupt that was
 * deferred while the cpu was in a critical section, one per IRQ.
 * Same layout as fastintr[] above.
 */
145 unpendhand_t *fastunpend[ICU_LEN] = {
146 IDTVEC(fastunpend0), IDTVEC(fastunpend1),
147 IDTVEC(fastunpend2), IDTVEC(fastunpend3),
148 IDTVEC(fastunpend4), IDTVEC(fastunpend5),
149 IDTVEC(fastunpend6), IDTVEC(fastunpend7),
150 IDTVEC(fastunpend8), IDTVEC(fastunpend9),
151 IDTVEC(fastunpend10), IDTVEC(fastunpend11),
152 IDTVEC(fastunpend12), IDTVEC(fastunpend13),
153 IDTVEC(fastunpend14), IDTVEC(fastunpend15),
155 IDTVEC(fastunpend16), IDTVEC(fastunpend17),
156 IDTVEC(fastunpend18), IDTVEC(fastunpend19),
157 IDTVEC(fastunpend20), IDTVEC(fastunpend21),
158 IDTVEC(fastunpend22), IDTVEC(fastunpend23),
/*
 * IDT entry points for normal ("slow") interrupts, which run through
 * the scheduled interrupt-thread path rather than being handled
 * directly at trap time.
 */
162 static inthand_t *slowintr[ICU_LEN] = {
163 &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
164 &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
165 &IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
166 &IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15),
168 &IDTVEC(intr16), &IDTVEC(intr17), &IDTVEC(intr18), &IDTVEC(intr19),
169 &IDTVEC(intr20), &IDTVEC(intr21), &IDTVEC(intr22), &IDTVEC(intr23),
/*
 * Status bits read by isa_nmi() below: NMI_* are bits of the ISA
 * system-control port (0x61), ENMI_* are bits of the EISA extended
 * NMI status port (0x461).
 */
173 #define NMI_PARITY (1 << 7)
174 #define NMI_IOCHAN (1 << 6)
175 #define ENMI_WATCHDOG (1 << 7)
176 #define ENMI_BUSTIMER (1 << 6)
177 #define ENMI_IOSTATUS (1 << 5)
180 * Handle a NMI, possibly a machine check.
181 * return true to panic system, false to ignore.
/*
 * (excerpt note) The function signature and the return statements of
 * the branches below fall in elided lines.  Each recognized condition
 * logs a diagnosis; per the header comment the caller panics on a
 * true return.
 */
188 int isa_port = inb(0x61);
189 int eisa_port = inb(0x461);
191 log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port);
/* Give the microchannel bus a chance to claim the NMI first. */
193 if (MCA_system && mca_bus_nmi())
197 if (isa_port & NMI_PARITY) {
198 log(LOG_CRIT, "RAM parity error, likely hardware failure.");
202 if (isa_port & NMI_IOCHAN) {
203 log(LOG_CRIT, "I/O channel check, likely hardware failure.");
/*
 * 0xff from port 0x461 means the EISA status port does not exist,
 * so none of the ENMI_* bits below can be trusted.
 */
208 * On a real EISA machine, this will never happen. However it can
209 * happen on ISA machines which implement XT style floating point
210 * error handling (very rare). Save them from a meaningless panic.
212 if (eisa_port == 0xff)
215 if (eisa_port & ENMI_WATCHDOG) {
216 log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure.");
220 if (eisa_port & ENMI_BUSTIMER) {
221 log(LOG_CRIT, "EISA bus timeout, likely hardware failure.");
225 if (eisa_port & ENMI_IOSTATUS) {
226 log(LOG_CRIT, "EISA I/O port status error.");
233 * Reinitialize the ICU when its configuration has been lost.
/*
 * (excerpt note) Function signature elided.  Reprograms the 8259s and
 * then re-unmasks every IRQ that still has a real (non-stray) handler
 * installed; the loop body lines are elided here.
 */
241 for(i=0;i<ICU_LEN;i++)
242 if(intr_handler[i] != isa_strayintr)
/*
 * Fill in the default dispatch table so a spurious interrupt during
 * kernel configuration hits isa_strayintr rather than a wild pointer.
 */
248 * Fill in default interrupt table (in case of spurious interrupt
249 * during configuration of kernel, setup interrupt control unit
257 for (i = 0; i < ICU_LEN; i++)
258 icu_unset(i, isa_strayintr);
266 /* initialize 8259's */
/*
 * Standard ICW1..ICW4 programming sequence for the master (ICU1) and
 * slave (ICU2) 8259A controllers.  The alternating 0x19/0x11 ICW1
 * writes are presumably selected by AUTO_EOI / level-trigger #ifdefs
 * on the elided lines — TODO confirm against the full file.
 */
269 outb(IO_ICU1, 0x19); /* reset; program device, four bytes */
272 outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
274 outb(IO_ICU1+ICU_IMR_OFFSET, NRSVIDT); /* starting at this vector index */
275 outb(IO_ICU1+ICU_IMR_OFFSET, IRQ_SLAVE); /* slave on line 7 */
277 outb(IO_ICU1+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
279 outb(IO_ICU1+ICU_IMR_OFFSET, 1); /* 8086 mode */
281 outb(IO_ICU1+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
282 outb(IO_ICU1, 0x0a); /* default to IRR on read */
283 outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
287 outb(IO_ICU2, 0x19); /* reset; program device, four bytes */
290 outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
292 outb(IO_ICU2+ICU_IMR_OFFSET, NRSVIDT+8); /* starting at this vector index */
293 outb(IO_ICU2+ICU_IMR_OFFSET, ICU_SLAVEID); /* my slave id is 7 */
295 outb(IO_ICU2+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
297 outb(IO_ICU2+ICU_IMR_OFFSET,1); /* 8086 mode */
299 outb(IO_ICU2+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
300 outb(IO_ICU2, 0x0a); /* default to IRR on read */
304 * Caught a stray interrupt, notify
/*
 * Default handler for unclaimed interrupts.  The cookie is the
 * address of this IRQ's intr_unit[] slot (set by icu_unset()), so
 * pointer arithmetic recovers the IRQ number.  Return type line is
 * elided in this excerpt.
 */
307 isa_strayintr(void *vcookiep)
309 int intr = (void **)vcookiep - &intr_unit[0];
311 /* DON'T BOTHER FOR NOW! */
312 /* for some reason, we get bursts of intr #7, even if not enabled! */
314 * Well the reason you got bursts of intr #7 is because someone
315 * raised an interrupt line and dropped it before the 8259 could
316 * prioritize it. This is documented in the intel data book. This
317 * means you have BAD hardware! I have changed this so that only
318 * the first 5 get logged, then it quits logging them, and puts
319 * out a special message. rgrimes 3/25/1993
322 * XXX TODO print a different message for #7 if it is for a
323 * glitch. Glitches can be distinguished from real #7's by
324 * testing that the in-service bit is _not_ set. The test
325 * must be done before sending an EOI so it can't be done if
326 * we are using AUTO_EOI_1.
/* intrcnt[0] is the bitbucket; stray counters start at index 1. */
328 if (intrcnt[1 + intr] <= 5)
329 log(LOG_ERR, "stray irq %d\n", intr);
330 if (intrcnt[1 + intr] == 5)
332 "too many stray irq %d's; not logging any more\n", intr);
335 #if defined(FAST_HI) && defined(APIC_IO)
338 * This occurs if we mis-programmed the APIC and its vector is still
339 * pointing to the slow vector even when we thought we reprogrammed it
340 * to the high vector. This can occur when interrupts are improperly
341 * routed by the APIC. The unit data is opaque so we have to try to
342 * find it in the unit array.
/*
 * Unlike isa_strayintr(), the cookie here is the handler's own unit
 * argument, not an intr_unit[] slot address, so the IRQ must be
 * recovered by a linear search.  Return type line elided.
 */
345 isa_wrongintr(void *vcookiep)
349 for (intr = 0; intr < ICU_LEN*2; ++intr) {
350 if (intr_unit[intr] == vcookiep)
/* intr == ICU_LEN*2 means the cookie matched no known unit. */
353 if (intr == ICU_LEN*2) {
354 log(LOG_ERR, "stray unknown irq (APIC misprogrammed)\n");
355 } else if (intrcnt[1 + intr] <= 5) {
356 log(LOG_ERR, "stray irq ~%d (APIC misprogrammed)\n", intr);
357 } else if (intrcnt[1 + intr] == 6) {
359 "too many stray irq ~%d's; not logging any more\n", intr);
367 * Return a bitmap of the current interrupt requests. This is 8259-specific
368 * and is only suitable for use at probe time.
/*
 * (excerpt note) Signature and the IRR reads into irr1/irr2 are
 * elided.  Bits 0-7 come from ICU1, bits 8-15 from ICU2.
 */
371 isa_irq_pending(void)
378 return ((irr2 << 8) | irr1);
/*
 * Refresh the cached intr_info[].mask values from each IRQ's live
 * *maskp, skipping the 8259 cascade line.  Parts of the body
 * (declarations, the maskptr NULL check) are elided in this excerpt.
 */
383 update_intr_masks(void)
388 for (intr=0; intr < ICU_LEN; intr ++) {
390 /* no 8259 SLAVE to ignore */
392 if (intr==ICU_SLAVEID) continue; /* ignore 8259 SLAVE output */
394 maskptr = intr_info[intr].maskp;
/* An IRQ always blocks itself and the soft clock while running. */
397 *maskptr |= SWI_CLOCK_MASK | (1 << intr);
399 if (mask != intr_info[intr].mask) {
401 printf ("intr_mask[%2d] old=%08x new=%08x ptr=%p.\n",
402 intr, intr_info[intr].mask, mask, maskptr);
404 intr_info[intr].mask = mask;
/*
 * Point intr_countp[intr] at the intrcnt[] slot whose label in the
 * intrnames string table reads "<name> irq<intr>", creating the label
 * if needed.  A NULL/failed name falls back to elided code in this
 * excerpt.
 */
413 update_intrname(int intr, char *name)
417 int name_index, off, strayintr;
420 * Initialise strings for bitbucket and stray interrupt counters.
421 * These have statically allocated indices 0 and 1 through ICU_LEN.
/* First call: lay down "???" plus one "stray irqN" label per IRQ. */
423 if (intrnames[0] == '\0') {
424 off = sprintf(intrnames, "???") + 1;
425 for (strayintr = 0; strayintr < ICU_LEN; strayintr++)
426 off += sprintf(intrnames + off, "stray irq%d",
/* Reject names that would not fit the local buffer. */
432 if (snprintf(buf, sizeof(buf), "%s irq%d", name, intr) >= sizeof(buf))
436 * Search for `buf' in `intrnames'. In the usual case when it is
437 * not found, append it to the end if there is enough space (the \0
438 * terminator for the previous string, if any, becomes a separator).
440 for (cp = intrnames, name_index = 0;
441 cp != eintrnames && name_index < NR_INTRNAMES;
442 cp += strlen(cp) + 1, name_index++) {
444 if (strlen(buf) >= eintrnames - cp)
449 if (strcmp(cp, buf) == 0)
454 printf("update_intrname: counting %s irq%d as %s\n", name, intr,
458 intr_countp[intr] = &intrcnt[name_index];
462 * NOTE! intr_handler[] is only used for FAST interrupts, the *vector.s
463 * code ignores it for normal interrupts.
/*
 * Install `handler' for `intr': record it in the dispatch tables,
 * compute its run-time SPL mask, and point the IDT (and, with
 * FAST_HI/APIC_IO, the IO APIC redirection entry) at the proper
 * fast or slow vector.  Fails (elided return lines) if the IRQ is
 * out of range or already claimed.  Return type line elided.
 */
466 icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
468 #if defined(FAST_HI) && defined(APIC_IO)
469 int select; /* the select register is 8 bits */
471 u_int32_t value; /* the window register is 32 bits */
474 u_int mask = (maskptr ? *maskptr : 0);
477 if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
479 if ((u_int)intr >= ICU_LEN || intr == ICU_SLAVEID)
/* Busy check: only a stray (default) slot may be claimed. */
482 if (intr_handler[intr] != isa_strayintr)
486 cpu_disable_intr(); /* YYY */
487 intr_handler[intr] = handler;
488 intr_unit[intr] = arg;
489 intr_info[intr].maskp = maskptr;
490 intr_info[intr].mask = mask | SWI_CLOCK_MASK | (1 << intr);
492 /* YYY fast ints supported and mp protected but ... */
495 #if defined(FAST_HI) && defined(APIC_IO)
496 if (flags & INTR_FAST) {
498 * Install a spurious interrupt in the low space in case
499 * the IO apic is not properly reprogrammed.
501 vector = TPR_SLOW_INTS + intr;
502 setidt(vector, isa_wrongintr,
503 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
504 vector = TPR_FAST_INTS + intr;
505 setidt(vector, fastintr[intr],
506 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
508 vector = TPR_SLOW_INTS + intr;
509 #ifdef APIC_INTR_REORDER
510 #ifdef APIC_INTR_HIGHPRI_CLOCK
511 /* XXX: Hack (kludge?) for more accurate clock. */
512 if (intr == apic_8254_intr || intr == 8) {
513 vector = TPR_FAST_INTS + intr;
517 setidt(vector, slowintr[intr],
518 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
520 #ifdef APIC_INTR_REORDER
521 set_lapic_isrloc(intr, vector);
524 * Reprogram the vector in the IO APIC.
526 * XXX EOI/mask a pending (stray) interrupt on the old vector?
/* ioapic < 0 means this IRQ has no APIC pin assignment. */
528 if (int_to_apicintpin[intr].ioapic >= 0) {
529 select = int_to_apicintpin[intr].redirindex;
530 value = io_apic_read(int_to_apicintpin[intr].ioapic,
531 select) & ~IOART_INTVEC;
532 io_apic_write(int_to_apicintpin[intr].ioapic,
533 select, value | vector);
/* Non-FAST_HI path: single IDT entry at the legacy ICU offset. */
536 setidt(ICU_OFFSET + intr,
537 flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
538 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
539 #endif /* FAST_HI && APIC_IO */
/*
 * Tear down the handler for `intr', restoring the stray-interrupt
 * defaults.  `handler' must match the currently installed handler
 * (guards against unregistering someone else's interrupt).
 * Old-style (K&R) parameter declarations fall in elided lines.
 */
546 icu_unset(intr, handler)
552 if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr]) {
553 printf("icu_unset: invalid handler %d %p/%p\n", intr, handler,
554 (((u_int)intr >= ICU_LEN) ? (void *)-1 : intr_handler[intr]));
560 cpu_disable_intr(); /* YYY */
/* Restore stray-interrupt defaults for this slot. */
561 intr_countp[intr] = &intrcnt[1 + intr];
562 intr_handler[intr] = isa_strayintr;
563 intr_info[intr].maskp = NULL;
564 intr_info[intr].mask = HWI_MASK | SWI_MASK;
/* Self-referential cookie lets isa_strayintr() recover the IRQ #. */
565 intr_unit[intr] = &intr_unit[intr];
567 /* XXX how do I re-create dvp here? */
568 setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
569 slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
571 #ifdef APIC_INTR_REORDER
572 set_lapic_isrloc(intr, ICU_OFFSET + intr);
574 setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
575 GSEL(GCODE_SEL, SEL_KPL));
582 /* The following notice applies beyond this point in the file */
585 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
586 * All rights reserved.
588 * Redistribution and use in source and binary forms, with or without
589 * modification, are permitted provided that the following conditions
591 * 1. Redistributions of source code must retain the above copyright
592 * notice unmodified, this list of conditions, and the following
594 * 2. Redistributions in binary form must reproduce the above copyright
595 * notice, this list of conditions and the following disclaimer in the
596 * documentation and/or other materials provided with the distribution.
598 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
599 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
600 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
601 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
602 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
603 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
604 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
605 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
606 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
607 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
609 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
/*
 * Shared-interrupt handler record.  Fields (next, handler, argument,
 * maskptr, intr, name, flags — all referenced below) fall in elided
 * lines.  Each IRQ keeps a singly linked chain of these.
 */
613 typedef struct intrec {
625 static intrec *intreclist_head[ICU_LEN];
628 * The interrupt multiplexer calls each of the handlers in turn. The
629 * ipl is initially quite low. It is raised as necessary for each call
630 * and lowered after the call. Thus out of order handling is possible
631 * even for interrupts of the same type. This is probably no more
632 * harmful than out of order handling in general (not harmful except
633 * for real time response which we don't support anyway).
/*
 * (excerpt note) Signature elided; `arg' is &intreclist_head[irq].
 * Each chained handler runs with SPL raised to its recorded mask.
 */
642 for (pp = arg; (p = *pp) != NULL; pp = &p->next) {
643 oldspl = splq(p->mask);
645 p->handler(p->argument);
/*
 * Return the first handler record on irq's chain whose maskptr
 * matches, or NULL (advance/return lines elided in this excerpt).
 */
651 find_idesc(unsigned *maskptr, int irq)
653 intrec *p = intreclist_head[irq];
655 while (p && p->maskptr != maskptr)
662 * Both the low level handler and the shared interrupt multiplexer
663 * block out further interrupts as set in the handlers "mask", while
664 * the handler is running. In fact *maskptr should be used for this
665 * purpose, but since this requires one more pointer dereference on
666 * each interrupt, we rather bother update "mask" whenever *maskptr
667 * changes. The function "update_masks" should be called **after**
668 * all manipulation of the linked list of interrupt handlers hung
669 * off of intrdec_head[irq] is complete, since the chain of handlers
670 * will both determine the *maskptr values and the instances of mask
671 * that are fixed. This function should be called with the irq for
672 * which a new handler has been added blocked, since the masks may not
673 * yet know about the use of this irq for a device of a certain class.
/* Re-copy every chained handler's cached mask from its live *maskptr. */
677 update_mux_masks(void)
680 for (irq = 0; irq < ICU_LEN; irq++) {
681 intrec *idesc = intreclist_head[irq];
682 while (idesc != NULL) {
683 if (idesc->maskptr != NULL) {
684 /* our copy of *maskptr may be stale, refresh */
685 idesc->mask = *idesc->maskptr;
/*
 * Add or remove irq's bit in the class mask *maskptr depending on
 * whether any handler on irq's chain still references it, then
 * propagate the change (branch bodies elided in this excerpt).
 */
693 update_masks(intrmask_t *maskptr, int irq)
695 intrmask_t mask = 1 << irq;
700 if (find_idesc(maskptr, irq) == NULL) {
701 /* no reference to this maskptr was found in this irq's chain */
704 /* a reference to this maskptr was found in this irq's chain */
707 /* we need to update all values in the intr_mask[irq] array */
709 /* update mask in chains of the interrupt multiplex handler as well */
714 * Add an interrupt handler to the linked list hung off of intreclist_head[irq]
715 * and install a shared interrupt multiplex handler. Install an interrupt
716 * thread for each interrupt (though FAST interrupts will not use it).
717 * The preemption procedure checks the CPL. lwkt_preempt() will check
718 * relative thread priorities for us as long as we properly pass through
721 * The interrupt thread has already been put on the run queue, so if we cannot
722 * preempt we should force a reschedule.
/*
 * Preempt into interrupt thread `td' unless its IRQ is currently
 * blocked by the running thread's CPL; otherwise just request a
 * reschedule so the already-queued thread runs soon.
 */
725 cpu_intr_preempt(struct thread *td, int critpri)
727 struct md_intr_info *info = td->td_info.intdata;
729 if ((curthread->td_cpl & (1 << info->irq)) == 0)
730 lwkt_preempt(td, critpri);
732 need_lwkt_resched(); /* XXX may not be required */
/*
 * Link `idesc' onto its IRQ's handler chain, lazily creating the MI
 * interrupt thread and installing intr_mux as the hard handler.
 * Error-return lines are elided in this excerpt.  Return type line
 * elided as well.
 */
736 add_intrdesc(intrec *idesc)
738 int irq = idesc->intr;
743 * There are two ways to enter intr_mux(). (1) via the scheduled
744 * interrupt thread or (2) directly. The thread mechanism is the
745 * normal mechanism used by SLOW interrupts, while the direct method
746 * is used by FAST interrupts.
748 * We need to create an interrupt thread if none exists.
750 if (intr_info[irq].mihandler_installed == 0) {
753 intr_info[irq].mihandler_installed = 1;
754 intr_info[irq].irq = irq;
755 td = register_int(irq, intr_mux, &intreclist_head[irq], idesc->name, idesc->maskptr);
756 td->td_info.intdata = &intr_info[irq];
757 td->td_preemptable = cpu_intr_preempt;
758 printf("installed MI handler for int %d\n", irq);
761 headp = &intreclist_head[irq];
/* An existing or new INTR_EXCL handler refuses to share the line. */
768 if ((idesc->flags & INTR_EXCL) || (head->flags & INTR_EXCL)) {
769 printf("\tdevice combination doesn't support "
770 "shared irq%d\n", irq);
776 * Always install intr_mux as the hard handler so it can deal with
777 * individual enablement on handlers.
780 if (icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0) != 0)
782 update_intrname(irq, idesc->name);
784 if (bootverbose && head->next == NULL)
785 printf("\tusing shared irq%d.\n", irq);
786 update_intrname(irq, "mux");
790 * Append to the end of the chain and update our SPL masks.
792 while (*headp != NULL)
793 headp = &(*headp)->next;
796 update_masks(idesc->maskptr, irq);
801 * Create and activate an interrupt handler descriptor data structure.
803 * The dev_instance pointer is required for resource management, and will
804 * only be passed through to resource_claim().
806 * There will be functions that derive a driver and unit name from a
807 * dev_instance variable, and those functions will be used to maintain the
808 * interrupt counter label array referenced by systat and vmstat to report
809 * device interrupt rates (->update_intrlabels).
811 * Add the interrupt handler descriptor data structure created by an
812 * earlier call of create_intr() to the linked list for its irq and
813 * adjust the interrupt masks if necessary.
815 * WARNING: This is an internal function and not to be used by device
816 * drivers. It is subject to change without notice.
/*
 * Allocate an intrec, fill it from the arguments, and hand it to
 * add_intrdesc() with the irq blocked; on failure the allocations are
 * released.  Error-return lines are elided in this excerpt.
 */
820 inthand_add(const char *name, int irq, inthand2_t handler, void *arg,
821 intrmask_t *maskptr, int flags)
/* Compile-time-ish sanity: the irq bitmask must hold ICU_LEN bits. */
827 if (ICU_LEN > 8 * sizeof *maskptr) {
828 printf("create_intr: ICU_LEN of %d too high for %d bit intrmask\n",
829 ICU_LEN, 8 * sizeof *maskptr);
832 if ((unsigned)irq >= ICU_LEN) {
833 printf("create_intr: requested irq%d too high, limit is %d\n",
838 idesc = malloc(sizeof *idesc, M_DEVBUF, M_WAITOK | M_ZERO);
/* Private copy of the name; the caller's string may not persist. */
844 idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
845 if (idesc->name == NULL) {
846 free(idesc, M_DEVBUF);
849 strcpy(idesc->name, name);
851 idesc->handler = handler;
852 idesc->argument = arg;
853 idesc->maskptr = maskptr;
855 idesc->flags = flags;
/* Block this irq while the handler chain is manipulated. */
859 oldspl = splq(1 << irq);
861 /* add irq to class selected by maskptr */
862 errcode = add_intrdesc(idesc);
867 printf("\tintr_connect(irq%d) failed, result=%d\n",
869 free(idesc->name, M_DEVBUF);
870 free(idesc, M_DEVBUF);
878 * Deactivate and remove the interrupt handler descriptor data connected
879 * created by an earlier call of intr_connect() from the linked list and
880 * adjust the interrupt masks if necessary.
882 * Return the memory held by the interrupt handler descriptor data structure
883 * to the system. Make sure, the handler is not actively used anymore, before.
/* (excerpt note) Return type and irq extraction lines are elided. */
886 inthand_remove(intrec *idesc)
888 intrec **hook, *head;
/* Block this irq while the handler chain is manipulated. */
896 oldspl = splq(1 << irq);
899 * Find and remove the interrupt descriptor.
901 hook = &intreclist_head[irq];
902 while (*hook != idesc) {
907 hook = &(*hook)->next;
912 * If the list is now empty, revert the hard vector to the spurious
915 head = intreclist_head[irq];
918 * No more interrupts on this irq
920 icu_unset(irq, intr_mux);
921 update_intrname(irq, NULL);
922 } else if (head->next) {
924 * This irq is still shared (has at least two handlers)
925 * (the name should already be set to "mux").
929 * This irq is no longer shared
931 update_intrname(irq, head->name);
933 update_masks(idesc->maskptr, irq);
/*
 * NOTE(review): only idesc is freed here; idesc->name (allocated in
 * inthand_add) presumably is freed on an elided line — verify in the
 * full file.
 */
935 free(idesc, M_DEVBUF);
941 * These functions are used in tandem with the device disabling its
942 * interrupt in the device hardware to prevent the handler from being
943 * run. Otherwise it is possible for a device interrupt to occur,
944 * schedule the handler, for the device to disable the hard interrupt,
945 * and for the handler to then run because it has already been scheduled.
/*
 * (excerpt note) The bodies of both accessors are elided; they
 * evidently toggle/test a per-record enable state on `idesc'.
 */
948 inthand_disabled(intrec *idesc)
954 inthand_enabled(intrec *idesc)
962 * This function is called by an interrupt thread when it has completed
963 * processing a loop. We re-enable interrupts and interlock with
966 * See kern/kern_intr.c for more information.
/*
 * Deschedule the current interrupt thread, but if another interrupt
 * for this irq arrived meanwhile (gd_ipending bit set), consume the
 * pending bit and reschedule ourselves to run the loop again.
 */
969 ithread_done(int irq)
971 struct mdglobaldata *gd = mdcpu;
975 td = gd->mi.gd_curthread;
/* Must be inside a critical section for the deschedule/test to be atomic. */
977 KKASSERT(td->td_pri >= TDPRI_CRIT);
978 lwkt_deschedule_self(td);
980 if (gd->gd_ipending & mask) {
981 atomic_clear_int_nonlocked(&gd->gd_ipending, mask);
983 lwkt_schedule_self(td);
991 * forward_fastint_remote()
993 * This function is called from the receiving end of an IPIQ when a
994 * remote cpu wishes to forward a fast interrupt to us. All we have to
995 * do is set the interrupt pending and let the IPI's doreti deal with it.
/* (excerpt note) Signature return type and irq extraction lines elided. */
998 forward_fastint_remote(void *arg)
1001 struct mdglobaldata *gd = mdcpu;
1003 atomic_set_int_nonlocked(&gd->gd_fpending, 1 << irq);
1004 atomic_set_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND);