2 * Copyright (c) 1991 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
37 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
38 * $DragonFly: src/sys/platform/pc32/isa/intr_machdep.c,v 1.10 2003/07/12 16:55:50 dillon Exp $
41 * This file contains an aggregated module marked:
42 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
43 * All rights reserved.
44 * See the notice for details.
47 #include "opt_auto_eoi.h"
51 #include <sys/param.h>
53 #include <machine/lock.h>
55 #include <sys/systm.h>
56 #include <sys/syslog.h>
57 #include <sys/malloc.h>
58 #include <sys/errno.h>
59 #include <sys/interrupt.h>
60 #include <machine/ipl.h>
61 #include <machine/md_var.h>
62 #include <machine/segments.h>
64 #include <machine/globaldata.h>
66 #include <sys/thread2.h>
69 #include <machine/smptests.h> /** FAST_HI */
70 #include <machine/smp.h>
73 #include <pc98/pc98/pc98.h>
74 #include <pc98/pc98/pc98_machdep.h>
75 #include <pc98/pc98/epsonio.h>
77 #include <i386/isa/isa.h>
79 #include <i386/isa/icu.h>
82 #include <isa/isavar.h>
84 #include <i386/isa/intr_machdep.h>
85 #include <sys/interrupt.h>
87 #include <machine/clock.h>
89 #include <machine/cpu.h>
93 #include <i386/isa/mca_machdep.h>
96 /* XXX should be in suitable include files */
/*
 * NOTE(review): this listing appears sampled; the #ifdef PC98 / #else /
 * #endif lines selecting between the two ICU_IMR_OFFSET definitions
 * below are not visible here -- confirm against the full source.
 */
98 #define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
101 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
/* Cascade input on the master 8259 that the slave 8259 is wired to. */
102 #define ICU_SLAVEID 2
107 * This is to accommodate "mixed-mode" programming for
108 * motherboards that don't connect the 8254 to the IO APIC.
/* 1 bitbucket entry + ICU_LEN stray counters + 2*ICU_LEN device names
 * (see update_intrname() which fills intrnames using this layout). */
113 #define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
115 static inthand2_t isa_strayintr;
/*
 * Per-interrupt dispatch state. Sized ICU_LEN*2, presumably to cover
 * both the ICU and APIC vector ranges -- TODO confirm against the
 * full source.
 */
117 void *intr_unit[ICU_LEN*2];
118 u_long *intr_countp[ICU_LEN*2];
/* Every slot starts out pointing at the stray-interrupt catcher. */
119 inthand2_t *intr_handler[ICU_LEN*2] = {
120 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
121 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
122 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
123 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
124 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
125 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
126 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
127 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
/* Machine-dependent per-irq bookkeeping used by the MI thread layer. */
130 static struct md_intr_info {
/* non-zero once register_int() has installed an MI handler thread
 * for this irq (see add_intrdesc()) */
133 int mihandler_installed;
135 } intr_info[ICU_LEN*2];
/*
 * IDT entry points for FAST interrupts (one assembler stub per irq,
 * generated in *vector.s). Entries 16-23 are presumably inside an
 * #ifdef APIC_IO block whose conditionals are missing from this
 * listing -- TODO confirm.
 */
137 static inthand_t *fastintr[ICU_LEN] = {
138 &IDTVEC(fastintr0), &IDTVEC(fastintr1),
139 &IDTVEC(fastintr2), &IDTVEC(fastintr3),
140 &IDTVEC(fastintr4), &IDTVEC(fastintr5),
141 &IDTVEC(fastintr6), &IDTVEC(fastintr7),
142 &IDTVEC(fastintr8), &IDTVEC(fastintr9),
143 &IDTVEC(fastintr10), &IDTVEC(fastintr11),
144 &IDTVEC(fastintr12), &IDTVEC(fastintr13),
145 &IDTVEC(fastintr14), &IDTVEC(fastintr15),
147 &IDTVEC(fastintr16), &IDTVEC(fastintr17),
148 &IDTVEC(fastintr18), &IDTVEC(fastintr19),
149 &IDTVEC(fastintr20), &IDTVEC(fastintr21),
150 &IDTVEC(fastintr22), &IDTVEC(fastintr23),
/*
 * Entry points used to replay ("unpend") a FAST interrupt that was
 * deferred while the cpu was at an elevated priority. Parallel to
 * the fastintr[] table above; entries 16-23 presumably APIC-only.
 */
154 unpendhand_t *fastunpend[ICU_LEN] = {
155 IDTVEC(fastunpend0), IDTVEC(fastunpend1),
156 IDTVEC(fastunpend2), IDTVEC(fastunpend3),
157 IDTVEC(fastunpend4), IDTVEC(fastunpend5),
158 IDTVEC(fastunpend6), IDTVEC(fastunpend7),
159 IDTVEC(fastunpend8), IDTVEC(fastunpend9),
160 IDTVEC(fastunpend10), IDTVEC(fastunpend11),
161 IDTVEC(fastunpend12), IDTVEC(fastunpend13),
162 IDTVEC(fastunpend14), IDTVEC(fastunpend15),
164 IDTVEC(fastunpend16), IDTVEC(fastunpend17),
165 IDTVEC(fastunpend18), IDTVEC(fastunpend19),
166 IDTVEC(fastunpend20), IDTVEC(fastunpend21),
167 IDTVEC(fastunpend22), IDTVEC(fastunpend23),
/*
 * IDT entry points for NORMAL (threaded) interrupts; also the
 * default vectors restored by icu_unset(). Entries 16-23 presumably
 * APIC-only (conditionals missing from this listing).
 */
171 static inthand_t *slowintr[ICU_LEN] = {
172 &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
173 &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
174 &IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
175 &IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15),
177 &IDTVEC(intr16), &IDTVEC(intr17), &IDTVEC(intr18), &IDTVEC(intr19),
178 &IDTVEC(intr20), &IDTVEC(intr21), &IDTVEC(intr22), &IDTVEC(intr23),
/*
 * NMI status-register bit definitions. The first pair reads a PC98
 * status port, the second group the AT port 0x61 / EISA port 0x461;
 * the #ifdef PC98 / #else lines separating them are missing from
 * this listing -- confirm against the full source.
 */
183 #define NMI_PARITY 0x04
184 #define NMI_EPARITY 0x02
186 #define NMI_PARITY (1 << 7)
187 #define NMI_IOCHAN (1 << 6)
188 #define ENMI_WATCHDOG (1 << 7)
189 #define ENMI_BUSTIMER (1 << 6)
190 #define ENMI_IOSTATUS (1 << 5)
194 * Handle a NMI, possibly a machine check.
195 * return true to panic system, false to ignore.
/*
 * NOTE(review): the function signature and the PC98/ISA branch
 * conditionals are missing from this listing; the statements below
 * mix the PC98 path (status port 0x33) with the AT/EISA path
 * (ports 0x61 and 0x461).
 */
203 int port = inb(0x33);
205 log(LOG_CRIT, "NMI PC98 port = %x\n", port);
/* Epson-specific acknowledge; assumes machine id 0x20 requires the
 * extra port write -- TODO confirm semantics of port 0xc16. */
206 if (epson_machine_id == 0x20)
207 epson_outb(0xc16, epson_inb(0xc16) | 0x1);
208 if (port & NMI_PARITY) {
209 log(LOG_CRIT, "BASE RAM parity error, likely hardware failure.");
211 } else if (port & NMI_EPARITY) {
212 log(LOG_CRIT, "EXTENDED RAM parity error, likely hardware failure.");
215 log(LOG_CRIT, "\nNMI Resume ??\n");
/* AT path: read the ISA and EISA NMI status ports. */
218 int isa_port = inb(0x61);
219 int eisa_port = inb(0x461);
221 log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port);
/* Give the MicroChannel bus code a chance to claim the NMI first. */
223 if (MCA_system && mca_bus_nmi())
227 if (isa_port & NMI_PARITY) {
228 log(LOG_CRIT, "RAM parity error, likely hardware failure.");
232 if (isa_port & NMI_IOCHAN) {
233 log(LOG_CRIT, "I/O channel check, likely hardware failure.");
238 * On a real EISA machine, this will never happen. However it can
239 * happen on ISA machines which implement XT style floating point
240 * error handling (very rare). Save them from a meaningless panic.
242 if (eisa_port == 0xff)
245 if (eisa_port & ENMI_WATCHDOG) {
246 log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure.");
250 if (eisa_port & ENMI_BUSTIMER) {
251 log(LOG_CRIT, "EISA bus timeout, likely hardware failure.");
255 if (eisa_port & ENMI_IOSTATUS) {
256 log(LOG_CRIT, "EISA I/O port status error.");
264 * Fill in default interrupt table (in case of spurious interrupt
265 * during configuration of kernel, setup interrupt control unit
/*
 * NOTE(review): the function signature and the #ifdef PC98 /
 * AUTO_EOI_1 / AUTO_EOI_2 conditionals are missing from this
 * listing, which is why several alternative outb() sequences for
 * the same ICW appear back to back below.
 */
/* Point every irq at the stray catcher before unmasking anything. */
273 for (i = 0; i < ICU_LEN; i++)
274 icu_unset(i, (inthand2_t *)NULL);
276 /* initialize 8259's */
/* ICW1: 0x19 = edge/level variant (PC98), 0x11 = AT standard. */
279 outb(IO_ICU1, 0x19); /* reset; program device, four bytes */
282 outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
/* ICW2-ICW4 follow on the IMR port (base+ICU_IMR_OFFSET). */
284 outb(IO_ICU1+ICU_IMR_OFFSET, NRSVIDT); /* starting at this vector index */
285 outb(IO_ICU1+ICU_IMR_OFFSET, IRQ_SLAVE); /* cascade line for the slave (IRQ_SLAVE) */
288 outb(IO_ICU1+ICU_IMR_OFFSET, 0x1f); /* (master) auto EOI, 8086 mode */
290 outb(IO_ICU1+ICU_IMR_OFFSET, 0x1d); /* (master) 8086 mode */
294 outb(IO_ICU1+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
296 outb(IO_ICU1+ICU_IMR_OFFSET, 1); /* 8086 mode */
299 outb(IO_ICU1+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
300 outb(IO_ICU1, 0x0a); /* default to IRR on read */
302 outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
/* Same initialization sequence for the slave 8259. */
307 outb(IO_ICU2, 0x19); /* reset; program device, four bytes */
310 outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
312 outb(IO_ICU2+ICU_IMR_OFFSET, NRSVIDT+8); /* starting at this vector index */
313 outb(IO_ICU2+ICU_IMR_OFFSET, ICU_SLAVEID); /* my slave id (ICU_SLAVEID) */
315 outb(IO_ICU2+ICU_IMR_OFFSET,9); /* 8086 mode */
318 outb(IO_ICU2+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
320 outb(IO_ICU2+ICU_IMR_OFFSET,1); /* 8086 mode */
323 outb(IO_ICU2+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
324 outb(IO_ICU2, 0x0a); /* default to IRR on read */
328 * Caught a stray interrupt, notify
/* NOTE(review): return-type line of the definition is missing here. */
331 isa_strayintr(void *vcookiep)
/* Recover the irq number from the cookie's offset into intr_unit[]
 * (icu_unset() stores &intr_unit[intr] as the unit cookie). */
333 int intr = (void **)vcookiep - &intr_unit[0];
335 /* DON'T BOTHER FOR NOW! */
336 /* for some reason, we get bursts of intr #7, even if not enabled! */
338 * Well the reason you got bursts of intr #7 is because someone
339 * raised an interrupt line and dropped it before the 8259 could
340 * prioritize it. This is documented in the intel data book. This
341 * means you have BAD hardware! I have changed this so that only
342 * the first 5 get logged, then it quits logging them, and puts
343 * out a special message. rgrimes 3/25/1993
346 * XXX TODO print a different message for #7 if it is for a
347 * glitch. Glitches can be distinguished from real #7's by
348 * testing that the in-service bit is _not_ set. The test
349 * must be done before sending an EOI so it can't be done if
350 * we are using AUTO_EOI_1.
/* Rate-limit logging: intrcnt[1+intr] is this irq's stray counter. */
352 printf("STRAY %d\n", intr);
353 if (intrcnt[1 + intr] <= 5)
354 log(LOG_ERR, "stray irq %d\n", intr);
355 if (intrcnt[1 + intr] == 5)
357 "too many stray irq %d's; not logging any more\n", intr);
362 * Return a bitmap of the current interrupt requests. This is 8259-specific
363 * and is only suitable for use at probe time.
/* NOTE(review): signature and the inb() reads of both ICUs' IRR
 * registers (irr1/irr2) are missing from this listing. */
373 return ((irr2 << 8) | irr1);
/*
 * Refresh each irq's cached interrupt mask from its *maskptr source.
 * NOTE(review): return type, local declarations and some conditional
 * lines are missing from this listing.
 */
378 update_intr_masks(void)
383 for (intr=0; intr < ICU_LEN; intr ++) {
385 /* no 8259 SLAVE to ignore */
387 if (intr==ICU_SLAVEID) continue; /* ignore 8259 SLAVE output */
389 maskptr = intr_info[intr].maskp;
/* An irq always blocks itself and the soft clock interrupt. */
392 *maskptr |= SWI_CLOCK_MASK | (1 << intr);
394 if (mask != intr_info[intr].mask) {
396 printf ("intr_mask[%2d] old=%08x new=%08x ptr=%p.\n",
397 intr, intr_info[intr].mask, mask, maskptr);
399 intr_info[intr].mask = mask;
/*
 * Point intr_countp[intr] at the intrcnt[] slot labelled "<name> irq<n>"
 * in the intrnames string table, creating the label if necessary.
 * NOTE(review): return type, several locals and trailing statements
 * are missing from this listing.
 */
408 update_intrname(int intr, char *name)
412 int name_index, off, strayintr;
415 * Initialise strings for bitbucket and stray interrupt counters.
416 * These have statically allocated indices 0 and 1 through ICU_LEN.
418 if (intrnames[0] == '\0') {
419 off = sprintf(intrnames, "???") + 1;
420 for (strayintr = 0; strayintr < ICU_LEN; strayintr++)
421 off += sprintf(intrnames + off, "stray irq%d",
/* Give up if the formatted label would not fit in buf. */
427 if (snprintf(buf, sizeof(buf), "%s irq%d", name, intr) >= sizeof(buf))
431 * Search for `buf' in `intrnames'. In the usual case when it is
432 * not found, append it to the end if there is enough space (the \0
433 * terminator for the previous string, if any, becomes a separator).
435 for (cp = intrnames, name_index = 0;
436 cp != eintrnames && name_index < NR_INTRNAMES;
437 cp += strlen(cp) + 1, name_index++) {
439 if (strlen(buf) >= eintrnames - cp)
444 if (strcmp(cp, buf) == 0)
449 printf("update_intrname: counting %s irq%d as %s\n", name, intr,
/* Redirect this irq's counter to the (found or appended) slot. */
453 intr_countp[intr] = &intrcnt[name_index];
457 * NOTE! intr_handler[] is only used for FAST interrupts, the *vector.s
458 * code ignores it for normal interrupts.
/*
 * Install `handler' for irq `intr' and program the matching IDT entry
 * (and, in the APIC_IO configuration, the IO APIC redirection entry).
 * Fails if the irq is out of range or already claimed. NOTE(review):
 * return type, #ifdef APIC_IO/#else structure and the closing
 * statements are missing from this listing.
 */
461 icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
464 int select; /* the select register is 8 bits */
466 u_int32_t value; /* the window register is 32 bits */
469 u_int mask = (maskptr ? *maskptr : 0);
/* Range checks: APIC variant has no 8259 slave hole, ICU one does. */
472 if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
474 if ((u_int)intr >= ICU_LEN || intr == ICU_SLAVEID)
/* Busy: a real handler (not the stray catcher) is already installed. */
476 if (intr_handler[intr] != isa_strayintr)
480 cpu_disable_intr(); /* YYY */
481 intr_handler[intr] = handler;
482 intr_unit[intr] = arg;
483 intr_info[intr].maskp = maskptr;
484 intr_info[intr].mask = mask | SWI_CLOCK_MASK | (1 << intr);
486 /* YYY fast ints supported and mp protected but ... */
/* APIC path: pick the vector by priority class, set the IDT gate. */
490 if (flags & INTR_FAST) {
491 vector = TPR_FAST_INTS + intr;
492 setidt(vector, fastintr[intr],
493 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
495 vector = TPR_SLOW_INTS + intr;
496 #ifdef APIC_INTR_REORDER
497 #ifdef APIC_INTR_HIGHPRI_CLOCK
498 /* XXX: Hack (kludge?) for more accurate clock. */
499 if (intr == apic_8254_intr || intr == 8) {
500 vector = TPR_FAST_INTS + intr;
504 setidt(vector, slowintr[intr],
505 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
507 #ifdef APIC_INTR_REORDER
508 set_lapic_isrloc(intr, vector);
511 * Reprogram the vector in the IO APIC.
/* Only touch the redirection entry if this pin maps to an IO APIC. */
513 if (int_to_apicintpin[intr].ioapic >= 0) {
514 select = int_to_apicintpin[intr].redirindex;
515 value = io_apic_read(int_to_apicintpin[intr].ioapic,
516 select) & ~IOART_INTVEC;
517 io_apic_write(int_to_apicintpin[intr].ioapic,
518 select, value | vector);
/* Non-APIC path: plain ICU vector, fast or slow stub. */
521 setidt(ICU_OFFSET + intr,
522 flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
523 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/*
 * Tear down the handler for `intr', restoring the stray catcher and
 * the default (slow) IDT vector. Old-style (K&R) definition; the
 * parameter declarations and locals are missing from this listing.
 */
531 icu_unset(intr, handler)
/* Refuse if out of range or `handler' is not the installed one. */
537 if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
542 cpu_disable_intr(); /* YYY */
/* Revert counter to the static stray slot and re-arm the catcher. */
543 intr_countp[intr] = &intrcnt[1 + intr];
544 intr_handler[intr] = isa_strayintr;
545 intr_info[intr].maskp = NULL;
546 intr_info[intr].mask = HWI_MASK | SWI_MASK;
/* Self-referential cookie lets isa_strayintr() recover the irq no. */
547 intr_unit[intr] = &intr_unit[intr];
549 /* XXX how do I re-create dvp here? */
550 setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
551 slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
553 #ifdef APIC_INTR_REORDER
554 set_lapic_isrloc(intr, ICU_OFFSET + intr);
556 setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
557 GSEL(GCODE_SEL, SEL_KPL));
564 /* The following notice applies beyond this point in the file */
567 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
568 * All rights reserved.
570 * Redistribution and use in source and binary forms, with or without
571 * modification, are permitted provided that the following conditions
573 * 1. Redistributions of source code must retain the above copyright
574 * notice unmodified, this list of conditions, and the following
576 * 2. Redistributions in binary form must reproduce the above copyright
577 * notice, this list of conditions and the following disclaimer in the
578 * documentation and/or other materials provided with the distribution.
580 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
581 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
582 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
583 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
584 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
585 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
586 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
587 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
588 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
589 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
591 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
/* Shared-interrupt handler record; the struct members are missing
 * from this listing (see uses below: next, handler, argument, mask,
 * maskptr, intr, name, flags). */
595 typedef struct intrec {
/* Head of the per-irq chain of shared handler records. */
606 static intrec *intreclist_head[ICU_LEN];
609 * The interrupt multiplexer calls each of the handlers in turn. The
610 * ipl is initially quite low. It is raised as necessary for each call
611 * and lowered after the call. Thus out of order handling is possible
612 * even for interrupts of the same type. This is probably no more
613 * harmful than out of order handling in general (not harmful except
614 * for real time response which we don't support anyway).
/* NOTE(review): intr_mux()'s signature is missing from this listing;
 * `arg' is the &intreclist_head[irq] cookie installed by icu_setup. */
623 for (pp = arg; (p = *pp) != NULL; pp = &p->next) {
624 oldspl = splq(p->mask);
625 p->handler(p->argument);
/* Return the handler record on irq's chain whose maskptr matches,
 * or NULL. NOTE(review): return type and trailing lines missing. */
631 find_idesc(unsigned *maskptr, int irq)
633 intrec *p = intreclist_head[irq];
635 while (p && p->maskptr != maskptr)
/* Return the link pointer that references `idesc' on irq's chain
 * (for unlinking). NOTE(review): body mostly missing here. */
642 find_pred(intrec *idesc, int irq)
644 intrec **pp = &intreclist_head[irq];
657 * Both the low level handler and the shared interrupt multiplexer
658 * block out further interrupts as set in the handlers "mask", while
659 * the handler is running. In fact *maskptr should be used for this
660 * purpose, but since this requires one more pointer dereference on
661 * each interrupt, we rather bother update "mask" whenever *maskptr
662 * changes. The function "update_masks" should be called **after**
663 * all manipulation of the linked list of interrupt handlers hung
664 * off of intrdec_head[irq] is complete, since the chain of handlers
665 * will both determine the *maskptr values and the instances of mask
666 * that are fixed. This function should be called with the irq for
667 * which a new handler has been add blocked, since the masks may not
668 * yet know about the use of this irq for a device of a certain class.
/* Re-read every chained handler's cached mask from its *maskptr. */
672 update_mux_masks(void)
675 for (irq = 0; irq < ICU_LEN; irq++) {
676 intrec *idesc = intreclist_head[irq];
677 while (idesc != NULL) {
678 if (idesc->maskptr != NULL) {
679 /* our copy of *maskptr may be stale, refresh */
680 idesc->mask = *idesc->maskptr;
/* Propagate a mask change for `irq'; body partially missing from
 * this listing (the add/remove-from-mask branches are elided). */
688 update_masks(intrmask_t *maskptr, int irq)
690 intrmask_t mask = 1 << irq;
695 if (find_idesc(maskptr, irq) == NULL) {
696 /* no reference to this maskptr was found in this irq's chain */
699 /* a reference to this maskptr was found in this irq's chain */
702 /* we need to update all values in the intr_mask[irq] array */
704 /* update mask in chains of the interrupt multiplex handler as well */
709 * Add an interrupt handler to the linked list hung off of intreclist_head[irq]
710 * and install a shared interrupt multiplex handler, if necessary. Install
711 * an interrupt thread for each interrupt (though FAST interrupts will not
712 * use it). The preemption procedure checks the CPL. lwkt_preempt() will
713 * check relative thread priorities for us as long as we properly pass through
716 * The interrupt thread has already been put on the run queue, so if we cannot
717 * preempt we should force a reschedule.
719 * YYY needs work. At the moment the handler is run inside a critical
720 * section so only the preemption cpl check is used.
/* Preempt into interrupt thread `td' unless its irq is cpl-blocked. */
723 cpu_intr_preempt(struct thread *td, int critpri)
725 struct md_intr_info *info = td->td_info.intdata;
727 if ((curthread->td_cpl & (1 << info->irq)) == 0)
728 lwkt_preempt(td, critpri);
/*
 * Link `idesc' onto its irq's handler chain, installing either the
 * handler directly (first user), or the intr_mux multiplexer (shared).
 * NOTE(review): return type, locals, error returns and the closing
 * brace are missing from this listing.
 */
734 add_intrdesc(intrec *idesc)
736 int irq = idesc->intr;
740 * YYY This is a hack. The MI interrupt code in kern/kern_intr.c
741 * handles interrupt thread scheduling for NORMAL interrupts. It
742 * will never get called for fast interrupts. On the otherhand,
743 * the handler this code installs in intr_handler[] for a NORMAL
744 * interrupt is not used by the *vector.s code, so we need this
745 * temporary hack to run normal interrupts as interrupt threads.
/* Lazily register the MI interrupt thread, once per irq. */
748 if (intr_info[irq].mihandler_installed == 0) {
751 intr_info[irq].mihandler_installed = 1;
752 intr_info[irq].irq = irq;
753 td = register_int(irq, intr_mux, &intreclist_head[irq], idesc->name);
754 td->td_info.intdata = &intr_info[irq];
755 td->td_preemptable = cpu_intr_preempt;
756 printf("installed MI handler for int %d\n", irq);
759 head = intreclist_head[irq];
762 /* first handler for this irq, just install it */
763 if (icu_setup(irq, idesc->handler, idesc->argument,
764 idesc->maskptr, idesc->flags) != 0)
767 update_intrname(irq, idesc->name);
769 intreclist_head[irq] = idesc;
/* Sharing is only legal if neither party demanded exclusivity. */
771 if ((idesc->flags & INTR_EXCL) != 0
772 || (head->flags & INTR_EXCL) != 0) {
774 * can't append new handler, if either list head or
775 * new handler do not allow interrupts to be shared
778 printf("\tdevice combination doesn't support "
779 "shared irq%d\n", irq);
782 if (head->next == NULL) {
784 * second handler for this irq, replace device driver's
785 * handler by shared interrupt multiplexer function
787 icu_unset(irq, head->handler);
788 if (icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0) != 0)
791 printf("\tusing shared irq%d.\n", irq);
792 update_intrname(irq, "mux");
794 /* just append to the end of the chain */
795 while (head->next != NULL)
799 update_masks(idesc->maskptr, irq);
804 * Create and activate an interrupt handler descriptor data structure.
806 * The dev_instance pointer is required for resource management, and will
807 * only be passed through to resource_claim().
809 * There will be functions that derive a driver and unit name from a
810 * dev_instance variable, and those functions will be used to maintain the
811 * interrupt counter label array referenced by systat and vmstat to report
812 * device interrupt rates (->update_intrlabels).
814 * Add the interrupt handler descriptor data structure created by an
815 * earlier call of create_intr() to the linked list for its irq and
816 * adjust the interrupt masks if necessary.
818 * WARNING: This is an internal function and not to be used by device
819 * drivers. It is subject to change without notice.
/* NOTE(review): return type, locals, splx() restore and the success
 * return path are missing from this listing. */
823 inthand_add(const char *name, int irq, inthand2_t handler, void *arg,
824 intrmask_t *maskptr, int flags)
/* Sanity: irq bits must fit in an intrmask_t.
 * NOTE(review): %d with a sizeof (size_t) operand is a format
 * mismatch in the original -- should be %zu or a cast. */
830 if (ICU_LEN > 8 * sizeof *maskptr) {
831 printf("create_intr: ICU_LEN of %d too high for %d bit intrmask\n",
832 ICU_LEN, 8 * sizeof *maskptr);
835 if ((unsigned)irq >= ICU_LEN) {
836 printf("create_intr: requested irq%d too high, limit is %d\n",
/* Allocate and zero the descriptor, then its private name copy. */
841 idesc = malloc(sizeof *idesc, M_DEVBUF, M_WAITOK);
844 bzero(idesc, sizeof *idesc);
848 idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
849 if (idesc->name == NULL) {
850 free(idesc, M_DEVBUF);
853 strcpy(idesc->name, name);
855 idesc->handler = handler;
856 idesc->argument = arg;
857 idesc->maskptr = maskptr;
859 idesc->flags = flags;
/* Block this irq while the chain is manipulated. */
862 oldspl = splq(1 << irq);
864 /* add irq to class selected by maskptr */
865 errcode = add_intrdesc(idesc);
/* On failure, undo the allocations. */
870 printf("\tintr_connect(irq%d) failed, result=%d\n",
872 free(idesc->name, M_DEVBUF);
873 free(idesc, M_DEVBUF);
881 * Deactivate and remove the interrupt handler descriptor data connected
882 * created by an earlier call of intr_connect() from the linked list and
883 * adjust the interrupt masks if necessary.
885 * Return the memory held by the interrupt handler descriptor data structure
886 * to the system. Make sure, the handler is not actively used anymore, before.
/* NOTE(review): return type, locals (irq, oldspl, errcode), several
 * branch/brace lines and the splx() restore are missing here. */
890 inthand_remove(intrec *idesc)
892 intrec **hook, *head;
902 /* find pointer that keeps the reference to this interrupt descriptor */
903 hook = find_pred(idesc, irq)
907 /* make copy of original list head, the line after may overwrite it */
908 head = intreclist_head[irq];
910 /* unlink: make predecessor point to idesc->next instead of to idesc */
913 /* now check whether the element we removed was the list head */
916 oldspl = splq(1 << irq);
918 /* check whether the new list head is the only element on list */
919 head = intreclist_head[irq];
921 icu_unset(irq, intr_mux);
922 if (head->next != NULL) {
923 /* install the multiplex handler with new list head as argument */
924 errcode = icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0);
926 update_intrname(irq, NULL);
928 /* install the one remaining handler for this irq */
929 errcode = icu_setup(irq, head->handler,
931 head->maskptr, head->flags);
933 update_intrname(irq, head->name);
936 /* revert to old handler, eg: strayintr */
937 icu_unset(irq, idesc->handler);
941 update_masks(idesc->maskptr, irq);
942 free(idesc, M_DEVBUF);
949 * This function is called by an interrupt thread when it has completed
950 * processing a loop. We re-enable interrupts and interlock with
953 * See kern/kern_intr.c for more information.
/* NOTE(review): return type, the `mask' computation, irq unmasking
 * and the critical-section exit are missing from this listing. */
956 ithread_done(int irq)
958 struct mdglobaldata *gd = mdcpu;
961 KKASSERT(curthread->td_pri >= TDPRI_CRIT);
962 lwkt_deschedule_self();
/* Re-schedule immediately if the irq fired again while we ran. */
964 if (gd->gd_ipending & mask) {
965 atomic_clear_int_nonlocked(&gd->gd_ipending, mask);
967 lwkt_schedule_self();
975 * forward_fast_remote()
977 * This function is called from the receiving end of an IPIQ when a
978 * remote cpu wishes to forward a fast interrupt to us. All we have to
979 * do is set the interrupt pending and let the IPI's doreti deal with it.
/* NOTE(review): return type and the `irq = (int)arg'-style extraction
 * of the irq from `arg' are missing from this listing. */
982 forward_fastint_remote(void *arg)
985 struct mdglobaldata *gd = mdcpu;
987 atomic_set_int_nonlocked(&gd->gd_fpending, 1 << irq);
988 gd->mi.gd_reqpri = TDPRI_CRIT;