2 * Copyright (c) 1991 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
37 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
38 * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.c,v 1.30 2005/06/11 09:03:49 swildner Exp $
41 * This file contains an aggregated module marked:
42 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
43 * All rights reserved.
44 * See the notice for details.
48 #include "opt_auto_eoi.h"
50 #include <sys/param.h>
52 #include <machine/lock.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/malloc.h>
57 #include <sys/errno.h>
58 #include <sys/interrupt.h>
59 #include <machine/ipl.h>
60 #include <machine/md_var.h>
61 #include <machine/segments.h>
63 #include <machine/globaldata.h>
65 #include <sys/thread2.h>
67 #include <machine/smptests.h> /** FAST_HI */
68 #include <machine/smp.h>
69 #include <bus/isa/i386/isa.h>
70 #include <i386/isa/icu.h>
73 #include <bus/isa/isavar.h>
75 #include <i386/isa/intr_machdep.h>
76 #include <bus/isa/isavar.h>
77 #include <sys/interrupt.h>
79 #include <machine/clock.h>
81 #include <machine/cpu.h>
/*
 * NOTE(review): this file is an elided excerpt of a numbered listing; interior
 * lines are missing throughout.  Comments below only describe what the visible
 * lines establish.
 */
83 /* XXX should be in suitable include files */
/* The 8259 interrupt mask register lives one port above the command port. */
84 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
89 * This is to accommodate "mixed-mode" programming for
90 * motherboards that don't connect the 8254 to the IO APIC.
/* Name-table capacity: 1 bitbucket slot + ICU_LEN stray slots + 2*ICU_LEN device slots. */
95 #define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
97 static inthand2_t isa_strayintr;
98 #if defined(FAST_HI) && defined(APIC_IO)
99 static inthand2_t isa_wrongintr;
101 static void init_i8259(void);
/*
 * Per-IRQ dispatch state.  Arrays are sized ICU_LEN*2 — presumably to cover
 * both the low and high (FAST_HI) vector ranges; TODO confirm against the
 * full source.
 */
103 void *intr_unit[ICU_LEN*2];
104 u_long *intr_countp[ICU_LEN*2];
/* Every slot starts out pointing at the stray-interrupt catcher. */
105 inthand2_t *intr_handler[ICU_LEN*2] = {
106 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
107 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
108 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
109 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
110 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
111 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
112 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
113 isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
/* Machine-dependent bookkeeping per IRQ (other fields elided in this excerpt). */
116 static struct md_intr_info {
119 int mihandler_installed;
121 } intr_info[ICU_LEN*2];
/*
 * IDT entry points generated in assembler (*vector.s) for FAST interrupts.
 * Entries 16-23 are presumably conditional on APIC_IO — the #if lines are
 * elided in this excerpt; confirm against the full source.
 */
123 static inthand_t *fastintr[ICU_LEN] = {
124 &IDTVEC(fastintr0), &IDTVEC(fastintr1),
125 &IDTVEC(fastintr2), &IDTVEC(fastintr3),
126 &IDTVEC(fastintr4), &IDTVEC(fastintr5),
127 &IDTVEC(fastintr6), &IDTVEC(fastintr7),
128 &IDTVEC(fastintr8), &IDTVEC(fastintr9),
129 &IDTVEC(fastintr10), &IDTVEC(fastintr11),
130 &IDTVEC(fastintr12), &IDTVEC(fastintr13),
131 &IDTVEC(fastintr14), &IDTVEC(fastintr15),
133 &IDTVEC(fastintr16), &IDTVEC(fastintr17),
134 &IDTVEC(fastintr18), &IDTVEC(fastintr19),
135 &IDTVEC(fastintr20), &IDTVEC(fastintr21),
136 &IDTVEC(fastintr22), &IDTVEC(fastintr23),
/* Assembler entry points used to replay ("unpend") deferred fast interrupts. */
140 unpendhand_t *fastunpend[ICU_LEN] = {
141 IDTVEC(fastunpend0), IDTVEC(fastunpend1),
142 IDTVEC(fastunpend2), IDTVEC(fastunpend3),
143 IDTVEC(fastunpend4), IDTVEC(fastunpend5),
144 IDTVEC(fastunpend6), IDTVEC(fastunpend7),
145 IDTVEC(fastunpend8), IDTVEC(fastunpend9),
146 IDTVEC(fastunpend10), IDTVEC(fastunpend11),
147 IDTVEC(fastunpend12), IDTVEC(fastunpend13),
148 IDTVEC(fastunpend14), IDTVEC(fastunpend15),
150 IDTVEC(fastunpend16), IDTVEC(fastunpend17),
151 IDTVEC(fastunpend18), IDTVEC(fastunpend19),
152 IDTVEC(fastunpend20), IDTVEC(fastunpend21),
153 IDTVEC(fastunpend22), IDTVEC(fastunpend23),
/* Assembler entry points for normal (scheduled/threaded) interrupts. */
157 static inthand_t *slowintr[ICU_LEN] = {
158 &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
159 &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
160 &IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
161 &IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15),
163 &IDTVEC(intr16), &IDTVEC(intr17), &IDTVEC(intr18), &IDTVEC(intr19),
164 &IDTVEC(intr20), &IDTVEC(intr21), &IDTVEC(intr22), &IDTVEC(intr23),
/* NMI status bits read from ISA port 0x61 ... */
168 #define NMI_PARITY (1 << 7)
169 #define NMI_IOCHAN (1 << 6)
/* ... and from EISA port 0x461 (see isa_nmi below). */
170 #define ENMI_WATCHDOG (1 << 7)
171 #define ENMI_BUSTIMER (1 << 6)
172 #define ENMI_IOSTATUS (1 << 5)
175 * Handle a NMI, possibly a machine check.
176 * return true to panic system, false to ignore.
/*
 * Reads the ISA (0x61) and EISA (0x461) NMI status ports and logs the cause.
 * The function header, return statements, and closing braces are elided in
 * this excerpt, so the exact panic decisions cannot be confirmed here.
 */
183 int isa_port = inb(0x61);
184 int eisa_port = inb(0x461);
186 log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port);
188 if (isa_port & NMI_PARITY) {
189 log(LOG_CRIT, "RAM parity error, likely hardware failure.");
193 if (isa_port & NMI_IOCHAN) {
194 log(LOG_CRIT, "I/O channel check, likely hardware failure.");
199 * On a real EISA machine, this will never happen. However it can
200 * happen on ISA machines which implement XT style floating point
201 * error handling (very rare). Save them from a meaningless panic.
/* 0xff from 0x461 means "no EISA register here" — treat as benign. */
203 if (eisa_port == 0xff)
206 if (eisa_port & ENMI_WATCHDOG) {
207 log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure.");
211 if (eisa_port & ENMI_BUSTIMER) {
212 log(LOG_CRIT, "EISA bus timeout, likely hardware failure.");
216 if (eisa_port & ENMI_IOSTATUS) {
217 log(LOG_CRIT, "EISA I/O port status error.");
224 * ICU reinitialize when ICU configuration has lost.
/*
 * Fragment: walks all IRQs and presumably re-enables those with a real
 * (non-stray) handler installed — the loop body is elided here.
 */
232 for(i=0;i<ICU_LEN;i++)
233 if(intr_handler[i] != isa_strayintr)
239 * Fill in default interrupt table (in case of spurious interrupt
240 * during configuration of kernel, setup interrupt control unit
/* Separate routine (header elided): resets every IRQ back to the stray catcher. */
248 for (i = 0; i < ICU_LEN; i++)
249 icu_unset(i, isa_strayintr);
/*
 * Program both 8259 PICs: ICW1..ICW4 initialization sequence for the master
 * (IO_ICU1) and slave (IO_ICU2), then mask all lines and select IRR reads.
 * AUTO_EOI lines are alternatives selected by #ifdefs elided in this excerpt.
 */
257 /* initialize 8259's */
258 outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
259 outb(IO_ICU1+ICU_IMR_OFFSET, NRSVIDT); /* starting at this vector index */
260 outb(IO_ICU1+ICU_IMR_OFFSET, IRQ_SLAVE); /* slave on line 7 */
262 outb(IO_ICU1+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
264 outb(IO_ICU1+ICU_IMR_OFFSET, 1); /* 8086 mode */
266 outb(IO_ICU1+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
267 outb(IO_ICU1, 0x0a); /* default to IRR on read */
268 outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
269 outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
270 outb(IO_ICU2+ICU_IMR_OFFSET, NRSVIDT+8); /* starting at this vector index */
271 outb(IO_ICU2+ICU_IMR_OFFSET, ICU_SLAVEID); /* my slave id is 7 */
273 outb(IO_ICU2+ICU_IMR_OFFSET, 2 | 1); /* auto EOI, 8086 mode */
275 outb(IO_ICU2+ICU_IMR_OFFSET,1); /* 8086 mode */
277 outb(IO_ICU2+ICU_IMR_OFFSET, 0xff); /* leave interrupts masked */
278 outb(IO_ICU2, 0x0a); /* default to IRR on read */
282 * Caught a stray interrupt, notify
/*
 * Default handler for unclaimed interrupts.  The cookie is a pointer into
 * intr_unit[], so pointer arithmetic recovers the IRQ number.  Logs the first
 * five occurrences per IRQ, then goes quiet.
 */
285 isa_strayintr(void *vcookiep)
287 int intr = (void **)vcookiep - &intr_unit[0];
289 /* DON'T BOTHER FOR NOW! */
290 /* for some reason, we get bursts of intr #7, even if not enabled! */
292 * Well the reason you got bursts of intr #7 is because someone
293 * raised an interrupt line and dropped it before the 8259 could
294 * prioritize it. This is documented in the intel data book. This
295 * means you have BAD hardware! I have changed this so that only
296 * the first 5 get logged, then it quits logging them, and puts
297 * out a special message. rgrimes 3/25/1993
300 * XXX TODO print a different message for #7 if it is for a
301 * glitch. Glitches can be distinguished from real #7's by
302 * testing that the in-service bit is _not_ set. The test
303 * must be done before sending an EOI so it can't be done if
304 * we are using AUTO_EOI_1.
/* intrcnt[0] is the bitbucket; stray counters start at index 1. */
306 if (intrcnt[1 + intr] <= 5)
307 log(LOG_ERR, "stray irq %d\n", intr);
/* NOTE(review): at count 5 both messages fire (cf. isa_wrongintr's == 6). */
308 if (intrcnt[1 + intr] == 5)
310 "too many stray irq %d's; not logging any more\n", intr);
313 #if defined(FAST_HI) && defined(APIC_IO)
316 * This occurs if we mis-programmed the APIC and its vector is still
317 * pointing to the slow vector even when we thought we reprogrammed it
318 * to the high vector. This can occur when interrupts are improperly
319 * routed by the APIC. The unit data is opaque so we have to try to
320 * find it in the unit array.
/* Linear search of intr_unit[] to map the opaque cookie back to an IRQ slot. */
323 isa_wrongintr(void *vcookiep)
327 for (intr = 0; intr < ICU_LEN*2; ++intr) {
328 if (intr_unit[intr] == vcookiep)
/* Fell off the end: cookie matched no known unit. */
331 if (intr == ICU_LEN*2) {
332 log(LOG_ERR, "stray unknown irq (APIC misprogrammed)\n");
333 } else if (intrcnt[1 + intr] <= 5) {
334 log(LOG_ERR, "stray irq ~%d (APIC misprogrammed)\n", intr);
/* Uses == 6 so the "too many" message prints exactly once, after the 5 logs. */
335 } else if (intrcnt[1 + intr] == 6) {
337 "too many stray irq ~%d's; not logging any more\n", intr);
345 * Return a bitmap of the current interrupt requests. This is 8259-specific
346 * and is only suitable for use at probe time.
349 isa_irq_pending(void)
/* irr1/irr2 are presumably read from the two PIC IRR registers (reads elided). */
356 return ((irr2 << 8) | irr1);
/*
 * Refresh the cached per-IRQ mask in intr_info[] from each registered
 * *maskptr, forcing SWI_CLOCK_MASK and the IRQ's own bit on.  Prints a
 * bootverbose-style trace when a cached value changes (guard elided).
 */
361 update_intr_masks(void)
366 for (intr=0; intr < ICU_LEN; intr ++) {
368 /* no 8259 SLAVE to ignore */
370 if (intr==ICU_SLAVEID) continue; /* ignore 8259 SLAVE output */
372 maskptr = intr_info[intr].maskp
375 *maskptr |= SWI_CLOCK_MASK | (1 << intr);
377 if (mask != intr_info[intr].mask) {
379 printf ("intr_mask[%2d] old=%08x new=%08x ptr=%p.\n",
380 intr, intr_info[intr].mask, mask, maskptr);
382 intr_info[intr].mask = mask;
/*
 * Associate the counter for `intr' with a "name irqN" label in the packed
 * intrnames[] string table (NUL-separated entries), creating the table's
 * static prefix on first use.  Points intr_countp[intr] at the matching
 * intrcnt[] slot.
 */
391 update_intrname(int intr, char *name)
395 int name_index, off, strayintr;
398 * Initialise strings for bitbucket and stray interrupt counters.
399 * These have statically allocated indices 0 and 1 through ICU_LEN.
401 if (intrnames[0] == '\0') {
402 off = sprintf(intrnames, "???") + 1;
403 for (strayintr = 0; strayintr < ICU_LEN; strayintr++)
404 off += sprintf(intrnames + off, "stray irq%d",
/* Truncated label -> bail out (fallback path elided in this excerpt). */
410 if (snprintf(buf, sizeof(buf), "%s irq%d", name, intr) >= sizeof(buf))
414 * Search for `buf' in `intrnames'. In the usual case when it is
415 * not found, append it to the end if there is enough space (the \0
416 * terminator for the previous string, if any, becomes a separator).
418 for (cp = intrnames, name_index = 0;
419 cp != eintrnames && name_index < NR_INTRNAMES;
420 cp += strlen(cp) + 1, name_index++) {
422 if (strlen(buf) >= eintrnames - cp)
427 if (strcmp(cp, buf) == 0)
432 printf("update_intrname: counting %s irq%d as %s\n", name, intr,
436 intr_countp[intr] = &intrcnt[name_index];
440 * NOTE! intr_handler[] is only used for FAST interrupts, the *vector.s
441 * code ignores it for normal interrupts.
/*
 * Install `handler' for IRQ `intr': record it in the dispatch tables, then
 * point the IDT (and, under FAST_HI+APIC_IO, the IO APIC redirection entry)
 * at the fast or slow assembler stub.  Fails if the slot is already claimed
 * (return statements elided in this excerpt).
 */
444 icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
446 #if defined(FAST_HI) && defined(APIC_IO)
447 int select; /* the select register is 8 bits */
449 u_int32_t value; /* the window register is 32 bits */
452 u_int mask = (maskptr ? *maskptr : 0);
/* Two alternative range checks — presumably #if'd on APIC_IO (directives elided). */
455 if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
457 if ((u_int)intr >= ICU_LEN || intr == ICU_SLAVEID)
/* Slot already owned by a real handler — refuse to double-install. */
460 if (intr_handler[intr] != isa_strayintr)
464 cpu_disable_intr(); /* YYY */
465 intr_handler[intr] = handler;
466 intr_unit[intr] = arg;
467 intr_info[intr].maskp = maskptr;
468 intr_info[intr].mask = mask | SWI_CLOCK_MASK | (1 << intr);
470 /* YYY fast ints supported and mp protected but ... */
473 #if defined(FAST_HI) && defined(APIC_IO)
474 if (flags & INTR_FAST) {
476 * Install a spurious interrupt in the low space in case
477 * the IO apic is not properly reprogrammed.
479 vector = TPR_SLOW_INTS + intr;
480 setidt(vector, isa_wrongintr,
481 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
482 vector = TPR_FAST_INTS + intr;
483 setidt(vector, fastintr[intr],
484 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
486 vector = TPR_SLOW_INTS + intr;
487 #ifdef APIC_INTR_REORDER
488 #ifdef APIC_INTR_HIGHPRI_CLOCK
489 /* XXX: Hack (kludge?) for more accurate clock. */
490 if (intr == apic_8254_intr || intr == 8) {
491 vector = TPR_FAST_INTS + intr;
495 setidt(vector, slowintr[intr],
496 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
498 #ifdef APIC_INTR_REORDER
499 set_lapic_isrloc(intr, vector);
502 * Reprogram the vector in the IO APIC.
504 * XXX EOI/mask a pending (stray) interrupt on the old vector?
506 if (int_to_apicintpin[intr].ioapic >= 0) {
507 select = int_to_apicintpin[intr].redirindex;
508 value = io_apic_read(int_to_apicintpin[intr].ioapic,
509 select) & ~IOART_INTVEC;
510 io_apic_write(int_to_apicintpin[intr].ioapic,
511 select, value | vector);
/* Non-APIC path: classic IDT entry at ICU_OFFSET + irq. */
514 setidt(ICU_OFFSET + intr,
515 flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
516 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
517 #endif /* FAST_HI && APIC_IO */
/*
 * Tear down the handler for `intr', restoring the stray catcher and the slow
 * IDT vector.  K&R-style definition; parameter declarations are elided here.
 */
524 icu_unset(intr, handler)
/* Refuse if the caller's handler doesn't match what is installed. */
530 if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr]) {
531 printf("icu_unset: invalid handler %d %p/%p\n", intr, handler,
532 (((u_int)intr >= ICU_LEN) ? (void *)-1 : intr_handler[intr]));
538 cpu_disable_intr(); /* YYY */
/* Re-point counting/dispatch state at the per-IRQ stray slots. */
539 intr_countp[intr] = &intrcnt[1 + intr];
540 intr_handler[intr] = isa_strayintr;
541 intr_info[intr].maskp = NULL;
542 intr_info[intr].mask = HWI_MASK | SWI_MASK;
/* Self-pointing cookie lets isa_strayintr recover the IRQ number. */
543 intr_unit[intr] = &intr_unit[intr];
545 /* XXX how do I re-create dvp here? */
/* FAST_HI path: restore whichever vector (fast or slow) was in use. */
546 setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
547 slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
549 #ifdef APIC_INTR_REORDER
550 set_lapic_isrloc(intr, ICU_OFFSET + intr);
552 setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
553 GSEL(GCODE_SEL, SEL_KPL));
560 /* The following notice applies beyond this point in the file */
563 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
564 * All rights reserved.
566 * Redistribution and use in source and binary forms, with or without
567 * modification, are permitted provided that the following conditions
569 * 1. Redistributions of source code must retain the above copyright
570 * notice unmodified, this list of conditions, and the following
572 * 2. Redistributions in binary form must reproduce the above copyright
573 * notice, this list of conditions and the following disclaimer in the
574 * documentation and/or other materials provided with the distribution.
576 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
577 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
578 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
579 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
580 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
581 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
582 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
583 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
584 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
585 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
587 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
/*
 * Linked-list node describing one registered handler on a shared IRQ.
 * Most fields (handler, argument, maskptr, flags, name, next, ...) are
 * elided in this excerpt; only the serializer interlock fields are visible.
 */
591 typedef struct intrec {
600 lwkt_serialize_t serializer;
601 volatile int in_handler;
/* Head of the handler chain for each IRQ. */
604 static intrec *intreclist_head[ICU_LEN];
607 * The interrupt multiplexer calls each of the handlers in turn. A handler
608 * is called only if we can successfully obtain the interlock, meaning
609 * (1) we aren't recursed and (2) the handler has not been disabled via
610 * inthand_disabled().
612 * XXX the IPL is currently raised as necessary for the handler. However,
613 * IPLs are not MP safe so the IPL code will be removed when the device
614 * drivers, BIO, and VM no longer depend on it.
/* arg is &intreclist_head[irq]; walk the chain and invoke every handler. */
622 for (pp = arg; (p = *pp) != NULL; pp = &p->next) {
625 * New handler dispatch method. Only the serializer
626 * is used to interlock access. Note that this
627 * API includes a handler disablement feature.
/* Serializer path — taken when p->serializer is set (branch guard elided). */
629 lwkt_serialize_handler_call(p->serializer,
630 p->handler, p->argument);
633 * Old handlers may expect multiple interrupt
634 * sources to be masked. We must use a critical
/* Legacy direct-call path. */
638 p->handler(p->argument);
/*
 * Return the first handler record on irq's chain whose maskptr matches,
 * or NULL (return statement elided in this excerpt).
 */
645 find_idesc(unsigned *maskptr, int irq)
647 intrec *p = intreclist_head[irq];
649 while (p && p->maskptr != maskptr)
656 * Both the low level handler and the shared interrupt multiplexer
657 * block out further interrupts as set in the handlers "mask", while
658 * the handler is running. In fact *maskptr should be used for this
659 * purpose, but since this requires one more pointer dereference on
660 * each interrupt, we rather bother update "mask" whenever *maskptr
661 * changes. The function "update_masks" should be called **after**
662 * all manipulation of the linked list of interrupt handlers hung
663 * off of intrdec_head[irq] is complete, since the chain of handlers
664 * will both determine the *maskptr values and the instances of mask
665 * that are fixed. This function should be called with the irq for
666 * which a new handler has been add blocked, since the masks may not
667 * yet know about the use of this irq for a device of a certain class.
/*
 * Re-cache *maskptr into each handler record's private mask field, for every
 * handler on every IRQ chain (see the comment block above for why the copy
 * is kept at all).
 */
671 update_mux_masks(void)
674 for (irq = 0; irq < ICU_LEN; irq++) {
675 intrec *idesc = intreclist_head[irq];
676 while (idesc != NULL) {
677 if (idesc->maskptr != NULL) {
678 /* our copy of *maskptr may be stale, refresh */
679 idesc->mask = *idesc->maskptr;
/*
 * Add or remove irq's bit in *maskptr depending on whether any handler on
 * this irq's chain still references that mask class, then propagate the
 * change (the actual |=/&= statements are elided in this excerpt).
 */
687 update_masks(intrmask_t *maskptr, int irq)
689 intrmask_t mask = 1 << irq;
694 if (find_idesc(maskptr, irq) == NULL) {
695 /* no reference to this maskptr was found in this irq's chain */
698 /* a reference to this maskptr was found in this irq's chain */
701 /* we need to update all values in the intr_mask[irq] array */
703 /* update mask in chains of the interrupt multiplex handler as well */
708 * Add an interrupt handler to the linked list hung off of intreclist_head[irq]
709 * and install a shared interrupt multiplex handler. Install an interrupt
710 * thread for each interrupt (though FAST interrupts will not use it).
711 * The preemption procedure checks the CPL. lwkt_preempt() will check
712 * relative thread priorities for us as long as we properly pass through
715 * The interrupt thread has already been put on the run queue, so if we cannot
716 * preempt we should force a reschedule.
/*
 * Preemption hook for interrupt threads: preempt only if the current
 * thread's CPL does not mask this IRQ; otherwise request a reschedule.
 */
719 cpu_intr_preempt(struct thread *td, int critpri)
721 struct md_intr_info *info = td->td_info.intdata;
723 if ((curthread->td_cpl & (1 << info->irq)) == 0)
724 lwkt_preempt(td, critpri);
/* else-branch (guard elided): defer to a normal reschedule. */
726 need_lwkt_resched(); /* XXX may not be required */
/*
 * Link a new handler record onto its IRQ's chain, creating the interrupt
 * thread on first use and switching the hard handler between direct and
 * intr_mux dispatch as the chain grows.  Error paths (returns) are elided
 * in this excerpt.
 */
730 add_intrdesc(intrec *idesc)
732 int irq = idesc->intr;
737 * There are two ways to enter intr_mux(). (1) via the scheduled
738 * interrupt thread or (2) directly. The thread mechanism is the
739 * normal mechanism used by SLOW interrupts, while the direct method
740 * is used by FAST interrupts.
742 * We need to create an interrupt thread if none exists.
744 if (intr_info[irq].mihandler_installed == 0) {
747 intr_info[irq].mihandler_installed = 1;
748 intr_info[irq].irq = irq;
749 td = register_int(irq, intr_mux, &intreclist_head[irq], idesc->name, idesc->maskptr);
750 td->td_info.intdata = &intr_info[irq];
751 td->td_preemptable = cpu_intr_preempt;
752 printf("installed MI handler for int %d\n", irq);
755 headp = &intreclist_head[irq];
/* Sharing constraints: exclusive or FAST handlers can't coexist. */
762 if ((idesc->flags & INTR_EXCL) || (head->flags & INTR_EXCL)) {
763 printf("\tdevice combination doesn't support "
764 "shared irq%d\n", irq);
767 if ((idesc->flags & INTR_FAST) || (head->flags & INTR_FAST)) {
768 printf("\tdevice combination doesn't support "
769 "multiple FAST interrupts on IRQ%d\n", irq);
774 * Always install intr_mux as the hard handler so it can deal with
775 * individual enablement on handlers.
/* First handler on this irq: install it directly. */
778 if (icu_setup(irq, idesc->handler, idesc->argument, idesc->maskptr, idesc->flags) != 0)
780 update_intrname(irq, idesc->name);
/* Second handler: swap the direct handler for the multiplexer. */
781 } else if (head->next == NULL) {
782 icu_unset(irq, head->handler);
783 if (icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0) != 0)
785 if (bootverbose && head->next == NULL)
786 printf("\tusing shared irq%d.\n", irq);
787 update_intrname(irq, "mux");
791 * Append to the end of the chain and update our SPL masks.
793 while (*headp != NULL)
794 headp = &(*headp)->next;
797 update_masks(idesc->maskptr, irq);
802 * Create and activate an interrupt handler descriptor data structure.
804 * The dev_instance pointer is required for resource management, and will
805 * only be passed through to resource_claim().
807 * There will be functions that derive a driver and unit name from a
808 * dev_instance variable, and those functions will be used to maintain the
809 * interrupt counter label array referenced by systat and vmstat to report
810 * device interrupt rates (->update_intrlabels).
812 * Add the interrupt handler descriptor data structure created by an
813 * earlier call of create_intr() to the linked list for its irq and
814 * adjust the interrupt masks if necessary.
816 * WARNING: This is an internal function and not to be used by device
817 * drivers. It is subject to change without notice.
/*
 * Public registration entry point: validate irq, allocate and fill an intrec,
 * then hand it to add_intrdesc().  On failure the record and its name copy
 * are freed.  Return statements are elided in this excerpt.
 */
821 inthand_add(const char *name, int irq, inthand2_t handler, void *arg,
822 intrmask_t *maskptr, int flags, lwkt_serialize_t serializer)
827 if (ICU_LEN > 8 * sizeof *maskptr) {
828 printf("create_intr: ICU_LEN of %d too high for %d bit intrmask\n",
829 ICU_LEN, 8 * sizeof *maskptr);
832 if ((unsigned)irq >= ICU_LEN) {
833 printf("create_intr: requested irq%d too high, limit is %d\n",
838 idesc = malloc(sizeof *idesc, M_DEVBUF, M_WAITOK | M_ZERO);
844 idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
/* NOTE(review): M_WAITOK kernel malloc shouldn't return NULL — this check
   looks vestigial; confirm against the full source before removing. */
845 if (idesc->name == NULL) {
846 free(idesc, M_DEVBUF);
849 strcpy(idesc->name, name);
851 idesc->handler = handler;
852 idesc->argument = arg;
853 idesc->maskptr = maskptr;
855 idesc->flags = flags;
856 idesc->serializer = serializer;
858 /* add irq to class selected by maskptr */
860 errcode = add_intrdesc(idesc);
/* Failure path: log and release everything we allocated. */
865 printf("\tintr_connect(irq%d) failed, result=%d\n",
867 free(idesc->name, M_DEVBUF);
868 free(idesc, M_DEVBUF);
876 * Deactivate and remove the interrupt handler descriptor data connected
877 * created by an earlier call of intr_connect() from the linked list and
878 * adjust the interrupt masks if necessary.
880 * Return the memory held by the interrupt handler descriptor data structure
881 * to the system. Make sure, the handler is not actively used anymore, before.
/*
 * Unlink `idesc' from its IRQ chain and restore the appropriate hard handler:
 * stray catcher if the chain is now empty, the surviving handler directly if
 * exactly one remains, or leave intr_mux in place if still shared.
 */
884 inthand_remove(intrec *idesc)
886 intrec **hook, *head;
896 * Find and remove the interrupt descriptor.
898 hook = &intreclist_head[irq];
899 while (*hook != idesc) {
904 hook = &(*hook)->next;
909 * If the list is now empty, revert the hard vector to the spurious
912 head = intreclist_head[irq];
915 * No more interrupts on this irq
917 icu_unset(irq, idesc->handler);
918 update_intrname(irq, NULL);
919 } else if (head->next) {
921 * This irq is still shared (has at least two handlers)
922 * (the name should already be set to "mux").
926 * This irq is no longer shared
928 icu_unset(irq, intr_mux);
929 icu_setup(irq, head->handler, head->argument, head->maskptr, head->flags);
930 update_intrname(irq, head->name);
932 update_masks(idesc->maskptr, irq);
/* NOTE(review): idesc->name (allocated in inthand_add) is not freed in the
   visible lines — possible leak; confirm against the full source. */
934 free(idesc, M_DEVBUF);
942 * This function is called by an interrupt thread when it has completed
943 * processing a loop. We re-enable interrupts and interlock with
946 * See kern/kern_intr.c for more information.
/*
 * Called by an interrupt thread after a processing loop: deschedule
 * ourselves, but immediately reschedule if the IRQ went pending again in
 * the meantime.  `mask' is presumably 1 << irq — its declaration is elided
 * in this excerpt; confirm against the full source.
 */
949 ithread_done(int irq)
951 struct mdglobaldata *gd = mdcpu;
955 td = gd->mi.gd_curthread;
957 KKASSERT(td->td_pri >= TDPRI_CRIT);
958 lwkt_deschedule_self(td);
/* Race window closed here: pending bit set between loop exit and deschedule. */
960 if (gd->gd_ipending & mask) {
961 atomic_clear_int_nonlocked(&gd->gd_ipending, mask);
963 lwkt_schedule_self(td);
971 * forward_fastint_remote()
973 * This function is called from the receiving end of an IPIQ when a
974 * remote cpu wishes to forward a fast interrupt to us. All we have to
975 * do is set the interrupt pending and let the IPI's doreti deal with it.
978 forward_fastint_remote(void *arg)
981 struct mdglobaldata *gd = mdcpu;
983 atomic_set_int_nonlocked(&gd->gd_fpending, 1 << irq);
984 atomic_set_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND);