MP Implementation 1/2: Get the APIC code working again, sweetly integrate the
[dragonfly.git] / sys / i386 / isa / intr_machdep.c
CommitLineData
984263bc
MD
1/*-
2 * Copyright (c) 1991 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
37 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
8a8d5d85 38 * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.c,v 1.6 2003/07/06 21:23:49 dillon Exp $
984263bc
MD
39 */
40/*
41 * This file contains an aggregated module marked:
42 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
43 * All rights reserved.
44 * See the notice for details.
45 */
46
47#include "opt_auto_eoi.h"
48
49#include "isa.h"
50
51#include <sys/param.h>
52#ifndef SMP
53#include <machine/lock.h>
54#endif
55#include <sys/systm.h>
56#include <sys/syslog.h>
57#include <sys/malloc.h>
58#include <sys/errno.h>
59#include <sys/interrupt.h>
60#include <machine/ipl.h>
61#include <machine/md_var.h>
62#include <machine/segments.h>
63#include <sys/bus.h>
ef0fdad1
MD
64#include <machine/globaldata.h>
65#include <sys/proc.h>
66#include <sys/thread2.h>
984263bc
MD
67
68#if defined(APIC_IO)
69#include <machine/smp.h>
70#include <machine/smptests.h> /** FAST_HI */
71#endif /* APIC_IO */
72#ifdef PC98
73#include <pc98/pc98/pc98.h>
74#include <pc98/pc98/pc98_machdep.h>
75#include <pc98/pc98/epsonio.h>
76#else
77#include <i386/isa/isa.h>
78#endif
79#include <i386/isa/icu.h>
80
81#if NISA > 0
82#include <isa/isavar.h>
83#endif
84#include <i386/isa/intr_machdep.h>
85#include <sys/interrupt.h>
86#ifdef APIC_IO
87#include <machine/clock.h>
88#endif
89
90#include "mca.h"
91#if NMCA > 0
92#include <i386/isa/mca_machdep.h>
93#endif
94
95/* XXX should be in suitable include files */
96#ifdef PC98
97#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
98#define ICU_SLAVEID 7
99#else
100#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
101#define ICU_SLAVEID 2
102#endif
103
104#ifdef APIC_IO
105/*
106 * This is to accommodate "mixed-mode" programming for
107 * motherboards that don't connect the 8254 to the IO APIC.
108 */
109#define AUTO_EOI_1 1
110#endif
111
112#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
113
26a0694b
MD
static inthand2_t isa_strayintr;

/* per-vector interrupt counters; each entry points into intrcnt[] */
u_long *intr_countp[ICU_LEN*2];
/*
 * Installed interrupt handlers, one slot per vector.  Every slot starts
 * out as isa_strayintr so an unexpected interrupt is logged rather than
 * dispatched through a NULL pointer.
 */
inthand2_t *intr_handler[ICU_LEN*2] = {
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
	isa_strayintr, isa_strayintr, isa_strayintr, isa_strayintr,
};
u_int intr_mask[ICU_LEN*2];		/* cached spl mask for each vector */
int intr_mihandler_installed[ICU_LEN*2]; /* MI thread handler registered? (see add_intrdesc) */
static u_int* intr_mptr[ICU_LEN*2];	/* pointer to each vector's shared class mask */
void *intr_unit[ICU_LEN*2];		/* opaque cookie passed to the handler */
984263bc
MD
131
/*
 * Low-level IDT entry points (from *vector.s) for FAST interrupts,
 * indexed by irq number.  The APIC_IO configuration exposes 24 lines
 * instead of the 8259 pair's 16.
 */
static inthand_t *fastintr[ICU_LEN] = {
	&IDTVEC(fastintr0), &IDTVEC(fastintr1),
	&IDTVEC(fastintr2), &IDTVEC(fastintr3),
	&IDTVEC(fastintr4), &IDTVEC(fastintr5),
	&IDTVEC(fastintr6), &IDTVEC(fastintr7),
	&IDTVEC(fastintr8), &IDTVEC(fastintr9),
	&IDTVEC(fastintr10), &IDTVEC(fastintr11),
	&IDTVEC(fastintr12), &IDTVEC(fastintr13),
	&IDTVEC(fastintr14), &IDTVEC(fastintr15),
#if defined(APIC_IO)
	&IDTVEC(fastintr16), &IDTVEC(fastintr17),
	&IDTVEC(fastintr18), &IDTVEC(fastintr19),
	&IDTVEC(fastintr20), &IDTVEC(fastintr21),
	&IDTVEC(fastintr22), &IDTVEC(fastintr23),
#endif /* APIC_IO */
};
148
ef0fdad1
MD
/*
 * Assembly entry points used to replay a FAST interrupt that was
 * deferred while the cpu was in a critical section, indexed by irq.
 */
unpendhand_t *fastunpend[ICU_LEN] = {
	IDTVEC(fastunpend0), IDTVEC(fastunpend1),
	IDTVEC(fastunpend2), IDTVEC(fastunpend3),
	IDTVEC(fastunpend4), IDTVEC(fastunpend5),
	IDTVEC(fastunpend6), IDTVEC(fastunpend7),
	IDTVEC(fastunpend8), IDTVEC(fastunpend9),
	IDTVEC(fastunpend10), IDTVEC(fastunpend11),
	IDTVEC(fastunpend12), IDTVEC(fastunpend13),
	IDTVEC(fastunpend14), IDTVEC(fastunpend15),
#if defined(APIC_IO)
	IDTVEC(fastunpend16), IDTVEC(fastunpend17),
	IDTVEC(fastunpend18), IDTVEC(fastunpend19),
	IDTVEC(fastunpend20), IDTVEC(fastunpend21),
	IDTVEC(fastunpend22), IDTVEC(fastunpend23),
#endif
};
165
984263bc
MD
/*
 * Low-level IDT entry points for normal (threaded/slow) interrupts,
 * indexed by irq number.
 */
static inthand_t *slowintr[ICU_LEN] = {
	&IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
	&IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
	&IDTVEC(intr8), &IDTVEC(intr9), &IDTVEC(intr10), &IDTVEC(intr11),
	&IDTVEC(intr12), &IDTVEC(intr13), &IDTVEC(intr14), &IDTVEC(intr15),
#if defined(APIC_IO)
	&IDTVEC(intr16), &IDTVEC(intr17), &IDTVEC(intr18), &IDTVEC(intr19),
	&IDTVEC(intr20), &IDTVEC(intr21), &IDTVEC(intr22), &IDTVEC(intr23),
#endif /* APIC_IO */
};
176
984263bc
MD
177#ifdef PC98
178#define NMI_PARITY 0x04
179#define NMI_EPARITY 0x02
180#else
181#define NMI_PARITY (1 << 7)
182#define NMI_IOCHAN (1 << 6)
183#define ENMI_WATCHDOG (1 << 7)
184#define ENMI_BUSTIMER (1 << 6)
185#define ENMI_IOSTATUS (1 << 5)
186#endif
187
/*
 * Handle a NMI, possibly a machine check.
 * return true to panic system, false to ignore.
 *
 * The status ports examined here identify the NMI source: RAM parity,
 * I/O channel check, and (when an EISA chipset is present) watchdog,
 * bus timeout, and I/O port status.
 */
int
isa_nmi(cd)
	int cd;
{
	int retval = 0;
#ifdef PC98
	/* PC-98 reports NMI cause through I/O port 0x33 */
	int port = inb(0x33);

	log(LOG_CRIT, "NMI PC98 port = %x\n", port);
	/* Epson machines require an explicit write to clear the NMI latch */
	if (epson_machine_id == 0x20)
		epson_outb(0xc16, epson_inb(0xc16) | 0x1);
	if (port & NMI_PARITY) {
		log(LOG_CRIT, "BASE RAM parity error, likely hardware failure.");
		retval = 1;
	} else if (port & NMI_EPARITY) {
		log(LOG_CRIT, "EXTENDED RAM parity error, likely hardware failure.");
		retval = 1;
	} else {
		log(LOG_CRIT, "\nNMI Resume ??\n");
	}
#else /* IBM-PC */
	/* port 0x61: system control port B; port 0x461: EISA NMI status */
	int isa_port = inb(0x61);
	int eisa_port = inb(0x461);

	log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port);
#if NMCA > 0
	/* let the MicroChannel bus code claim the NMI first */
	if (MCA_system && mca_bus_nmi())
		return(0);
#endif

	if (isa_port & NMI_PARITY) {
		log(LOG_CRIT, "RAM parity error, likely hardware failure.");
		retval = 1;
	}

	if (isa_port & NMI_IOCHAN) {
		log(LOG_CRIT, "I/O channel check, likely hardware failure.");
		retval = 1;
	}

	/*
	 * On a real EISA machine, this will never happen.  However it can
	 * happen on ISA machines which implement XT style floating point
	 * error handling (very rare).  Save them from a meaningless panic.
	 */
	if (eisa_port == 0xff)
		return(retval);

	if (eisa_port & ENMI_WATCHDOG) {
		log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure.");
		retval = 1;
	}

	if (eisa_port & ENMI_BUSTIMER) {
		log(LOG_CRIT, "EISA bus timeout, likely hardware failure.");
		retval = 1;
	}

	if (eisa_port & ENMI_IOSTATUS) {
		log(LOG_CRIT, "EISA I/O port status error.");
		retval = 1;
	}
#endif
	return(retval);
}
257
/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit.
 *
 * Programs both 8259 PICs with the standard ICW1-ICW4 init sequence and
 * leaves all interrupt lines masked.
 */
void
isa_defaultirq()
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		icu_unset(i, (inthand2_t *)NULL);

	/* initialize 8259's */
#if NMCA > 0
	/* MCA uses level-triggered mode (0x19) instead of edge (0x11) */
	if (MCA_system)
		outb(IO_ICU1, 0x19);	/* reset; program device, four bytes */
	else
#endif
		outb(IO_ICU1, 0x11);	/* reset; program device, four bytes */

	outb(IO_ICU1+ICU_IMR_OFFSET, NRSVIDT);	/* starting at this vector index */
	outb(IO_ICU1+ICU_IMR_OFFSET, IRQ_SLAVE); /* slave attached on the IRQ_SLAVE line */
#ifdef PC98
#ifdef AUTO_EOI_1
	outb(IO_ICU1+ICU_IMR_OFFSET, 0x1f);	/* (master) auto EOI, 8086 mode */
#else
	outb(IO_ICU1+ICU_IMR_OFFSET, 0x1d);	/* (master) 8086 mode */
#endif
#else /* IBM-PC */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+ICU_IMR_OFFSET, 2 | 1);	/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+ICU_IMR_OFFSET, 1);	/* 8086 mode */
#endif
#endif /* PC98 */
	outb(IO_ICU1+ICU_IMR_OFFSET, 0xff);	/* leave interrupts masked */
	outb(IO_ICU1, 0x0a);		/* default to IRR on read */
#ifndef PC98
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif /* !PC98 */

#if NMCA > 0
	if (MCA_system)
		outb(IO_ICU2, 0x19);	/* reset; program device, four bytes */
	else
#endif
		outb(IO_ICU2, 0x11);	/* reset; program device, four bytes */

	outb(IO_ICU2+ICU_IMR_OFFSET, NRSVIDT+8); /* starting at this vector index */
	outb(IO_ICU2+ICU_IMR_OFFSET, ICU_SLAVEID); /* my slave id is ICU_SLAVEID */
#ifdef PC98
	outb(IO_ICU2+ICU_IMR_OFFSET,9);		/* 8086 mode */
#else /* IBM-PC */
#ifdef AUTO_EOI_2
	outb(IO_ICU2+ICU_IMR_OFFSET, 2 | 1);	/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+ICU_IMR_OFFSET,1);		/* 8086 mode */
#endif
#endif /* PC98 */
	outb(IO_ICU2+ICU_IMR_OFFSET, 0xff);	/* leave interrupts masked */
	outb(IO_ICU2, 0x0a);		/* default to IRR on read */
}
321
/*
 * Caught a stray interrupt, notify.
 *
 * Installed as the default handler for every vector; logs the first few
 * occurrences per irq and then goes quiet to avoid log flooding.
 */
static void
isa_strayintr(void *vcookiep)
{
	/*
	 * Recover the irq number from the cookie: icu_unset() stores
	 * &intr_unit[intr] as the unit argument, so the pointer's offset
	 * into intr_unit[] is the irq.
	 */
	int intr = (void **)vcookiep - &intr_unit[0];

	/* DON'T BOTHER FOR NOW! */
	/* for some reason, we get bursts of intr #7, even if not enabled! */
	/*
	 * Well the reason you got bursts of intr #7 is because someone
	 * raised an interrupt line and dropped it before the 8259 could
	 * prioritize it.  This is documented in the intel data book.  This
	 * means you have BAD hardware!  I have changed this so that only
	 * the first 5 get logged, then it quits logging them, and puts
	 * out a special message. rgrimes 3/25/1993
	 */
	/*
	 * XXX TODO print a different message for #7 if it is for a
	 * glitch.  Glitches can be distinguished from real #7's by
	 * testing that the in-service bit is _not_ set.  The test
	 * must be done before sending an EOI so it can't be done if
	 * we are using AUTO_EOI_1.
	 */
	printf("STRAY %d\n", intr);	/* NOTE(review): unconditional debug output, likely leftover */
	/* intrcnt[1 + intr] is this irq's stray counter (slot 0 is the bitbucket) */
	if (intrcnt[1 + intr] <= 5)
		log(LOG_ERR, "stray irq %d\n", intr);
	if (intrcnt[1 + intr] == 5)
		log(LOG_CRIT,
		    "too many stray irq %d's; not logging any more\n", intr);
}
354
355#if NISA > 0
356/*
357 * Return a bitmap of the current interrupt requests. This is 8259-specific
358 * and is only suitable for use at probe time.
359 */
360intrmask_t
361isa_irq_pending()
362{
363 u_char irr1;
364 u_char irr2;
365
366 irr1 = inb(IO_ICU1);
367 irr2 = inb(IO_ICU2);
368 return ((irr2 << 8) | irr1);
369}
370#endif
371
372int
373update_intr_masks(void)
374{
375 int intr, n=0;
376 u_int mask,*maskptr;
377
378 for (intr=0; intr < ICU_LEN; intr ++) {
379#if defined(APIC_IO)
380 /* no 8259 SLAVE to ignore */
381#else
382 if (intr==ICU_SLAVEID) continue; /* ignore 8259 SLAVE output */
383#endif /* APIC_IO */
384 maskptr = intr_mptr[intr];
385 if (!maskptr)
386 continue;
387 *maskptr |= SWI_CLOCK_MASK | (1 << intr);
388 mask = *maskptr;
389 if (mask != intr_mask[intr]) {
390#if 0
391 printf ("intr_mask[%2d] old=%08x new=%08x ptr=%p.\n",
392 intr, intr_mask[intr], mask, maskptr);
393#endif
394 intr_mask[intr]=mask;
395 n++;
396 }
397
398 }
399 return (n);
400}
401
/*
 * Point intr_countp[intr] at the intrcnt[] slot whose label in the
 * intrnames[] string table matches "<name> irq<intr>", appending a new
 * label if there is room.  Falls back to the shared "???" bitbucket
 * slot (index 0) when the name is too long or the table is full.
 */
static void
update_intrname(int intr, char *name)
{
	char buf[32];
	char *cp;
	int name_index, off, strayintr;

	/*
	 * Initialise strings for bitbucket and stray interrupt counters.
	 * These have statically allocated indices 0 and 1 through ICU_LEN.
	 */
	if (intrnames[0] == '\0') {
		off = sprintf(intrnames, "???") + 1;
		for (strayintr = 0; strayintr < ICU_LEN; strayintr++)
			off += sprintf(intrnames + off, "stray irq%d",
			    strayintr) + 1;
	}

	if (name == NULL)
		name = "???";
	/* names longer than the label buffer go to the bitbucket counter */
	if (snprintf(buf, sizeof(buf), "%s irq%d", name, intr) >= sizeof(buf))
		goto use_bitbucket;

	/*
	 * Search for `buf' in `intrnames'.  In the usual case when it is
	 * not found, append it to the end if there is enough space (the \0
	 * terminator for the previous string, if any, becomes a separator).
	 */
	for (cp = intrnames, name_index = 0;
	    cp != eintrnames && name_index < NR_INTRNAMES;
	    cp += strlen(cp) + 1, name_index++) {
		if (*cp == '\0') {
			/* hit the unused tail of the table; append if it fits */
			if (strlen(buf) >= eintrnames - cp)
				break;
			strcpy(cp, buf);
			goto found;
		}
		if (strcmp(cp, buf) == 0)
			goto found;
	}

use_bitbucket:
	printf("update_intrname: counting %s irq%d as %s\n", name, intr,
	    intrnames);
	name_index = 0;
found:
	intr_countp[intr] = &intrcnt[name_index];
}
450
ef0fdad1
MD
451/*
452 * NOTE! intr_handler[] is only used for FAST interrupts, the *vector.s
453 * code ignores it for normal interrupts.
454 */
984263bc
MD
455int
456icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
457{
458#ifdef FAST_HI
459 int select; /* the select register is 8 bits */
460 int vector;
461 u_int32_t value; /* the window register is 32 bits */
462#endif /* FAST_HI */
463 u_long ef;
464 u_int mask = (maskptr ? *maskptr : 0);
465
466#if defined(APIC_IO)
467 if ((u_int)intr >= ICU_LEN) /* no 8259 SLAVE to ignore */
468#else
469 if ((u_int)intr >= ICU_LEN || intr == ICU_SLAVEID)
470#endif /* APIC_IO */
471 if (intr_handler[intr] != isa_strayintr)
472 return (EBUSY);
473
474 ef = read_eflags();
8a8d5d85 475 cpu_disable_intr(); /* YYY */
984263bc
MD
476 intr_handler[intr] = handler;
477 intr_mptr[intr] = maskptr;
478 intr_mask[intr] = mask | SWI_CLOCK_MASK | (1 << intr);
479 intr_unit[intr] = arg;
480#ifdef FAST_HI
481 if (flags & INTR_FAST) {
482 vector = TPR_FAST_INTS + intr;
483 setidt(vector, fastintr[intr],
484 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
ef0fdad1 485 } else {
984263bc
MD
486 vector = TPR_SLOW_INTS + intr;
487#ifdef APIC_INTR_REORDER
488#ifdef APIC_INTR_HIGHPRI_CLOCK
489 /* XXX: Hack (kludge?) for more accurate clock. */
490 if (intr == apic_8254_intr || intr == 8) {
491 vector = TPR_FAST_INTS + intr;
492 }
493#endif
494#endif
495 setidt(vector, slowintr[intr],
496 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
497 }
498#ifdef APIC_INTR_REORDER
499 set_lapic_isrloc(intr, vector);
500#endif
501 /*
502 * Reprogram the vector in the IO APIC.
503 */
504 if (int_to_apicintpin[intr].ioapic >= 0) {
505 select = int_to_apicintpin[intr].redirindex;
506 value = io_apic_read(int_to_apicintpin[intr].ioapic,
507 select) & ~IOART_INTVEC;
508 io_apic_write(int_to_apicintpin[intr].ioapic,
509 select, value | vector);
510 }
511#else
512 setidt(ICU_OFFSET + intr,
513 flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
514 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
515#endif /* FAST_HI */
516 INTREN(1 << intr);
984263bc
MD
517 write_eflags(ef);
518 return (0);
519}
520
521int
522icu_unset(intr, handler)
523 int intr;
524 inthand2_t *handler;
525{
526 u_long ef;
527
528 if ((u_int)intr >= ICU_LEN || handler != intr_handler[intr])
529 return (EINVAL);
530
531 INTRDIS(1 << intr);
532 ef = read_eflags();
8a8d5d85 533 cpu_disable_intr(); /* YYY */
984263bc
MD
534 intr_countp[intr] = &intrcnt[1 + intr];
535 intr_handler[intr] = isa_strayintr;
536 intr_mptr[intr] = NULL;
537 intr_mask[intr] = HWI_MASK | SWI_MASK;
538 intr_unit[intr] = &intr_unit[intr];
539#ifdef FAST_HI_XXX
540 /* XXX how do I re-create dvp here? */
541 setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
542 slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
543#else /* FAST_HI */
544#ifdef APIC_INTR_REORDER
545 set_lapic_isrloc(intr, ICU_OFFSET + intr);
546#endif
547 setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
548 GSEL(GCODE_SEL, SEL_KPL));
549#endif /* FAST_HI */
984263bc
MD
550 write_eflags(ef);
551 return (0);
552}
553
ef0fdad1 554
984263bc
MD
555/* The following notice applies beyond this point in the file */
556
557/*
558 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
559 * All rights reserved.
560 *
561 * Redistribution and use in source and binary forms, with or without
562 * modification, are permitted provided that the following conditions
563 * are met:
564 * 1. Redistributions of source code must retain the above copyright
565 * notice unmodified, this list of conditions, and the following
566 * disclaimer.
567 * 2. Redistributions in binary form must reproduce the above copyright
568 * notice, this list of conditions and the following disclaimer in the
569 * documentation and/or other materials provided with the distribution.
570 *
571 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
572 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
573 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
574 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
575 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
576 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
577 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
578 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
579 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
580 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
581 *
582 * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
583 *
584 */
585
/*
 * One registered handler on a (possibly shared) interrupt line; the
 * records for one irq form a singly linked list headed at
 * intreclist_head[irq].
 */
typedef struct intrec {
	intrmask_t	mask;		/* cached copy of *maskptr (see update_mux_masks) */
	inthand2_t	*handler;	/* driver interrupt routine */
	void		*argument;	/* argument passed to handler */
	struct intrec	*next;		/* next handler sharing this irq */
	char		*name;		/* malloc'd driver name, for intrnames[] accounting */
	int		intr;		/* irq number this record is attached to */
	intrmask_t	*maskptr;	/* presumably the driver's spl class mask — see update_masks */
	int		flags;		/* INTR_FAST / INTR_EXCL etc. */
} intrec;
596
597static intrec *intreclist_head[ICU_LEN];
598
599/*
600 * The interrupt multiplexer calls each of the handlers in turn. The
601 * ipl is initially quite low. It is raised as necessary for each call
602 * and lowered after the call. Thus out of order handling is possible
603 * even for interrupts of the same type. This is probably no more
604 * harmful than out of order handling in general (not harmful except
605 * for real time response which we don't support anyway).
606 */
607static void
608intr_mux(void *arg)
609{
ef0fdad1 610 intrec **pp;
984263bc
MD
611 intrec *p;
612 intrmask_t oldspl;
613
ef0fdad1 614 for (pp = arg; (p = *pp) != NULL; pp = &p->next) {
984263bc
MD
615 oldspl = splq(p->mask);
616 p->handler(p->argument);
617 splx(oldspl);
618 }
619}
620
621static intrec*
622find_idesc(unsigned *maskptr, int irq)
623{
624 intrec *p = intreclist_head[irq];
625
626 while (p && p->maskptr != maskptr)
627 p = p->next;
628
629 return (p);
630}
631
632static intrec**
633find_pred(intrec *idesc, int irq)
634{
635 intrec **pp = &intreclist_head[irq];
636 intrec *p = *pp;
637
638 while (p != idesc) {
639 if (p == NULL)
640 return (NULL);
641 pp = &p->next;
642 p = *pp;
643 }
644 return (pp);
645}
646
647/*
648 * Both the low level handler and the shared interrupt multiplexer
649 * block out further interrupts as set in the handlers "mask", while
650 * the handler is running. In fact *maskptr should be used for this
651 * purpose, but since this requires one more pointer dereference on
652 * each interrupt, we rather bother update "mask" whenever *maskptr
653 * changes. The function "update_masks" should be called **after**
654 * all manipulation of the linked list of interrupt handlers hung
655 * off of intrdec_head[irq] is complete, since the chain of handlers
656 * will both determine the *maskptr values and the instances of mask
657 * that are fixed. This function should be called with the irq for
658 * which a new handler has been add blocked, since the masks may not
659 * yet know about the use of this irq for a device of a certain class.
660 */
661
662static void
663update_mux_masks(void)
664{
665 int irq;
666 for (irq = 0; irq < ICU_LEN; irq++) {
667 intrec *idesc = intreclist_head[irq];
668 while (idesc != NULL) {
669 if (idesc->maskptr != NULL) {
670 /* our copy of *maskptr may be stale, refresh */
671 idesc->mask = *idesc->maskptr;
672 }
673 idesc = idesc->next;
674 }
675 }
676}
677
678static void
679update_masks(intrmask_t *maskptr, int irq)
680{
681 intrmask_t mask = 1 << irq;
682
683 if (maskptr == NULL)
684 return;
685
686 if (find_idesc(maskptr, irq) == NULL) {
687 /* no reference to this maskptr was found in this irq's chain */
688 *maskptr &= ~mask;
689 } else {
690 /* a reference to this maskptr was found in this irq's chain */
691 *maskptr |= mask;
692 }
693 /* we need to update all values in the intr_mask[irq] array */
694 update_intr_masks();
695 /* update mask in chains of the interrupt multiplex handler as well */
696 update_mux_masks();
697}
698
/*
 * Add interrupt handler to linked list hung off of intreclist_head[irq]
 * and install shared interrupt multiplex handler, if necessary.
 *
 * Returns 0 on success, -1 if the handler could not be installed (irq
 * busy/invalid, or sharing refused via INTR_EXCL).
 */

static int
add_intrdesc(intrec *idesc)
{
	int irq = idesc->intr;
	intrec *head;

	/*
	 * YYY This is a hack.  The MI interrupt code in kern/kern_intr.c
	 * handles interrupt thread scheduling for NORMAL interrupts.  It
	 * will never get called for fast interrupts.  On the otherhand,
	 * the handler this code installs in intr_handler[] for a NORMAL
	 * interrupt is not used by the *vector.s code, so we need this
	 * temporary hack to run normal interrupts as interrupt threads.
	 * YYY FIXME!
	 */
	if (intr_mihandler_installed[irq] == 0) {
		intr_mihandler_installed[irq] = 1;
		register_int(irq, intr_mux, &intreclist_head[irq], idesc->name);
		printf("installing MI handler for int %d\n", irq);
	}

	head = intreclist_head[irq];

	if (head == NULL) {
		/* first handler for this irq, just install it */
		if (icu_setup(irq, idesc->handler, idesc->argument,
		    idesc->maskptr, idesc->flags) != 0)
			return (-1);

		update_intrname(irq, idesc->name);
		/* keep reference */
		intreclist_head[irq] = idesc;
	} else {
		if ((idesc->flags & INTR_EXCL) != 0
		    || (head->flags & INTR_EXCL) != 0) {
			/*
			 * can't append new handler, if either list head or
			 * new handler do not allow interrupts to be shared
			 */
			if (bootverbose)
				printf("\tdevice combination doesn't support "
				    "shared irq%d\n", irq);
			return (-1);
		}
		if (head->next == NULL) {
			/*
			 * second handler for this irq, replace device driver's
			 * handler by shared interrupt multiplexer function
			 */
			icu_unset(irq, head->handler);
			if (icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0) != 0)
				return (-1);
			if (bootverbose)
				printf("\tusing shared irq%d.\n", irq);
			update_intrname(irq, "mux");
		}
		/* just append to the end of the chain */
		while (head->next != NULL)
			head = head->next;
		head->next = idesc;
	}
	/* recompute class masks now that the chain has changed */
	update_masks(idesc->maskptr, irq);
	return (0);
}
768
769/*
770 * Create and activate an interrupt handler descriptor data structure.
771 *
772 * The dev_instance pointer is required for resource management, and will
773 * only be passed through to resource_claim().
774 *
775 * There will be functions that derive a driver and unit name from a
776 * dev_instance variable, and those functions will be used to maintain the
777 * interrupt counter label array referenced by systat and vmstat to report
778 * device interrupt rates (->update_intrlabels).
779 *
780 * Add the interrupt handler descriptor data structure created by an
781 * earlier call of create_intr() to the linked list for its irq and
782 * adjust the interrupt masks if necessary.
783 *
784 * WARNING: This is an internal function and not to be used by device
785 * drivers. It is subject to change without notice.
786 */
787
788intrec *
789inthand_add(const char *name, int irq, inthand2_t handler, void *arg,
790 intrmask_t *maskptr, int flags)
791{
792 intrec *idesc;
793 int errcode = -1;
794 intrmask_t oldspl;
795
796 if (ICU_LEN > 8 * sizeof *maskptr) {
797 printf("create_intr: ICU_LEN of %d too high for %d bit intrmask\n",
798 ICU_LEN, 8 * sizeof *maskptr);
799 return (NULL);
800 }
801 if ((unsigned)irq >= ICU_LEN) {
802 printf("create_intr: requested irq%d too high, limit is %d\n",
803 irq, ICU_LEN -1);
804 return (NULL);
805 }
806
807 idesc = malloc(sizeof *idesc, M_DEVBUF, M_WAITOK);
808 if (idesc == NULL)
809 return NULL;
810 bzero(idesc, sizeof *idesc);
811
812 if (name == NULL)
813 name = "???";
814 idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
815 if (idesc->name == NULL) {
816 free(idesc, M_DEVBUF);
817 return NULL;
818 }
819 strcpy(idesc->name, name);
820
821 idesc->handler = handler;
822 idesc->argument = arg;
823 idesc->maskptr = maskptr;
824 idesc->intr = irq;
825 idesc->flags = flags;
826
827 /* block this irq */
828 oldspl = splq(1 << irq);
829
830 /* add irq to class selected by maskptr */
831 errcode = add_intrdesc(idesc);
832 splx(oldspl);
833
834 if (errcode != 0) {
835 if (bootverbose)
836 printf("\tintr_connect(irq%d) failed, result=%d\n",
837 irq, errcode);
838 free(idesc->name, M_DEVBUF);
839 free(idesc, M_DEVBUF);
840 idesc = NULL;
841 }
842
843 return (idesc);
844}
845
846/*
847 * Deactivate and remove the interrupt handler descriptor data connected
848 * created by an earlier call of intr_connect() from the linked list and
849 * adjust theinterrupt masks if necessary.
850 *
851 * Return the memory held by the interrupt handler descriptor data structure
852 * to the system. Make sure, the handler is not actively used anymore, before.
853 */
854
855int
856inthand_remove(intrec *idesc)
857{
858 intrec **hook, *head;
859 int irq;
860 int errcode = 0;
861 intrmask_t oldspl;
862
863 if (idesc == NULL)
864 return (-1);
865
866 irq = idesc->intr;
867
868 /* find pointer that keeps the reference to this interrupt descriptor */
869 hook = find_pred(idesc, irq);
870 if (hook == NULL)
871 return (-1);
872
873 /* make copy of original list head, the line after may overwrite it */
874 head = intreclist_head[irq];
875
876 /* unlink: make predecessor point to idesc->next instead of to idesc */
877 *hook = idesc->next;
878
879 /* now check whether the element we removed was the list head */
880 if (idesc == head) {
881
882 oldspl = splq(1 << irq);
883
884 /* check whether the new list head is the only element on list */
885 head = intreclist_head[irq];
886 if (head != NULL) {
887 icu_unset(irq, intr_mux);
888 if (head->next != NULL) {
889 /* install the multiplex handler with new list head as argument */
ef0fdad1 890 errcode = icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0);
984263bc
MD
891 if (errcode == 0)
892 update_intrname(irq, NULL);
893 } else {
894 /* install the one remaining handler for this irq */
895 errcode = icu_setup(irq, head->handler,
896 head->argument,
897 head->maskptr, head->flags);
898 if (errcode == 0)
899 update_intrname(irq, head->name);
900 }
901 } else {
902 /* revert to old handler, eg: strayintr */
903 icu_unset(irq, idesc->handler);
904 }
905 splx(oldspl);
906 }
907 update_masks(idesc->maskptr, irq);
908 free(idesc, M_DEVBUF);
909 return (0);
910}
ef0fdad1 911
ef0fdad1
MD
/*
 * ithread_done()
 *
 *	This function is called by an interrupt thread when it has completed
 *	processing a loop.  We interlock with ipending and irunning.  If
 *	a new interrupt is pending for the thread the function clears the
 *	pending bit and returns.  If no new interrupt is pending we
 *	deschedule and sleep.  If we reschedule and return we have to
 *	disable the interrupt again or it will keep interrupting us.
 *
 *	See kern/kern_intr.c for more information.
 */
void
ithread_done(int irq)
{
	struct mdglobaldata *gd = mdcpu;
	int mask = 1 << irq;

	/* must be running inside a critical section */
	KKASSERT(curthread->td_pri >= TDPRI_CRIT);
	/* re-enable the hardware line before checking for pending work */
	INTREN(mask);
	if (gd->gd_ipending & mask) {
		/*
		 * Another interrupt already arrived: consume the pending
		 * bit, mask the line again so it cannot re-interrupt us,
		 * and stay scheduled to process it.
		 */
		atomic_clear_int(&gd->gd_ipending, mask);
		INTRDIS(mask);
		lwkt_schedule_self();
	} else {
		lwkt_deschedule_self();
		if (gd->gd_ipending & mask) { /* race */
			/*
			 * The interrupt fired between the first check and
			 * the deschedule; recover exactly as above.
			 */
			atomic_clear_int(&gd->gd_ipending, mask);
			INTRDIS(mask);
			lwkt_schedule_self();
		} else {
			/* no more work: mark the thread not-running and sleep */
			atomic_clear_int(&gd->gd_irunning, mask);
			lwkt_switch();
		}
	}
}
948