2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.20 2005/11/02 17:20:00 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/ipl.h>
12 #include <machine/lock.h>
13 #include <machine/psl.h>
14 #include <machine/trap.h>
15 #include <machine/smptests.h> /** various SMP options */
17 #include <i386/icu/icu.h>
18 #include <bus/isa/i386/isa.h>
24 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
26 #define ICU_EOI 0x20 /* XXX - define elsewhere */
/*
 * Selectors into the soft copy of the interrupt mask (icu_imen) and the
 * per-cpu 32-bit pending words.
 */
28 #define IRQ_LBIT(irq_num) (1 << (irq_num)) /* bit in a 32-bit pending word */
29 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) /* bit within one IMR byte */
30 #define IRQ_BYTE(irq_num) ((irq_num) >> 3) /* byte 0: irq 0-7 (ICU1), byte 1: irq 8-15 (ICU2) */
/*
 * EOI sequences for the 8259 ICUs.  ENABLE_ICU1 acknowledges the master;
 * the ENABLE_ICU1_AND_2 variants also acknowledge the slave (slave first).
 * Which definitions are active depends on the AUTO_EOI options (see
 * opt_auto_eoi.h included above).
 */
33 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
37 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
38 OUTB_ICU1 ; /* ... to clear in service bit */ \
47 * The data sheet says no auto-EOI on slave, but it sometimes works.
49 #define ENABLE_ICU1_AND_2 ENABLE_ICU1 /* slave in auto-EOI mode: master only */
51 #define ENABLE_ICU1_AND_2 \
52 movb $ICU_EOI,%al ; /* as above */ \
53 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
54 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
62 pushl $0 ; /* dummy error code */ \
63 pushl $0 ; /* dummy trap type */ \
64 pushal ; /* the 8 general registers */ \
75 pushfl ; /* phys int frame / flags */ \
76 pushl %cs ; /* phys int frame / cs */ \
77 pushl 12(%esp) ; /* original caller eip */ \
78 pushl $0 ; /* dummy error code */ \
79 pushl $0 ; /* dummy trap type */ \
80 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
83 * Warning: POP_FRAME can only be used if there is no chance of a
84 * segment register being changed (e.g. by procfs), which is why syscalls
92 addl $2*4,%esp ; /* pop dummy trap & error codes */ \
97 #define MASK_IRQ(icu, irq_num) \
98 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* %al = soft copy of IMR byte */ \
99 orb $IRQ_BIT(irq_num),%al ; /* set this irq's mask bit */ \
100 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update the soft copy */ \
101 outb %al,$icu+ICU_IMR_OFFSET ; /* write new IMR to the ICU */ \
103 #define UNMASK_IRQ(icu, irq_num) \
106 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* %al = soft copy of IMR byte */ \
107 andb $~IRQ_BIT(irq_num),%al ; /* clear this irq's mask bit */ \
108 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update the soft copy */ \
109 outb %al,$icu+ICU_IMR_OFFSET ; /* write new IMR to the ICU */ \
113 * Fast interrupt call handlers run in the following sequence:
115 * - Push the trap frame required by doreti.
116 * - Mask the interrupt and reenable its source.
117 * - If we cannot take the interrupt set its fpending bit and
119 * - If we can take the interrupt clear its fpending bit,
120 * call the handler, then unmask the interrupt and doreti.
122 * YYY can cache gd base pointer instead of using hidden %fs
126 #define FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
131 FAKE_MCOUNT(13*4(%esp)) ; /* profiling hook */ \
132 maybe_extra_ipending ; /* optional extra pending work (empty below) */ \
133 MASK_IRQ(icu, irq_num) ; /* mask this irq at the ICU */ \
135 movl PCPU(curthread),%ebx ; /* %ebx = current thread */ \
136 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
137 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* inside a critical section? */ \
140 /* set pending bit and return, leave interrupt masked */ \
141 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
142 orl $RQF_INTPEND, PCPU(reqflags) ; /* tell doreti an int is pending */ \
145 /* clear pending bit, run handler */ \
146 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
148 call ithread_fast_handler ; /* returns 0 to unmask int */ \
150 UNMASK_IRQ(icu, irq_num) ; /* reenable this irq at the ICU */ \
156 * Restart fast interrupt held up by critical section.
158 * - Push a dummy trap frame as required by doreti.
159 * - The interrupt source is already masked.
160 * - Clear the fpending bit
162 * - Unmask the interrupt
163 * - Pop the dummy frame and do a normal return
165 * YYY can cache gd base pointer instead of using hidden %fs
168 #define FAST_UNPEND(irq_num, vec_name, icu) \
176 call ithread_fast_handler ; /* returns 0 to unmask int */ \
178 UNMASK_IRQ(icu, irq_num) ; /* reenable this irq at the ICU */ \
184 * Slow interrupt call handlers run in the following sequence:
186 * - Push the trap frame required by doreti.
187 * - Mask the interrupt and reenable its source.
188 * - If we cannot take the interrupt set its ipending bit and
189 * doreti. In addition to checking for a critical section
190 * and cpl mask we also check to see if the thread is still
192 * - If we can take the interrupt clear its ipending bit
193 * and schedule its thread. Leave interrupts masked and doreti.
195 * sched_ithd() is called with interrupts enabled and outside of a
196 * critical section (so it can preempt us).
198 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
200 * Note that intr_nesting_level is not bumped during sched_ithd because
201 * blocking allocations are allowed in the preemption case.
203 * YYY can cache gd base pointer instead of using hidden %fs
207 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
212 FAKE_MCOUNT(13*4(%esp)) ; /* profiling hook */ \
213 maybe_extra_ipending ; /* optional extra pending work (empty below) */ \
214 MASK_IRQ(icu, irq_num) ; /* mask this irq at the ICU */ \
216 movl PCPU(curthread),%ebx ; /* %ebx = current thread */ \
217 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
218 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* inside a critical section? */ \
221 /* set the pending bit and return, leave interrupt masked */ \
222 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
223 orl $RQF_INTPEND, PCPU(reqflags) ; /* tell doreti an int is pending */ \
226 /* set running bit, clear pending bit, run handler */ \
227 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
237 * Unmask a slow interrupt. This function is used by interrupt threads
238 * after they have descheduled themselves to reenable interrupts and
239 * possibly cause a reschedule to occur.
242 #define INTR_UNMASK(irq_num, vec_name, icu) \
246 pushl %ebp ; /* frame for ddb backtrace */ \
249 UNMASK_IRQ(icu, irq_num) ; /* reenable this irq at the ICU */ \
254 FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1,) /* irq 0-7: master ICU */
255 FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1,)
256 FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1,)
257 FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1,)
258 FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1,)
259 FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1,)
260 FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1,)
261 FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1,)
262 FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,) /* irq 8-15: slave ICU */
263 FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
264 FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
265 FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
266 FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
267 FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
268 FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
269 FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)
/* Slow (threaded) interrupt entry points, one per ISA irq. */
271 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al,) /* irq 0-7: master ICU */
272 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
273 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
274 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
275 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
276 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
277 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
278 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
279 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,) /* irq 8-15: slave ICU */
280 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
281 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
282 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
283 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
284 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
285 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
286 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
/* Restart handlers for fast interrupts deferred by a critical section. */
288 FAST_UNPEND(0,fastunpend0, IO_ICU1) /* irq 0-7: master ICU */
289 FAST_UNPEND(1,fastunpend1, IO_ICU1)
290 FAST_UNPEND(2,fastunpend2, IO_ICU1)
291 FAST_UNPEND(3,fastunpend3, IO_ICU1)
292 FAST_UNPEND(4,fastunpend4, IO_ICU1)
293 FAST_UNPEND(5,fastunpend5, IO_ICU1)
294 FAST_UNPEND(6,fastunpend6, IO_ICU1)
295 FAST_UNPEND(7,fastunpend7, IO_ICU1)
296 FAST_UNPEND(8,fastunpend8, IO_ICU2) /* irq 8-15: slave ICU */
297 FAST_UNPEND(9,fastunpend9, IO_ICU2)
298 FAST_UNPEND(10,fastunpend10, IO_ICU2)
299 FAST_UNPEND(11,fastunpend11, IO_ICU2)
300 FAST_UNPEND(12,fastunpend12, IO_ICU2)
301 FAST_UNPEND(13,fastunpend13, IO_ICU2)
302 FAST_UNPEND(14,fastunpend14, IO_ICU2)
303 FAST_UNPEND(15,fastunpend15, IO_ICU2)