2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.19 2005/10/13 00:02:47 dillon Exp $
/*
 * Basic 8259 ICU constants and mask helpers.
 *
 * ICU_IMR_OFFSET: the Interrupt Mask Register is one i/o port above the
 *	ICU base port (IO_ICU1/IO_ICU2).
 * ICU_EOI: non-specific End-Of-Interrupt command byte.
 * IRQ_LBIT: 32-bit mask bit for irq_num (used on the per-cpu pending words).
 * IRQ_BIT/IRQ_BYTE: bit-within-byte and byte index into the cached
 *	interrupt-mask image (imen) — two 8-bit ICUs, hence % 8 and >> 3.
 */
7 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
9 #define ICU_EOI 0x20 /* XXX - define elsewhere */
11 #define IRQ_LBIT(irq_num) (1 << (irq_num))
12 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
13 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
/*
 * Auto-EOI variant: the ICU EOIs itself, so the macro expands to nothing.
 * NOTE(review): presumably guarded by #ifdef AUTO_EOI_1 — the conditional
 * lines are not visible in this excerpt; confirm against the full file.
 */
16 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
/* explicit-EOI body of ENABLE_ICU1 (its #define line is not in this excerpt) */ \
20 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
21 OUTB_ICU1 ; /* ... to clear in service bit */ \
30 * The data sheet says no auto-EOI on slave, but it sometimes works.
32 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
/*
 * Non-auto-EOI slave variant: EOI the slave ICU explicitly, then the
 * master.  NOTE(review): the two definitions above/below are presumably
 * the AUTO_EOI_2 / !AUTO_EOI_2 branches — #ifdef lines not visible here.
 */
34 #define ENABLE_ICU1_AND_2 \
35 movb $ICU_EOI,%al ; /* as above */ \
36 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
37 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/* NOTE(review): PUSH_FRAME interior — its #define line and any segment- */ \
/* register pushes are missing from this excerpt.  Visible part builds   */ \
/* the dummy error/trap slots and saves the general registers:           */ \
45 pushl $0 ; /* dummy error code */ \
46 pushl $0 ; /* dummy trap type */ \
47 pushal ; /* 8 registers */ \
/* NOTE(review): dummy-frame macro interior (its #define line is missing */ \
/* here).  Fakes a hardware interrupt frame so doreti can be used for a  */ \
/* deferred interrupt; reserves pushal + seg-reg + CPL slots untouched:  */ \
58 pushfl ; /* phys int frame / flags */ \
59 pushl %cs ; /* phys int frame / cs */ \
60 pushl 12(%esp) ; /* original caller eip */ \
61 pushl $0 ; /* dummy error code */ \
62 pushl $0 ; /* dummy trap type */ \
63 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
66 * Warning: POP_FRAME can only be used if there is no chance of a
67 * segment register being changed (e.g. by procfs), which is why syscalls
/* NOTE(review): POP_FRAME tail — earlier lines of the macro are missing */ \
/* from this excerpt.  Discards the two dummy slots PUSH_FRAME pushed:   */ \
75 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * MASK_IRQ(icu, irq_num): set irq_num's bit in the in-memory mask image
 * (imen) and write the updated byte to the ICU's IMR, masking the line.
 * Clobbers %al.  The imen read-modify-write is not atomic; assumes the
 * caller runs with interrupts disabled — TODO confirm, not visible here.
 */
80 #define MASK_IRQ(icu, irq_num) \
81 movb imen + IRQ_BYTE(irq_num),%al ; \
82 orb $IRQ_BIT(irq_num),%al ; \
83 movb %al,imen + IRQ_BYTE(irq_num) ; \
84 outb %al,$icu+ICU_IMR_OFFSET ; \
/*
 * UNMASK_IRQ(icu, irq_num): clear irq_num's bit in imen and push the new
 * mask byte to the ICU's IMR, re-enabling the line.  Clobbers %al.
 * NOTE(review): original lines 87-88 of the macro are missing from this
 * excerpt — consult the full file for the complete body.
 */
86 #define UNMASK_IRQ(icu, irq_num) \
89 movb imen + IRQ_BYTE(irq_num),%al ; \
90 andb $~IRQ_BIT(irq_num),%al ; \
91 movb %al,imen + IRQ_BYTE(irq_num) ; \
92 outb %al,$icu+ICU_IMR_OFFSET ; \
96 * Fast interrupt call handlers run in the following sequence:
98 * - Push the trap frame required by doreti.
99 * - Mask the interrupt and reenable its source.
100 * - If we cannot take the interrupt, set its fpending bit and doreti.
102 * - If we can take the interrupt clear its fpending bit,
103 * call the handler, then unmask the interrupt and doreti.
105 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending):
 * fast-interrupt entry stub.  Masks the IRQ at its ICU, then either calls
 * ithread_fast_handler immediately or — if the current thread is in a
 * critical section (TD_PRI >= TDPRI_CRIT) — records the IRQ in the per-cpu
 * fpending word and sets RQF_INTPEND so doreti replays it later, leaving
 * the IRQ masked.  NOTE(review): the entry label, PUSH_FRAME, enable_icus
 * invocation and the branch/label lines between these statements are
 * missing from this excerpt.
 */
109 #define FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
114 FAKE_MCOUNT(13*4(%esp)) ; \
115 maybe_extra_ipending ; \
116 MASK_IRQ(icu, irq_num) ; \
118 movl PCPU(curthread),%ebx ; \
119 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
120 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
123 /* set pending bit and return, leave interrupt masked */ \
124 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
125 orl $RQF_INTPEND, PCPU(reqflags) ; \
128 /* clear pending bit, run handler */ \
129 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
131 call ithread_fast_handler ; /* returns 0 to unmask int */ \
133 UNMASK_IRQ(icu, irq_num) ; \
139 * Restart fast interrupt held up by critical section.
141 * - Push a dummy trap frame as required by doreti.
142 * - The interrupt source is already masked.
143 * - Clear the fpending bit
145 * - Unmask the interrupt
146 * - Pop the dummy frame and do a normal return
148 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_UNPEND(irq_num, vec_name, icu): replay a fast interrupt that was
 * deferred while in a critical section; runs the handler and re-enables
 * the (already masked) IRQ.  NOTE(review): the entry label, dummy-frame
 * push and frame pop around these lines are missing from this excerpt.
 */
151 #define FAST_UNPEND(irq_num, vec_name, icu) \
159 call ithread_fast_handler ; /* returns 0 to unmask int */ \
161 UNMASK_IRQ(icu, irq_num) ; \
167 * Slow interrupt call handlers run in the following sequence:
169 * - Push the trap frame required by doreti.
170 * - Mask the interrupt and reenable its source.
171 * - If we cannot take the interrupt set its ipending bit and
172 * doreti. In addition to checking for a critical section
173 * and cpl mask we also check to see if the thread is still running.
175 * - If we can take the interrupt clear its ipending bit
176 * and schedule its thread. Leave interrupts masked and doreti.
178 * sched_ithd() is called with interrupts enabled and outside of a
179 * critical section (so it can preempt us).
181 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
183 * Note that intr_nesting_level is not bumped during sched_ithd because
184 * blocking allocations are allowed in the preemption case.
186 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending):
 * slow (threaded) interrupt entry stub.  Masks the IRQ, then either
 * records it in the per-cpu ipending word (plus RQF_INTPEND) when the
 * current thread is in a critical section, or clears ipending and hands
 * off to the interrupt thread (per the block comment above, via
 * sched_ithd — the call itself is in lines missing from this excerpt).
 * The IRQ stays masked either way.  NOTE(review): entry label,
 * PUSH_FRAME, enable_icus use, branch labels, and the use of the `reg`
 * argument (%al for ICU1, %ah for ICU2 at the call sites) are not
 * visible here.
 */
190 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
195 FAKE_MCOUNT(13*4(%esp)) ; \
196 maybe_extra_ipending ; \
197 MASK_IRQ(icu, irq_num) ; \
199 movl PCPU(curthread),%ebx ; \
200 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
201 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
204 /* set the pending bit and return, leave interrupt masked */ \
205 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
206 orl $RQF_INTPEND, PCPU(reqflags) ; \
209 /* set running bit, clear pending bit, run handler */ \
210 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
220 * Unmask a slow interrupt. This function is used by interrupt threads
221 * after they have descheduled themselves to reenable interrupts and
222 * possibly cause a reschedule to occur.
/*
 * INTR_UNMASK(irq_num, vec_name, icu): entry point used by interrupt
 * threads (per the block comment above) to re-enable a slow interrupt
 * after descheduling.  NOTE(review): the entry label and return sequence
 * around these lines are missing from this excerpt.
 */
225 #define INTR_UNMASK(irq_num, vec_name, icu) \
229 pushl %ebp ; /* frame for ddb backtrace */ \
232 UNMASK_IRQ(icu, irq_num) ; \
/*
 * Instantiate fast-interrupt entry points for IRQ 0-15.  IRQs 0-7 sit on
 * the master ICU (IO_ICU1), IRQs 8-15 on the slave (IO_ICU2), whose
 * variant must EOI both ICUs.  The trailing empty argument is
 * maybe_extra_ipending.
 */
237 FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1,)
238 FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1,)
239 FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1,)
240 FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1,)
241 FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1,)
242 FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1,)
243 FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1,)
244 FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1,)
245 FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,)
246 FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
247 FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
248 FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
249 FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
250 FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
251 FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
252 FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)
/*
 * Instantiate slow (threaded) interrupt entry points for IRQ 0-15.
 * Master-ICU IRQs pass %al as the `reg` argument, slave-ICU IRQs pass
 * %ah; the trailing empty argument is maybe_extra_ipending.
 */
254 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al,)
255 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
256 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
257 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
258 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
259 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
260 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
261 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
262 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
263 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
264 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
265 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
266 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
267 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
268 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
269 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
/*
 * Instantiate the replay entry points for fast interrupts deferred by a
 * critical section, one per IRQ (master ICU for 0-7, slave for 8-15).
 */
271 FAST_UNPEND(0,fastunpend0, IO_ICU1)
272 FAST_UNPEND(1,fastunpend1, IO_ICU1)
273 FAST_UNPEND(2,fastunpend2, IO_ICU1)
274 FAST_UNPEND(3,fastunpend3, IO_ICU1)
275 FAST_UNPEND(4,fastunpend4, IO_ICU1)
276 FAST_UNPEND(5,fastunpend5, IO_ICU1)
277 FAST_UNPEND(6,fastunpend6, IO_ICU1)
278 FAST_UNPEND(7,fastunpend7, IO_ICU1)
279 FAST_UNPEND(8,fastunpend8, IO_ICU2)
280 FAST_UNPEND(9,fastunpend9, IO_ICU2)
281 FAST_UNPEND(10,fastunpend10, IO_ICU2)
282 FAST_UNPEND(11,fastunpend11, IO_ICU2)
283 FAST_UNPEND(12,fastunpend12, IO_ICU2)
284 FAST_UNPEND(13,fastunpend13, IO_ICU2)
285 FAST_UNPEND(14,fastunpend14, IO_ICU2)
286 FAST_UNPEND(15,fastunpend15, IO_ICU2)