2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.16 2004/01/30 05:42:16 dillon Exp $
8 * modified for PC98 by Kakefuda
/*
 * ICU programming constants.
 * NOTE(review): two conflicting ICU_IMR_OFFSET definitions are visible
 * below (offset 2 for the PC98 I/O layout, offset 1 for standard AT);
 * the #ifdef PC98 / #else / #endif guards that select between them are
 * not visible in this fragment -- confirm against the full file.
 */
12 #define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
14 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
17 #define ICU_EOI 0x20 /* XXX - define elsewhere */
/*
 * IRQ_LBIT(irq): bit for irq in a 32-bit mask, used against the per-cpu
 * fpending/ipending words and the cpl.
 * IRQ_BIT(irq)/IRQ_BYTE(irq): bit-within-byte and byte index for the
 * in-memory interrupt mask image `imen' (one mask byte per 8259 ICU),
 * used by MASK_IRQ/UNMASK_IRQ below.
 */
19 #define IRQ_LBIT(irq_num) (1 << (irq_num))
20 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
21 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
/*
 * EOI sequences for the 8259 ICUs.
 * NOTE(review): this fragment shows pieces of both the auto-EOI and the
 * explicit-EOI variants of ENABLE_ICU1 and ENABLE_ICU1_AND_2; the
 * AUTO_EOI_1 / AUTO_EOI_2 preprocessor guards (and the explicit-EOI
 * #define head for ENABLE_ICU1) are not visible here -- confirm against
 * the full file before editing.
 */
24 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
28 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
29 OUTB_ICU1 ; /* ... to clear in service bit */ \
38 * The data sheet says no auto-EOI on slave, but it sometimes works.
40 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
/*
 * Explicit-EOI variant for slave-ICU interrupts: EOI the slave (ICU2)
 * first, then the master via OUTB_ICU1 (a no-op when AUTO_EOI_1).
 */
42 #define ENABLE_ICU1_AND_2 \
43 movb $ICU_EOI,%al ; /* as above */ \
44 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
45 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/*
 * Trap-frame construction/teardown fragments.
 * NOTE(review): the #define heads for PUSH_FRAME, PUSH_DUMMY_FRAME and
 * POP_FRAME are not visible in this fragment; only parts of their
 * bodies appear below.  The frames built here must match the layout
 * expected by doreti -- do not reorder.
 */
53 pushl $0 ; /* dummy error code */ \
54 pushl $0 ; /* dummy trap type */ \
55 pushal ; /* 8 registers */ \
/*
 * Dummy-frame variant: synthesize flags/cs/eip as if a hardware
 * interrupt had occurred, then reserve space instead of pushing live
 * register and segment state.
 */
66 pushfl ; /* phys int frame / flags */ \
67 pushl %cs ; /* phys int frame / cs */ \
68 pushl 12(%esp) ; /* original caller eip */ \
69 pushl $0 ; /* dummy error code */ \
70 pushl $0 ; /* dummy trap type */ \
71 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
74 * Warning: POP_FRAME can only be used if there is no chance of a
75 * segment register being changed (e.g. by procfs), which is why syscalls
83 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * MASK_IRQ(icu, irq_num):
 *	Set irq_num's bit in the in-memory interrupt mask image (imen)
 *	and write the updated mask byte to the given ICU's interrupt
 *	mask register, disabling that source at the 8259.
 *	Clobbers %al; imen is kept in sync with the hardware IMR.
 */
88 #define MASK_IRQ(icu, irq_num) \
89 movb imen + IRQ_BYTE(irq_num),%al ; \
90 orb $IRQ_BIT(irq_num),%al ; \
91 movb %al,imen + IRQ_BYTE(irq_num) ; \
92 outb %al,$icu+ICU_IMR_OFFSET ; \
/*
 * UNMASK_IRQ(icu, irq_num):
 *	Clear irq_num's bit in the in-memory interrupt mask image (imen)
 *	and write the updated mask byte to the given ICU's interrupt
 *	mask register, re-enabling that source at the 8259.
 *	Clobbers %al; mirror image of MASK_IRQ.
 */
94 #define UNMASK_IRQ(icu, irq_num) \
95 movb imen + IRQ_BYTE(irq_num),%al ; \
96 andb $~IRQ_BIT(irq_num),%al ; \
97 movb %al,imen + IRQ_BYTE(irq_num) ; \
98 outb %al,$icu+ICU_IMR_OFFSET ; \
101 * Fast interrupt call handlers run in the following sequence:
103 * - Push the trap frame required by doreti.
104 * - Mask the interrupt and reenable its source.
105 * - If we cannot take the interrupt set its fpending bit and
106 * doreti.
107 * - If we can take the interrupt clear its fpending bit,
108 * call the handler, then unmask the interrupt and doreti.
110 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending):
 *	Fast-interrupt entry.  Masks the source (MASK_IRQ), then either:
 *	 - cannot take it now (current thread already at/above
 *	   TDPRI_CRIT, or irq masked in the cpl held in %eax): record it
 *	   in PCPU(fpending) + RQF_INTPEND and leave the source masked;
 *	 - otherwise: clear fpending, enter a critical section by adding
 *	   TDPRI_CRIT to TD_PRI, call the handler with its intr_unit
 *	   argument, then drop the critical section and UNMASK_IRQ.
 *	%ebx = curthread across the handler call; %eax carries the cpl
 *	saved for doreti.
 * NOTE(review): several lines of this macro (entry label, PUSH_FRAME,
 * enable_icus expansion, branch targets and the doreti return path)
 * are not visible in this fragment -- confirm against the full file.
 */
114 #define FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
119 FAKE_MCOUNT(13*4(%esp)) ; \
120 maybe_extra_ipending ; \
121 MASK_IRQ(icu, irq_num) ; \
123 movl PCPU(curthread),%ebx ; \
124 movl TD_CPL(%ebx),%eax ; /* save the cpl for doreti */ \
126 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
128 testl $IRQ_LBIT(irq_num), %eax ; \
131 /* set pending bit and return, leave interrupt masked */ \
132 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
133 orl $RQF_INTPEND, PCPU(reqflags) ; \
136 /* clear pending bit, run handler */ \
137 incl PCPU(intr_nesting_level) ; \
138 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
139 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
140 pushl intr_unit + (irq_num) * 4 ; \
141 call *intr_handler + (irq_num) * 4 ; \
143 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
144 decl PCPU(intr_nesting_level) ; \
145 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
146 movl intr_countp + (irq_num) * 4,%eax ; \
148 UNMASK_IRQ(icu, irq_num) ; \
154 * Restart fast interrupt held up by critical section or cpl.
156 * - Push a dummy trap frame as required by doreti.
157 * - The interrupt source is already masked.
158 * - Clear the fpending bit
160 * - Unmask the interrupt
161 * - Pop the dummy frame and do a normal return
163 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_UNPEND(irq_num, vec_name, icu):
 *	Run a fast interrupt that was previously held pending: the
 *	source is already masked on entry, so just call the handler with
 *	its intr_unit argument, account the interrupt (V_INTR and the
 *	per-irq counter pointer loaded into %eax), then UNMASK_IRQ.
 * NOTE(review): the entry label, dummy-frame push/pop and return
 * sequence of this macro are not visible in this fragment.
 */
166 #define FAST_UNPEND(irq_num, vec_name, icu) \
173 pushl intr_unit + (irq_num) * 4 ; \
174 call *intr_handler + (irq_num) * 4 ; \
176 incl PCPU(cnt)+V_INTR ; \
177 movl intr_countp + (irq_num) * 4, %eax ; \
179 UNMASK_IRQ(icu, irq_num) ; \
185 * Slow interrupt call handlers run in the following sequence:
187 * - Push the trap frame required by doreti.
188 * - Mask the interrupt and reenable its source.
189 * - If we cannot take the interrupt set its ipending bit and
190 * doreti. In addition to checking for a critical section
191 * and cpl mask we also check to see if the thread is still
193 * - If we can take the interrupt clear its ipending bit
194 * and schedule its thread. Leave interrupts masked and doreti.
196 * sched_ithd() is called with interrupts enabled and outside of a
197 * critical section (so it can preempt us).
199 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
201 * Note that intr_nesting_level is not bumped during sched_ithd because
202 * blocking allocations are allowed in the preemption case.
204 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending):
 *	Slow (threaded) interrupt entry.  Masks the source, pushes the
 *	cpl (TD_CPL) for doreti, then either:
 *	 - cannot take it now (critical section, or irq masked in the
 *	   cpl): set its bit in PCPU(ipending) + RQF_INTPEND and return
 *	   with the source still masked;
 *	 - otherwise: clear ipending and hand off to the interrupt
 *	   thread (the sched_ithd call is not visible in this fragment).
 *	The source stays masked; per the file comments the ithread
 *	re-enables it later (see INTR_UNMASK).
 * NOTE(review): portions of this macro (entry label, PUSH_FRAME,
 * enable_icus expansion, sched_ithd call, branch labels, doreti) are
 * missing from this fragment -- confirm against the full file.  The
 * `reg' parameter's use is also not visible here.
 */
208 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
213 FAKE_MCOUNT(13*4(%esp)) ; \
214 maybe_extra_ipending ; \
215 MASK_IRQ(icu, irq_num) ; \
217 movl PCPU(curthread),%ebx ; \
218 movl TD_CPL(%ebx), %eax ; \
219 pushl %eax ; /* push CPL for doreti */ \
220 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
222 testl $IRQ_LBIT(irq_num), %eax ; \
225 /* set the pending bit and return, leave interrupt masked */ \
226 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
227 orl $RQF_INTPEND, PCPU(reqflags) ; \
230 /* set running bit, clear pending bit, run handler */ \
231 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
236 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
237 movl intr_countp + (irq_num) * 4,%eax ; \
244 * Unmask a slow interrupt. This function is used by interrupt threads
245 * after they have descheduled themselves to reenable interrupts and
246 * possibly cause a reschedule to occur.
/*
 * INTR_UNMASK(irq_num, vec_name, icu):
 *	Entry point used by interrupt threads, after descheduling
 *	themselves, to re-enable a masked interrupt source.  Only the
 *	ddb frame push and the UNMASK_IRQ are visible in this fragment;
 *	the entry label and return sequence are not.
 */
249 #define INTR_UNMASK(irq_num, vec_name, icu) \
253 pushl %ebp ; /* frame for ddb backtrace */ \
255 UNMASK_IRQ(icu, irq_num) ; \
/*
 * Fast-interrupt vectors for IRQs 0-15.  IRQs 0-7 live on the master
 * ICU (IO_ICU1); IRQs 8-15 live on the slave (IO_ICU2), whose EOI
 * sequence must hit both ICUs (ENABLE_ICU1_AND_2).  The trailing empty
 * argument is maybe_extra_ipending (unused here).
 */
260 FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1,)
261 FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1,)
262 FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1,)
263 FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1,)
264 FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1,)
265 FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1,)
266 FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1,)
267 FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1,)
268 FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,)
269 FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
270 FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
271 FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
272 FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
273 FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
274 FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
275 FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)
/*
 * Slow (threaded) interrupt vectors for IRQs 0-15.  Same master/slave
 * split as the fast vectors: IO_ICU1 + ENABLE_ICU1 for 0-7,
 * IO_ICU2 + ENABLE_ICU1_AND_2 for 8-15.  The reg argument is %al for
 * master IRQs and %ah for slave IRQs (its use inside INTR is not
 * visible in this fragment); the trailing empty argument is
 * maybe_extra_ipending.
 */
277 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al,)
278 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
279 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
280 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
281 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
282 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
283 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
284 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
285 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
286 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
287 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
288 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
289 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
290 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
291 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
292 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
/*
 * Entry points for running previously-pended fast interrupts
 * (source already masked on entry).  IRQs 0-7 on the master ICU
 * (IO_ICU1), 8-15 on the slave (IO_ICU2); no EOI argument is needed
 * since the EOI was issued when the interrupt originally arrived.
 */
294 FAST_UNPEND(0,fastunpend0, IO_ICU1)
295 FAST_UNPEND(1,fastunpend1, IO_ICU1)
296 FAST_UNPEND(2,fastunpend2, IO_ICU1)
297 FAST_UNPEND(3,fastunpend3, IO_ICU1)
298 FAST_UNPEND(4,fastunpend4, IO_ICU1)
299 FAST_UNPEND(5,fastunpend5, IO_ICU1)
300 FAST_UNPEND(6,fastunpend6, IO_ICU1)
301 FAST_UNPEND(7,fastunpend7, IO_ICU1)
302 FAST_UNPEND(8,fastunpend8, IO_ICU2)
303 FAST_UNPEND(9,fastunpend9, IO_ICU2)
304 FAST_UNPEND(10,fastunpend10, IO_ICU2)
305 FAST_UNPEND(11,fastunpend11, IO_ICU2)
306 FAST_UNPEND(12,fastunpend12, IO_ICU2)
307 FAST_UNPEND(13,fastunpend13, IO_ICU2)
308 FAST_UNPEND(14,fastunpend14, IO_ICU2)
309 FAST_UNPEND(15,fastunpend15, IO_ICU2)