2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.9 2003/06/30 19:50:31 dillon Exp $
8 * modified for PC98 by Kakefuda
/*
 * NOTE(review): two alternate ICU_IMR_OFFSET definitions follow; the
 * #if/#else selecting between them (PC98 vs. AT hardware, per the
 * "modified for PC98" header) is not visible here -- confirm against
 * the full file.  The IMR is the interrupt mask register of each 8259.
 */
12 #define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
14 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
/* Non-specific EOI command byte written to an ICU's command port. */
17 #define ICU_EOI 0x20 /* XXX - define elsewhere */
/* Bit for irq_num within a 32-bit mask (used on _fpending/_ipending/_irunning). */
19 #define IRQ_LBIT(irq_num) (1 << (irq_num))
/* Bit and byte of irq_num within the byte-array shadow mask 'imen'. */
20 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
21 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
/* AUTO_EOI variant: ICU1 EOIs automatically, so the macro expands to nothing. */
24 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
/*
 * NOTE(review): the two lines below are the body of the explicit-EOI
 * ENABLE_ICU1 variant; its "#define ENABLE_ICU1 \" opener is not
 * visible in this view -- confirm against the full file.
 */
28 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
29 OUTB_ICU1 ; /* ... to clear in service bit */ \
38 * The data sheet says no auto-EOI on slave, but it sometimes works.
/* AUTO_EOI_2 variant: the slave also auto-EOIs, so only ICU1 needs service. */
40 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
/*
 * Explicit-EOI variant: EOI the slave (ICU2) before the master, so the
 * master's in-service state for the cascade is cleared last.
 * Clobbers %al.
 */
42 #define ENABLE_ICU1_AND_2 \
43 movb $ICU_EOI,%al ; /* as above */ \
44 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
45 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/* NOTE(review): fragments of the trap-frame push/pop macros follow; the */ \
/* "#define PUSH_FRAME \"-style openers are not visible in this view.    */ \
/* Dummy error code / trap type then all eight GP registers, building    */ \
/* the frame layout doreti expects.                                      */ \
53 pushl $0 ; /* dummy error code */ \
54 pushl $0 ; /* dummy trap type */ \
55 pushal ; /* 8 registers */ \
/* Dummy interrupt frame synthesized from the interrupted context:       */ \
/* flags, %cs, and the caller's eip still 12 bytes up the stack.         */ \
66 pushfl ; /* phys int frame / flags */ \
67 pushl %cs ; /* phys int frame / cs */ \
68 pushl 12(%esp) ; /* original caller eip */ \
69 pushl $0 ; /* dummy error code */ \
70 pushl $0 ; /* dummy trap type */ \
71 subl $11*4,%esp ; /* pushal + 3 seg regs (dummy) */ \
74 * Warning: POP_FRAME can only be used if there is no chance of a
75 * segment register being changed (e.g. by procfs), which is why syscalls
/* Frame teardown: skip over the dummy trap-type and error-code words. */
83 addl $2*4,%esp ; /* dummy trap & error codes */ \
/* MASK_IRQ(icu, irq_num): set irq_num's bit in the shadow mask byte   */ \
/* array 'imen' and write the updated byte to the ICU's interrupt mask */ \
/* register.  Leaves the new mask byte in %al; clobbers flags.         */ \
88 #define MASK_IRQ(icu, irq_num) \
89 movb imen + IRQ_BYTE(irq_num),%al ; \
90 orb $IRQ_BIT(irq_num),%al ; \
91 movb %al,imen + IRQ_BYTE(irq_num) ; \
92 outb %al,$icu+ICU_IMR_OFFSET ; \
/* UNMASK_IRQ(icu, irq_num): clear irq_num's bit in the shadow mask    */ \
/* 'imen' and write the updated byte to the ICU's interrupt mask       */ \
/* register, re-enabling the source.  Clobbers %al and flags.          */ \
94 #define UNMASK_IRQ(icu, irq_num) \
95 movb imen + IRQ_BYTE(irq_num),%al ; \
96 andb $~IRQ_BIT(irq_num),%al ; \
97 movb %al,imen + IRQ_BYTE(irq_num) ; \
98 outb %al,$icu+ICU_IMR_OFFSET ; \
101 * Fast interrupt call handlers run in the following sequence:
103 * - Push the trap frame required by doreti.
104 * - Mask the interrupt and reenable its source.
105 * - If we cannot take the interrupt set its fpending bit and
107 * - If we can take the interrupt clear its fpending bit,
108 * call the handler, then unmask the interrupt and doreti.
110 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_INTR(irq_num, vec_name, icu, enable_icus): emit the fast-interrupt
 * entry point vec_name for irq_num on the given ICU.  Uses %eax for the
 * thread's cpl and %ebx for curthread.
 * NOTE(review): several lines (ENTRY, enable_icus expansion, conditional
 * jumps after the cmpl/testl pairs, doreti tail) are elided in this view.
 */
114 #define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
119 FAKE_MCOUNT(13*4(%esp)) ; \
120 MASK_IRQ(icu, irq_num) ; \
122 incl _intr_nesting_level ; \
123 movl _curthread,%ebx ; \
124 movl TD_CPL(%ebx),%eax ; /* save the cpl for doreti */ \
/* can't take it now?  in a critical section, or irq masked by the cpl */ \
/* (branch instructions following each test are elided in this view)   */ \
126 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
128 testl $IRQ_LBIT(irq_num), %eax ; \
131 /* set pending bit and return, leave interrupt masked */ \
132 orl $IRQ_LBIT(irq_num),_fpending ; \
133 movl $TDPRI_CRIT,_reqpri ; \
136 /* clear pending bit, run handler */ \
/* enter a critical section around the handler call */ \
137 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
138 andl $~IRQ_LBIT(irq_num),_fpending ; \
139 pushl intr_unit + (irq_num) * 4 ; \
140 call *intr_handler + (irq_num) * 4 ; \
142 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
143 incl _cnt+V_INTR ; /* book-keeping YYY make per-cpu */ \
144 movl intr_countp + (irq_num) * 4,%eax ; \
146 UNMASK_IRQ(icu, irq_num) ; \
152 * Restart fast interrupt held up by critical section or cpl.
154 * - Push a dummy trap frame as required by doreti.
155 * - The interrupt source is already masked.
156 * - Clear the fpending bit
158 * - Unmask the interrupt
159 * - Pop the dummy frame and do a normal return
161 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_UNPEND(irq_num, vec_name, icu): emit vec_name, which re-runs a
 * fast-interrupt handler whose fpending bit was set while it was held up,
 * then unmasks the source via UNMASK_IRQ (clobbering %al/%eax).
 * NOTE(review): the ENTRY/frame-setup and return lines are elided here.
 */
164 #define FAST_UNPEND(irq_num, vec_name, icu) \
/* push the unit cookie and call the registered handler for this irq */ \
171 pushl intr_unit + (irq_num) * 4 ; \
172 call *intr_handler + (irq_num) * 4 ; \
175 movl intr_countp + (irq_num) * 4, %eax ; \
177 UNMASK_IRQ(icu, irq_num) ; \
183 * Slow interrupt call handlers run in the following sequence:
185 * - Push the trap frame required by doreti.
186 * - Mask the interrupt and reenable its source.
187 * - If we cannot take the interrupt set its ipending bit and
188 * doreti. In addition to checking for a critical section
189 * and cpl mask we also check to see if the thread is still
191 * - If we can take the interrupt clear its ipending bit,
192 * set its irunning bit, and schedule its thread. Leave
193 * interrupts masked and doreti.
195 * The interrupt thread will run its handlers and loop if
196 * ipending is found to be set. ipending/irunning interlock
197 * the interrupt thread with the interrupt. The handler calls
198 * UNPEND when it is through.
200 * Note that we do not enable interrupts when calling sched_ithd.
201 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
203 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending):
 * emit the slow (threaded) interrupt entry point vec_name.
 * maybe_extra_ipending is extra code run before masking (CLKINTR_PENDING
 * for irq 0, empty otherwise).  NOTE(review): the 'reg' parameter (al for
 * ICU1 vectors, ah for ICU2) is not used in the lines visible here --
 * presumably consumed by elided lines; confirm against the full file.
 */
207 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
212 FAKE_MCOUNT(13*4(%esp)) ; \
213 maybe_extra_ipending ; \
214 MASK_IRQ(icu, irq_num) ; \
216 incl _intr_nesting_level ; \
217 movl _curthread,%ebx ; \
218 movl TD_CPL(%ebx), %eax ; \
219 pushl %eax ; /* push CPL for doreti */ \
/* can't take it now?  critical section, handler thread still running, */ \
/* or irq masked by the cpl (conditional jumps are elided in this view) */ \
220 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
222 testl $IRQ_LBIT(irq_num),_irunning ; \
224 testl $IRQ_LBIT(irq_num), %eax ; \
227 /* set the pending bit and return, leave interrupt masked */ \
228 orl $IRQ_LBIT(irq_num),_ipending ; \
229 movl $TDPRI_CRIT,_reqpri ; \
/* enter a critical section while scheduling the interrupt thread */ \
232 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
233 /* set running bit, clear pending bit, run handler */ \
234 orl $IRQ_LBIT(irq_num),_irunning ; \
235 andl $~IRQ_LBIT(irq_num),_ipending ; \
240 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
241 incl _cnt+V_INTR ; /* book-keeping YYY make per-cpu */ \
242 movl intr_countp + (irq_num) * 4,%eax ; \
249 * Unmask a slow interrupt. This function is used by interrupt threads
250 * after they have descheduled themselves to reenable interrupts and
251 * possibly cause a reschedule to occur. The interrupt's irunning bit
252 * is cleared prior to unmasking.
/*
 * INTR_UNMASK(irq_num, vec_name, icu): emit vec_name, callable from an
 * interrupt thread.  Clears the irq's irunning bit, then unmasks the
 * source on the ICU (UNMASK_IRQ clobbers %al and flags).
 */
255 #define INTR_UNMASK(irq_num, vec_name, icu) \
259 pushl %ebp ; /* frame for ddb backtrace */ \
261 andl $~IRQ_LBIT(irq_num),_irunning ; \
262 UNMASK_IRQ(icu, irq_num) ; \
/* Fast-interrupt entry points: IRQs 0-7 on the master ICU (EOI via */ \
/* ENABLE_ICU1), IRQs 8-15 on the slave (EOI both ICUs).            */ \
267 FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
268 FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
269 FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
270 FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
271 FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
272 FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
273 FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
274 FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
275 FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
276 FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
277 FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
278 FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
279 FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
280 FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
281 FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
282 FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
/* Clock interrupt (irq 0) additionally records that a clock tick is pending. */
284 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/*
 * Slow (threaded) interrupt entry points.  The reg argument is al for
 * master-ICU vectors and ah for slave-ICU vectors, matching the low/high
 * byte of the 16-bit mask; only irq 0 passes extra pending code.
 */
285 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
286 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
287 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
288 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
289 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
290 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
291 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
292 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
293 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
294 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
295 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
296 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
297 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
298 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
299 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
300 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
/* Restart entry points for fast interrupts whose fpending bit was set. */
302 FAST_UNPEND(0,fastunpend0, IO_ICU1)
303 FAST_UNPEND(1,fastunpend1, IO_ICU1)
304 FAST_UNPEND(2,fastunpend2, IO_ICU1)
305 FAST_UNPEND(3,fastunpend3, IO_ICU1)
306 FAST_UNPEND(4,fastunpend4, IO_ICU1)
307 FAST_UNPEND(5,fastunpend5, IO_ICU1)
308 FAST_UNPEND(6,fastunpend6, IO_ICU1)
309 FAST_UNPEND(7,fastunpend7, IO_ICU1)
310 FAST_UNPEND(8,fastunpend8, IO_ICU2)
311 FAST_UNPEND(9,fastunpend9, IO_ICU2)
312 FAST_UNPEND(10,fastunpend10, IO_ICU2)
313 FAST_UNPEND(11,fastunpend11, IO_ICU2)
314 FAST_UNPEND(12,fastunpend12, IO_ICU2)
315 FAST_UNPEND(13,fastunpend13, IO_ICU2)
316 FAST_UNPEND(14,fastunpend14, IO_ICU2)
317 FAST_UNPEND(15,fastunpend15, IO_ICU2)