2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.13 2003/07/12 17:54:35 dillon Exp $
8 * modified for PC98 by Kakefuda
/*
 * Offset of the 8259 interrupt-mask register (IMR) from the ICU base
 * I/O port.  NOTE(review): two definitions appear here; presumably the
 * PC98 vs. AT arms of an #ifdef whose conditional lines are not visible
 * in this chunk — confirm against the full file.
 */
12 #define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
14 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
17 #define ICU_EOI 0x20 /* XXX - define elsewhere */
/*
 * IRQ mask helpers: IRQ_LBIT gives the bit in a 32-bit mask word
 * (cpl, fpending, ipending); IRQ_BIT/IRQ_BYTE locate the bit within
 * the per-ICU 8-bit mask image kept in imen (see MASK_IRQ/UNMASK_IRQ).
 */
19 #define IRQ_LBIT(irq_num) (1 << (irq_num))
20 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
21 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
/*
 * EOI (end-of-interrupt) sequences.  ENABLE_ICU1 EOIs the master 8259;
 * ENABLE_ICU1_AND_2 EOIs the slave first, then the master.
 * NOTE(review): both the AUTO_EOI and the explicit-EOI variants appear
 * below; the #ifdef lines selecting between them are not visible in
 * this chunk.  The explicit variants clobber %al.
 */
24 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
28 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
29 OUTB_ICU1 ; /* ... to clear in service bit */ \
38 * The data sheet says no auto-EOI on slave, but it sometimes works.
40 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
42 #define ENABLE_ICU1_AND_2 \
43 movb $ICU_EOI,%al ; /* as above */ \
44 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
45 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/*
 * Trap-frame construction/teardown fragments (PUSH_FRAME-/POP_FRAME-
 * style macros).  The frame layout built here must match what doreti
 * expects.  NOTE(review): the #define lines and several interior lines
 * of these macros are not visible in this chunk.
 */
53 pushl $0 ; /* dummy error code */ \
54 pushl $0 ; /* dummy trap type */ \
55 pushal ; /* 8 registers */ \
66 pushfl ; /* phys int frame / flags */ \
67 pushl %cs ; /* phys int frame / cs */ \
68 pushl 12(%esp) ; /* original caller eip */ \
69 pushl $0 ; /* dummy error code */ \
70 pushl $0 ; /* dummy trap type */ \
71 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
74 * Warning: POP_FRAME can only be used if there is no chance of a
75 * segment register being changed (e.g. by procfs), which is why syscalls
83 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * MASK_IRQ(icu, irq_num): set the IRQ's bit in the software mask image
 * (imen) and write the updated byte to that ICU's interrupt mask
 * register.  Software copy is updated before the hardware so the two
 * never disagree in imen's favor.  Clobbers %al.
 */
88 #define MASK_IRQ(icu, irq_num) \
89 movb imen + IRQ_BYTE(irq_num),%al ; /* al = cached IMR byte */ \
90 orb $IRQ_BIT(irq_num),%al ; /* set this IRQ's mask bit */ \
91 movb %al,imen + IRQ_BYTE(irq_num) ; /* update software copy first */ \
92 outb %al,$icu+ICU_IMR_OFFSET ; /* ... then the hardware IMR */ \
/*
 * UNMASK_IRQ(icu, irq_num): clear the IRQ's bit in the software mask
 * image (imen) and write the updated byte to that ICU's interrupt mask
 * register, reenabling the interrupt source.  Clobbers %al.
 */
94 #define UNMASK_IRQ(icu, irq_num) \
95 movb imen + IRQ_BYTE(irq_num),%al ; /* al = cached IMR byte */ \
96 andb $~IRQ_BIT(irq_num),%al ; /* clear this IRQ's mask bit */ \
97 movb %al,imen + IRQ_BYTE(irq_num) ; /* update software copy first */ \
98 outb %al,$icu+ICU_IMR_OFFSET ; /* ... then the hardware IMR */ \
101 * Fast interrupt call handlers run in the following sequence:
103 * - Push the trap frame required by doreti.
104 * - Mask the interrupt and reenable its source.
105 * - If we cannot take the interrupt set its fpending bit and
107 * - If we can take the interrupt clear its fpending bit,
108 * call the handler, then unmask the interrupt and doreti.
110 * YYY can cache gd base pointer instead of using hidden %fs
114 #define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
119 FAKE_MCOUNT(13*4(%esp)) ; /* profiling hook */ \
120 MASK_IRQ(icu, irq_num) ; /* mask this IRQ at the ICU */ \
122 incl PCPU(intr_nesting_level) ; /* now in interrupt context */ \
123 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
124 movl TD_CPL(%ebx),%eax ; /* save the cpl for doreti */ \
126 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
128 testl $IRQ_LBIT(irq_num), %eax ; /* IRQ blocked by the saved cpl? */ \
131 /* set pending bit and return, leave interrupt masked */ \
132 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; /* defer: mark fast int pending */ \
133 orl $RQF_INTPEND, PCPU(reqflags) ; /* have doreti service it later */ \
136 /* clear pending bit, run handler */ \
137 addl $TDPRI_CRIT,TD_PRI(%ebx) ; /* enter critical section for handler */ \
138 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; /* no longer pending */ \
139 pushl intr_unit + (irq_num) * 4 ; /* arg: this IRQ's unit cookie */ \
140 call *intr_handler + (irq_num) * 4 ; /* run the fast handler */ \
142 subl $TDPRI_CRIT,TD_PRI(%ebx) ; /* leave critical section */ \
143 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
144 movl intr_countp + (irq_num) * 4,%eax ; /* eax = per-IRQ counter ptr */ \
146 UNMASK_IRQ(icu, irq_num) ; /* handler done: reenable the IRQ */ \
152 * Restart fast interrupt held up by critical section or cpl.
154 * - Push a dummy trap frame as required by doreti.
155 * - The interrupt source is already masked.
156 * - Clear the fpending bit
158 * - Unmask the interrupt
159 * - Pop the dummy frame and do a normal return
161 * YYY can cache gd base pointer instead of using hidden %fs
164 #define FAST_UNPEND(irq_num, vec_name, icu) \
171 pushl intr_unit + (irq_num) * 4 ; /* arg: this IRQ's unit cookie */ \
172 call *intr_handler + (irq_num) * 4 ; /* run the deferred fast handler */ \
174 incl PCPU(cnt)+V_INTR ; /* book-keeping */ \
175 movl intr_countp + (irq_num) * 4, %eax ; /* eax = per-IRQ counter ptr */ \
177 UNMASK_IRQ(icu, irq_num) ; /* handler done: reenable the IRQ */ \
183 * Slow interrupt call handlers run in the following sequence:
185 * - Push the trap frame required by doreti.
186 * - Mask the interrupt and reenable its source.
187 * - If we cannot take the interrupt set its ipending bit and
188 * doreti. In addition to checking for a critical section
189 * and cpl mask we also check to see if the thread is still
191 * - If we can take the interrupt clear its ipending bit
192 * and schedule its thread. Leave interrupts masked and doreti.
194 * sched_ithd() is called with interrupts enabled and outside of a
195 * critical section (so it can preempt us).
197 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
199 * YYY can cache gd base pointer instead of using hidden %fs
203 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
208 FAKE_MCOUNT(13*4(%esp)) ; /* profiling hook */ \
209 maybe_extra_ipending ; /* e.g. CLKINTR_PENDING for the clock */ \
210 MASK_IRQ(icu, irq_num) ; /* mask this IRQ at the ICU */ \
212 incl PCPU(intr_nesting_level) ; /* now in interrupt context */ \
213 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
214 movl TD_CPL(%ebx), %eax ; /* eax = current cpl */ \
215 pushl %eax ; /* push CPL for doreti */ \
216 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
218 testl $IRQ_LBIT(irq_num), %eax ; /* IRQ blocked by the cpl? */ \
221 /* set the pending bit and return, leave interrupt masked */ \
222 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; /* defer: mark slow int pending */ \
223 orl $RQF_INTPEND, PCPU(reqflags) ; /* have doreti service it later */ \
226 /* set running bit, clear pending bit, run handler */ \
227 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; /* no longer pending */ \
232 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
233 movl intr_countp + (irq_num) * 4,%eax ; /* eax = per-IRQ counter ptr */ \
240 * Unmask a slow interrupt. This function is used by interrupt threads
241 * after they have descheduled themselves to reenable interrupts and
242 * possibly cause a reschedule to occur.
245 #define INTR_UNMASK(irq_num, vec_name, icu) \
249 pushl %ebp ; /* frame for ddb backtrace */ \
251 UNMASK_IRQ(icu, irq_num) ; /* clear this IRQ's IMR bit */ \
/*
 * Fast-interrupt entry points: IRQs 0-7 live on the master ICU
 * (IO_ICU1, master-only EOI); IRQs 8-15 live on the slave
 * (IO_ICU2, slave-then-master EOI).
 */
256 FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
257 FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
258 FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
259 FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
260 FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
261 FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
262 FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
263 FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
264 FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
265 FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
266 FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
267 FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
268 FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
269 FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
270 FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
271 FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
/*
 * Threaded (slow) interrupt entry points.  The clock (irq0) also latches
 * clkintr_pending before it is masked.
 * NOTE(review): the al/ah argument presumably selects the low or high
 * byte of the mask for IRQs 0-7 vs. 8-15; its use is in macro lines not
 * visible in this chunk — confirm against the full file.
 */
273 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
274 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
275 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
276 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
277 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
278 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
279 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
280 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
281 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
282 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
283 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
284 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
285 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
286 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
287 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
288 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
289 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
/*
 * FAST_UNPEND entry points: restart a fast interrupt that was deferred
 * (left masked with its fpending bit set) by a critical section or cpl.
 */
291 FAST_UNPEND(0,fastunpend0, IO_ICU1)
292 FAST_UNPEND(1,fastunpend1, IO_ICU1)
293 FAST_UNPEND(2,fastunpend2, IO_ICU1)
294 FAST_UNPEND(3,fastunpend3, IO_ICU1)
295 FAST_UNPEND(4,fastunpend4, IO_ICU1)
296 FAST_UNPEND(5,fastunpend5, IO_ICU1)
297 FAST_UNPEND(6,fastunpend6, IO_ICU1)
298 FAST_UNPEND(7,fastunpend7, IO_ICU1)
299 FAST_UNPEND(8,fastunpend8, IO_ICU2)
300 FAST_UNPEND(9,fastunpend9, IO_ICU2)
301 FAST_UNPEND(10,fastunpend10, IO_ICU2)
302 FAST_UNPEND(11,fastunpend11, IO_ICU2)
303 FAST_UNPEND(12,fastunpend12, IO_ICU2)
304 FAST_UNPEND(13,fastunpend13, IO_ICU2)
305 FAST_UNPEND(14,fastunpend14, IO_ICU2)
306 FAST_UNPEND(15,fastunpend15, IO_ICU2)