2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.31 2008/01/14 15:27:17 dillon Exp $
7 * WARNING! SMP builds can use the ICU now so this code must be MP safe.
11 #include "opt_auto_eoi.h"
13 #include <machine/asmacros.h>
14 #include <machine/lock.h>
15 #include <machine/psl.h>
16 #include <machine/trap.h>
18 #include <machine_base/icu/icu.h>
19 #include <bus/isa/i386/isa.h>
26 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1: each 8259's IMR sits at base port + 1 */
28 #define ICU_EOI 0x20 /* XXX - define elsewhere; 8259 OCW2 non-specific EOI */
30 #define IRQ_LBIT(irq_num) (1 << (irq_num)) /* bit in a 32-bit pending mask (fpending/ipending) */
31 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) /* bit within one ICU's 8-bit IMR byte */
32 #define IRQ_BYTE(irq_num) ((irq_num) >> 3) /* icu_imen byte index: 0 = master, 1 = slave */
35 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
/*
 * NOTE(review): continuation lines of the non-auto-EOI ENABLE_ICU1 variant;
 * its "#define ENABLE_ICU1 \" head (and the surrounding #ifdef AUTO_EOI_1
 * conditional) is not visible in this chunk.  Clobbers %al.
 */
39 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
40 OUTB_ICU1 ; /* ... to clear in service bit */ \
49 * The data sheet says no auto-EOI on slave, but it sometimes works.
/*
 * ENABLE_ICU1_AND_2: acknowledge an interrupt that arrived through the
 * slave ICU.  First variant defers to ENABLE_ICU1; the second explicitly
 * EOIs the slave (IO_ICU2) and then the master.  NOTE(review): the two
 * defines are presumably alternate branches of an #ifdef AUTO_EOI_2
 * conditional that is not visible in this chunk.  Clobbers %al.
 */
51 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
53 #define ENABLE_ICU1_AND_2 \
54 movb $ICU_EOI,%al ; /* as above */ \
55 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
56 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/*
 * NOTE(review): continuation lines of an interrupt-frame push macro whose
 * head is elided from this chunk.  Fills the trap-frame slots a real trap
 * would have supplied, then saves all eight GP registers.
 */
64 pushl $0 ; /* dummy error code */ \
65 pushl $0 ; /* dummy trap type */ \
66 pushl $0 ; /* dummy xflags */ \
67 pushal ; /* 8 registers */ \
/*
 * NOTE(review): fragment of a dummy-frame macro (head elided).  Builds a
 * trap-frame lookalike from a bare physical interrupt frame: re-push
 * flags/cs and the interrupted eip, add the three dummy words, then just
 * reserve -- without saving -- the pushal/segment/CPL area (13 dwords).
 */
80 pushfl ; /* phys int frame / flags */ \
81 pushl %cs ; /* phys int frame / cs */ \
82 pushl 12(%esp) ; /* original caller eip */ \
83 pushl $0 ; /* dummy error code */ \
84 pushl $0 ; /* dummy trap type */ \
85 pushl $0 ; /* dummy xflags */ \
86 subl $13*4,%esp ; /* pushal + 4 seg regs (dummy) + CPL */ \
89 * Warning: POP_FRAME can only be used if there is no chance of a
90 * segment register being changed (e.g. by procfs), which is why syscalls
99 addl $2*4,%esp ; /* tail of POP_FRAME (head elided): discard dummy trap & error codes */ \
/*
 * MASK_IRQ(icu, irq_num): mask irq_num at its 8259 by setting its bit in
 * the in-memory mask copy (icu_imen) and writing that byte to the ICU's
 * IMR (base port + 1).  Clobbers %al.  NOTE(review): any interior
 * locking lines are elided from this chunk.
 */
104 #define MASK_IRQ(icu, irq_num) \
106 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* al = soft copy of IMR byte */ \
107 orb $IRQ_BIT(irq_num),%al ; /* set this irq's mask bit */ \
108 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update soft copy */ \
109 outb %al,$icu+ICU_IMR_OFFSET ; /* write it to the hardware IMR */ \
/*
 * UNMASK_IRQ(icu, irq_num): inverse of MASK_IRQ -- clear the irq's bit
 * in icu_imen and push the updated byte to the ICU's IMR.  Clobbers %al.
 * NOTE(review): interior lines (e.g. locking) are elided from this chunk.
 */
112 #define UNMASK_IRQ(icu, irq_num) \
116 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* al = soft copy of IMR byte */ \
117 andb $~IRQ_BIT(irq_num),%al ; /* clear this irq's mask bit */ \
118 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update soft copy */ \
119 outb %al,$icu+ICU_IMR_OFFSET ; /* write it to the hardware IMR */ \
124 * Fast interrupt call handlers run in the following sequence:
126 * - Push the trap frame required by doreti.
127 * - Mask the interrupt and reenable its source.
128 * - If we cannot take the interrupt set its fpending bit and
130 * - If we can take the interrupt clear its fpending bit,
131 * call the handler, then unmask the interrupt and doreti.
133 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * FAST_INTR(irq_num, vec_name, icu, enable_icus): entry stub for a fast
 * interrupt.  Masks the source at the ICU, then either runs the handler
 * inline or defers it by setting the irq's fpending bit for doreti.
 * NOTE(review): the entry label, frame push, EOI (enable_icus), and the
 * conditional branches between these steps are elided from this chunk.
 */
137 #define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
142 FAKE_MCOUNT(15*4(%esp)) ; /* profiling hook */ \
143 MASK_IRQ(icu, irq_num) ; /* mask this irq at the ICU */ \
145 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
146 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
147 testl $-1,TD_NEST_COUNT(%ebx) ; /* ZF set iff nest count is zero */ \
149 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
152 /* set pending bit and return, leave interrupt masked */ \
153 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
154 orl $RQF_INTPEND, PCPU(reqflags) ; /* tell doreti an int is pending */ \
157 /* clear pending bit, run handler */ \
158 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
160 pushl %esp ; /* pass frame by reference */ \
161 call ithread_fast_handler ; /* returns 0 to unmask int */ \
163 UNMASK_IRQ(icu, irq_num) ; \
169 * Slow interrupt call handlers run in the following sequence:
171 * - Push the trap frame required by doreti.
172 * - Mask the interrupt and reenable its source.
173 * - If we cannot take the interrupt set its ipending bit and
174 * doreti. In addition to checking for a critical section
175 * and cpl mask we also check to see if the thread is still
177 * - If we can take the interrupt clear its ipending bit
178 * and schedule its thread. Leave interrupts masked and doreti.
180 * sched_ithd() is called with interrupts enabled and outside of a
181 * critical section (so it can preempt us).
183 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
185 * Note that intr_nesting_level is not bumped during sched_ithd because
186 * blocking allocations are allowed in the preemption case.
188 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * SLOW_INTR(irq_num, vec_name, icu, enable_icus): entry stub for a
 * threaded (slow) interrupt.  Masks the source, then either schedules
 * the handler thread or records the irq in ipending for doreti.  The
 * irq stays masked; the ithread unmasks it when done.  NOTE(review):
 * the entry label, frame push, EOI, conditional branches, and the
 * sched_ithd call are elided from this chunk.
 */
192 #define SLOW_INTR(irq_num, vec_name, icu, enable_icus) \
197 FAKE_MCOUNT(15*4(%esp)) ; /* profiling hook */ \
198 MASK_IRQ(icu, irq_num) ; /* mask this irq at the ICU */ \
199 incl PCPU(cnt) + V_INTR ; /* per-cpu interrupt statistic */ \
201 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
202 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
203 testl $-1,TD_NEST_COUNT(%ebx) ; /* ZF set iff nest count is zero */ \
205 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
208 /* set the pending bit and return, leave interrupt masked */ \
209 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
210 orl $RQF_INTPEND, PCPU(reqflags) ; /* tell doreti an int is pending */ \
213 /* set running bit, clear pending bit, run handler */ \
214 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
215 incl TD_NEST_COUNT(%ebx) ; /* hold nest count across elided call (paired with decl) */ \
221 decl TD_NEST_COUNT(%ebx) ; \
227 * Unmask a slow interrupt. This function is used by interrupt threads
228 * after they have descheduled themselves to reenable interrupts and
229 * possibly cause a reschedule to occur.
232 #define INTR_UNMASK(irq_num, vec_name, icu) /* NOTE(review): entry label and return elided */ \
236 pushl %ebp ; /* frame for ddb backtrace */ \
239 UNMASK_IRQ(icu, irq_num) ; /* reenable this irq at the ICU */ \
/*
 * Fast-interrupt entry points for the 16 ISA irqs: 0-7 live on the
 * master 8259 (IO_ICU1), 8-15 on the slave (IO_ICU2).  Slave irqs must
 * EOI both ICUs, hence ENABLE_ICU1_AND_2.
 */
244 FAST_INTR(0,icu_fastintr0, IO_ICU1, ENABLE_ICU1)
245 FAST_INTR(1,icu_fastintr1, IO_ICU1, ENABLE_ICU1)
246 FAST_INTR(2,icu_fastintr2, IO_ICU1, ENABLE_ICU1)
247 FAST_INTR(3,icu_fastintr3, IO_ICU1, ENABLE_ICU1)
248 FAST_INTR(4,icu_fastintr4, IO_ICU1, ENABLE_ICU1)
249 FAST_INTR(5,icu_fastintr5, IO_ICU1, ENABLE_ICU1)
250 FAST_INTR(6,icu_fastintr6, IO_ICU1, ENABLE_ICU1)
251 FAST_INTR(7,icu_fastintr7, IO_ICU1, ENABLE_ICU1)
252 FAST_INTR(8,icu_fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
253 FAST_INTR(9,icu_fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
254 FAST_INTR(10,icu_fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
255 FAST_INTR(11,icu_fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
256 FAST_INTR(12,icu_fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
257 FAST_INTR(13,icu_fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
258 FAST_INTR(14,icu_fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
259 FAST_INTR(15,icu_fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
/*
 * Threaded (slow) interrupt entry points, mirroring the fast vectors:
 * irqs 0-7 on the master 8259 (IO_ICU1), 8-15 on the slave (IO_ICU2).
 */
261 SLOW_INTR(0,icu_slowintr0, IO_ICU1, ENABLE_ICU1)
262 SLOW_INTR(1,icu_slowintr1, IO_ICU1, ENABLE_ICU1)
263 SLOW_INTR(2,icu_slowintr2, IO_ICU1, ENABLE_ICU1)
264 SLOW_INTR(3,icu_slowintr3, IO_ICU1, ENABLE_ICU1)
265 SLOW_INTR(4,icu_slowintr4, IO_ICU1, ENABLE_ICU1)
266 SLOW_INTR(5,icu_slowintr5, IO_ICU1, ENABLE_ICU1)
267 SLOW_INTR(6,icu_slowintr6, IO_ICU1, ENABLE_ICU1)
268 SLOW_INTR(7,icu_slowintr7, IO_ICU1, ENABLE_ICU1)
269 SLOW_INTR(8,icu_slowintr8, IO_ICU2, ENABLE_ICU1_AND_2)
270 SLOW_INTR(9,icu_slowintr9, IO_ICU2, ENABLE_ICU1_AND_2)
271 SLOW_INTR(10,icu_slowintr10, IO_ICU2, ENABLE_ICU1_AND_2)
272 SLOW_INTR(11,icu_slowintr11, IO_ICU2, ENABLE_ICU1_AND_2)
273 SLOW_INTR(12,icu_slowintr12, IO_ICU2, ENABLE_ICU1_AND_2)
274 SLOW_INTR(13,icu_slowintr13, IO_ICU2, ENABLE_ICU1_AND_2)
275 SLOW_INTR(14,icu_slowintr14, IO_ICU2, ENABLE_ICU1_AND_2)
276 SLOW_INTR(15,icu_slowintr15, IO_ICU2, ENABLE_ICU1_AND_2)