2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
4 * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.23 2005/11/03 23:45:12 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/ipl.h>
12 #include <machine/lock.h>
13 #include <machine/psl.h>
14 #include <machine/trap.h>
16 #include <i386/icu/icu.h>
17 #include <bus/isa/i386/isa.h>
/*
 * 8259 ICU constants and IRQ-number helpers.
 *   ICU_IMR_OFFSET — the interrupt mask register (OCW1) is at the
 *                    ICU base I/O port + 1 for both master and slave.
 *   ICU_EOI        — non-specific end-of-interrupt command byte (OCW2).
 *   IRQ_LBIT       — 32-bit mask for an IRQ, used on the per-cpu
 *                    fpending/ipending words below.
 *   IRQ_BIT/IRQ_BYTE — 8-bit mask and byte index used to address one
 *                    byte of the in-memory IMR shadow (icu_imen).
 */
23 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
25 #define ICU_EOI 0x20 /* XXX - define elsewhere */
27 #define IRQ_LBIT(irq_num) (1 << (irq_num))
28 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
29 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
/*
 * ENABLE_ICU1 — re-arm the master 8259 after taking an interrupt.
 * NOTE(review): only a fragment is visible here.  The empty define is
 * the AUTO_EOI_1 variant (hardware auto-EOI, nothing to do at interrupt
 * time); the movb/OUTB_ICU1 continuation lines below belong to the
 * manual-EOI variant whose #define line is not visible in this view.
 */
32 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
36 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
37 OUTB_ICU1 ; /* ... to clear in service bit */ \
46 * The data sheet says no auto-EOI on slave, but it sometimes works.
/*
 * ENABLE_ICU1_AND_2 — EOI both 8259s after a slave (IRQ 8-15) interrupt.
 * NOTE(review): fragment.  The first define is the AUTO_EOI_2 variant
 * (slave auto-EOIs, only the master needs attention); the second is the
 * manual variant, which must EOI the slave before the master.  The
 * surrounding #ifdef/#else/#endif lines are not visible in this view.
 */
48 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
50 #define ENABLE_ICU1_AND_2 \
51 movb $ICU_EOI,%al ; /* as above */ \
52 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
53 OUTB_ICU1 ; /* ... then first icu (if !AUTO_EOI_1) */ \
/*
 * NOTE(review): fragments of the frame-building macros.  The first
 * three lines belong to PUSH_FRAME (builds the trap frame doreti
 * expects: dummy error code, dummy trap type, general registers); the
 * remainder belongs to the dummy/phys-frame variant, which synthesizes
 * an interrupt frame (eflags/cs/eip) and reserves pushal + segment
 * register + CPL slots by adjusting %esp directly.  The #define lines
 * and several body lines are not visible in this view — do not assume
 * the frame layout from these lines alone; confirm against the full
 * file.
 */
61 pushl $0 ; /* dummy error code */ \
62 pushl $0 ; /* dummy trap type */ \
63 pushal ; /* 8 registers */ \
74 pushfl ; /* phys int frame / flags */ \
75 pushl %cs ; /* phys int frame / cs */ \
76 pushl 12(%esp) ; /* original caller eip */ \
77 pushl $0 ; /* dummy error code */ \
78 pushl $0 ; /* dummy trap type */ \
79 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
82 * Warning: POP_FRAME can only be used if there is no chance of a
83 * segment register being changed (e.g. by procfs), which is why syscalls
/*
 * NOTE(review): POP_FRAME fragment — only the stack adjustment that
 * discards the dummy trap-type/error-code words is visible here; the
 * #define line and the register pops are not in this view.
 */
91 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * MASK_IRQ(icu, irq_num) — mask one IRQ line at its 8259.
 * Sets the IRQ's bit in the in-memory shadow of the interrupt mask
 * (icu_imen) and writes the updated byte out to the ICU's IMR port.
 * The shadow is updated before the outb so icu_imen always reflects
 * what was last written to the hardware.  Clobbers %al and flags.
 */
96 #define MASK_IRQ(icu, irq_num) \
97 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* %al = shadow IMR byte */ \
98 orb $IRQ_BIT(irq_num),%al ; /* set this IRQ's mask bit */ \
99 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update shadow first */ \
100 outb %al,$icu+ICU_IMR_OFFSET ; /* then program the 8259 IMR */ \
/*
 * UNMASK_IRQ(icu, irq_num) — inverse of MASK_IRQ: clear the IRQ's bit
 * in the icu_imen shadow and write the byte back to the ICU's IMR
 * port, re-enabling delivery of that interrupt.  Clobbers %al and
 * flags.  NOTE(review): two interior lines (103-104) are not visible
 * in this view — presumably a comment or locking; verify in the full
 * file.
 */
102 #define UNMASK_IRQ(icu, irq_num) \
105 movb icu_imen + IRQ_BYTE(irq_num),%al ; /* %al = shadow IMR byte */ \
106 andb $~IRQ_BIT(irq_num),%al ; /* clear this IRQ's mask bit */ \
107 movb %al,icu_imen + IRQ_BYTE(irq_num) ; /* update shadow first */ \
108 outb %al,$icu+ICU_IMR_OFFSET ; /* then program the 8259 IMR */ \
112 * Fast interrupt call handlers run in the following sequence:
114 * - Push the trap frame required by doreti.
115 * - Mask the interrupt and reenable its source.
116 * - If we cannot take the interrupt set its fpending bit and
118 * - If we can take the interrupt clear its fpending bit,
119 * call the handler, then unmask the interrupt and doreti.
121 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * NOTE(review): FAST_INTR body fragment — the ENTRY/label lines, the
 * conditional branch after the TD_PRI compare, and the doreti tail are
 * not visible in this view.  What is visible matches the sequence
 * documented above: mask+EOI, then either defer (set fpending +
 * RQF_INTPEND) or dispatch via ithread_fast_handler and unmask.
 */
125 #define FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
130 FAKE_MCOUNT(13*4(%esp)) ; \
131 maybe_extra_ipending ; \
132 MASK_IRQ(icu, irq_num) ; /* mask at the 8259 before EOI */ \
134 movl PCPU(curthread),%ebx ; /* %ebx = curthread */ \
135 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
136 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* inside a critical section? */ \
139 /* set pending bit and return, leave interrupt masked */ \
140 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
141 orl $RQF_INTPEND, PCPU(reqflags) ; \
144 /* clear pending bit, run handler */ \
145 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
147 call ithread_fast_handler ; /* returns 0 to unmask int */ \
149 UNMASK_IRQ(icu, irq_num) ; \
155 * Slow interrupt call handlers run in the following sequence:
157 * - Push the trap frame required by doreti.
158 * - Mask the interrupt and reenable its source.
159 * - If we cannot take the interrupt set its ipending bit and
160 * doreti. In addition to checking for a critical section
161 * and cpl mask we also check to see if the thread is still
163 * - If we can take the interrupt clear its ipending bit
164 * and schedule its thread. Leave interrupts masked and doreti.
166 * sched_ithd() is called with interrupts enabled and outside of a
167 * critical section (so it can preempt us).
169 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
171 * Note that intr_nesting_level is not bumped during sched_ithd because
172 * blocking allocations are allowed in the preemption case.
174 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * NOTE(review): INTR body fragment — the ENTRY/label lines, the branch
 * after the TD_PRI compare, the sched_ithd call, and the doreti tail
 * are not visible in this view.  Visible code mirrors FAST_INTR but
 * uses the per-cpu ipending word (slow interrupts stay masked until
 * their ithread unmasks them via INTR_UNMASK).  The `reg' parameter
 * (al/ah at the instantiation sites) is not used on any visible line —
 * presumably consumed by the missing cpl/imen manipulation; confirm
 * against the full file.
 */
178 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
183 FAKE_MCOUNT(13*4(%esp)) ; \
184 maybe_extra_ipending ; \
185 MASK_IRQ(icu, irq_num) ; /* mask at the 8259 before EOI */ \
187 movl PCPU(curthread),%ebx ; /* %ebx = curthread */ \
188 pushl $0 ; /* DUMMY CPL FOR DORETI */ \
189 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* inside a critical section? */ \
192 /* set the pending bit and return, leave interrupt masked */ \
193 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
194 orl $RQF_INTPEND, PCPU(reqflags) ; \
197 /* set running bit, clear pending bit, run handler */ \
198 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
208 * Unmask a slow interrupt. This function is used by interrupt threads
209 * after they have descheduled themselves to reenable interrupts and
210 * possibly cause a reschedule to occur.
/*
 * NOTE(review): INTR_UNMASK fragment — the ENTRY line, the frame
 * teardown, and the ret are not visible in this view.  What remains is
 * the ddb-backtrace frame push and the shared UNMASK_IRQ sequence.
 */
213 #define INTR_UNMASK(irq_num, vec_name, icu) \
217 pushl %ebp ; /* frame for ddb backtrace */ \
220 UNMASK_IRQ(icu, irq_num) ; \
/*
 * Fast-interrupt entry points for ISA IRQs 0-15.  IRQs 0-7 live on the
 * master 8259 (IO_ICU1); IRQs 8-15 on the slave (IO_ICU2), whose
 * enable macro must EOI both ICUs.  The trailing empty argument is the
 * maybe_extra_ipending hook of FAST_INTR (unused for the ICU vectors).
 */
225 FAST_INTR(0,icu_fastintr0, IO_ICU1, ENABLE_ICU1,)
226 FAST_INTR(1,icu_fastintr1, IO_ICU1, ENABLE_ICU1,)
227 FAST_INTR(2,icu_fastintr2, IO_ICU1, ENABLE_ICU1,)
228 FAST_INTR(3,icu_fastintr3, IO_ICU1, ENABLE_ICU1,)
229 FAST_INTR(4,icu_fastintr4, IO_ICU1, ENABLE_ICU1,)
230 FAST_INTR(5,icu_fastintr5, IO_ICU1, ENABLE_ICU1,)
231 FAST_INTR(6,icu_fastintr6, IO_ICU1, ENABLE_ICU1,)
232 FAST_INTR(7,icu_fastintr7, IO_ICU1, ENABLE_ICU1,)
233 FAST_INTR(8,icu_fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,)
234 FAST_INTR(9,icu_fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
235 FAST_INTR(10,icu_fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
236 FAST_INTR(11,icu_fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
237 FAST_INTR(12,icu_fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
238 FAST_INTR(13,icu_fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
239 FAST_INTR(14,icu_fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
240 FAST_INTR(15,icu_fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)
/*
 * Slow (threaded) interrupt entry points for ISA IRQs 0-15.  Same
 * master/slave split as the fast vectors above.  The `reg' argument is
 * al for master-ICU IRQs and ah for slave-ICU IRQs — i.e. the byte of
 * %eax corresponding to that ICU's IMR byte; its use is inside the
 * INTR macro body, parts of which are not visible in this view.  The
 * trailing empty argument is the maybe_extra_ipending hook (unused).
 */
242 INTR(0,icu_slowintr0, IO_ICU1, ENABLE_ICU1, al,)
243 INTR(1,icu_slowintr1, IO_ICU1, ENABLE_ICU1, al,)
244 INTR(2,icu_slowintr2, IO_ICU1, ENABLE_ICU1, al,)
245 INTR(3,icu_slowintr3, IO_ICU1, ENABLE_ICU1, al,)
246 INTR(4,icu_slowintr4, IO_ICU1, ENABLE_ICU1, al,)
247 INTR(5,icu_slowintr5, IO_ICU1, ENABLE_ICU1, al,)
248 INTR(6,icu_slowintr6, IO_ICU1, ENABLE_ICU1, al,)
249 INTR(7,icu_slowintr7, IO_ICU1, ENABLE_ICU1, al,)
250 INTR(8,icu_slowintr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
251 INTR(9,icu_slowintr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
252 INTR(10,icu_slowintr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
253 INTR(11,icu_slowintr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
254 INTR(12,icu_slowintr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
255 INTR(13,icu_slowintr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
256 INTR(14,icu_slowintr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
257 INTR(15,icu_slowintr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)