/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.16 2004/01/30 05:42:16 dillon Exp $
 */

/*
 * modified for PC98 by Kakefuda
 */

#ifdef PC98
#define	ICU_IMR_OFFSET		2	/* IO_ICU{1,2} + 2 */
#else
#define	ICU_IMR_OFFSET		1	/* IO_ICU{1,2} + 1 */
#endif

#define	ICU_EOI			0x20	/* XXX - define elsewhere */

#define	IRQ_LBIT(irq_num)	(1 << (irq_num))
#define	IRQ_BIT(irq_num)	(1 << ((irq_num) % 8))
#define	IRQ_BYTE(irq_num)	((irq_num) >> 3)

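/*
 * Worked example (illustrative only): for irq_num 11 the macros above
 * evaluate to IRQ_LBIT(11) = 1 << 11 = 0x800, IRQ_BIT(11) = 1 << (11 % 8)
 * = 0x08, and IRQ_BYTE(11) = 11 >> 3 = 1, i.e. bit 3 of the second byte
 * of the imen mask image (the byte covering irqs 8-15 on the slave ICU).
 */
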
#ifdef AUTO_EOI_1
#define	ENABLE_ICU1		/* use auto-EOI to reduce i/o */
#define	OUTB_ICU1
#else
#define	ENABLE_ICU1 \
	movb	$ICU_EOI,%al ;	/* as soon as possible send EOI ... */ \
	OUTB_ICU1 ;		/* ... to clear in service bit */ \

#define	OUTB_ICU1 \
	outb	%al,$IO_ICU1 ; \

#endif

#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define	ENABLE_ICU1_AND_2	ENABLE_ICU1
#else
#define	ENABLE_ICU1_AND_2 \
	movb	$ICU_EOI,%al ;	/* as above */ \
	outb	%al,$IO_ICU2 ;	/* but do second icu first ... */ \
	OUTB_ICU1 ;		/* ... then first icu (if !AUTO_EOI_1) */ \

#endif

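/*
 * Illustrative sketch: on a standard AT-style (non-PC98) configuration,
 * where IO_ICU1/IO_ICU2 are conventionally 0x20/0xa0, the !AUTO_EOI_2
 * form of ENABLE_ICU1_AND_2 above just writes the non-specific EOI
 * command (ICU_EOI, 0x20) to the slave and then to the master:
 *
 *	movb	$0x20,%al		(ICU_EOI)
 *	outb	%al,$0xa0		(slave, IO_ICU2)
 *	outb	%al,$0x20		(master, IO_ICU1)
 */
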
/*
 * Macro helpers
 */
#define	PUSH_FRAME \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ;		/* 8 registers */ \
	pushl	%ds ; \
	pushl	%es ; \
	pushl	%fs ; \
	mov	$KDSEL,%ax ; \
	mov	%ax,%ds ; \
	mov	%ax,%es ; \
	mov	$KPSEL,%ax ; \
	mov	%ax,%fs ; \

#define	PUSH_DUMMY \
	pushfl ;		/* phys int frame / flags */ \
	pushl	%cs ;		/* phys int frame / cs */ \
	pushl	12(%esp) ;	/* original caller eip */ \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	subl	$12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define	POP_FRAME \
	popl	%fs ; \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$2*4,%esp ;	/* dummy trap & error codes */ \

#define	POP_DUMMY \
	addl	$17*4,%esp ; \

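
/*
 * Frame accounting (derived from the macros above):
 *
 *	PUSH_FRAME pushes 2 dummy codes + 8 registers (pushal) + 3 segment
 *	registers = 13 dwords, so 13*4(%esp) in the vectors below addresses
 *	the %eip that the CPU pushed when it delivered the interrupt.
 *
 *	PUSH_DUMMY pushes 3 dwords (pushfl, %cs, caller %eip) + 2 dummy
 *	codes and then reserves 12 more dwords, 17 in total, which is
 *	exactly what POP_DUMMY's addl $17*4,%esp removes.
 */
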
#define	MASK_IRQ(icu, irq_num) \
	movb	imen + IRQ_BYTE(irq_num),%al ; \
	orb	$IRQ_BIT(irq_num),%al ; \
	movb	%al,imen + IRQ_BYTE(irq_num) ; \
	outb	%al,$icu+ICU_IMR_OFFSET ; \

#define	UNMASK_IRQ(icu, irq_num) \
	movb	imen + IRQ_BYTE(irq_num),%al ; \
	andb	$~IRQ_BIT(irq_num),%al ; \
	movb	%al,imen + IRQ_BYTE(irq_num) ; \
	outb	%al,$icu+ICU_IMR_OFFSET ; \

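
/*
 * As a sketch (continuing the irq 11 example above), MASK_IRQ(IO_ICU2, 11)
 * expands to roughly:
 *
 *	movb	imen + 1,%al		(mask image byte for irqs 8-15)
 *	orb	$0x08,%al		(set the bit for irq 11)
 *	movb	%al,imen + 1
 *	outb	%al,$IO_ICU2+ICU_IMR_OFFSET
 *
 * i.e. the software mask image is updated first and then written out to
 * the slave ICU's IMR; UNMASK_IRQ is the same with the bit cleared.
 */
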
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask the interrupt and doreti.
 *
 *	YYY can cache gd base pointer instead of using hidden %fs
 *	prefixes.
 */

#define	FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	FAKE_MCOUNT(13*4(%esp)) ; \
	maybe_extra_ipending ; \
	MASK_IRQ(icu, irq_num) ; \
	enable_icus ; \
	movl	PCPU(curthread),%ebx ; \
	movl	TD_CPL(%ebx),%eax ;	/* save the cpl for doreti */ \
	pushl	%eax ; \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	1f ; \
	testl	$IRQ_LBIT(irq_num), %eax ; \
	jz	2f ; \
1: ; \
	/* set pending bit and return, leave interrupt masked */ \
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ; \
	orl	$RQF_INTPEND, PCPU(reqflags) ; \
	jmp	5f ; \
2: ; \
	/* clear pending bit, run handler */ \
	incl	PCPU(intr_nesting_level) ; \
	addl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ; \
	pushl	intr_unit + (irq_num) * 4 ; \
	call	*intr_handler + (irq_num) * 4 ; \
	addl	$4,%esp ; \
	subl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	decl	PCPU(intr_nesting_level) ; \
	incl	PCPU(cnt)+V_INTR ;	/* book-keeping YYY make per-cpu */ \
	movl	intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	UNMASK_IRQ(icu, irq_num) ; \
5: ; \
	MEXITCOUNT ; \
	jmp	doreti ; \

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 *	- Push a dummy trap frame as required by doreti.
 *	- The interrupt source is already masked.
 *	- Clear the fpending bit
 *	- Run the handler
 *	- Unmask the interrupt
 *	- Pop the dummy frame and do a normal return
 *
 *	YYY can cache gd base pointer instead of using hidden %fs
 *	prefixes.
 */
#define	FAST_UNPEND(irq_num, vec_name, icu) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%ebp ; \
	movl	%esp,%ebp ; \
	PUSH_DUMMY ; \
	pushl	intr_unit + (irq_num) * 4 ; \
	call	*intr_handler + (irq_num) * 4 ; \
	addl	$4, %esp ; \
	incl	PCPU(cnt)+V_INTR ; \
	movl	intr_countp + (irq_num) * 4, %eax ; \
	incl	(%eax) ; \
	UNMASK_IRQ(icu, irq_num) ; \
	POP_DUMMY ; \
	popl	%ebp ; \
	ret ; \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule its thread.  Leave interrupts masked and doreti.
 *
 *	sched_ithd() is called with interrupts enabled and outside of a
 *	critical section (so it can preempt us).
 *
 *	YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
 *
 *	Note that intr_nesting_level is not bumped during sched_ithd because
 *	blocking allocations are allowed in the preemption case.
 *
 *	YYY can cache gd base pointer instead of using hidden %fs
 *	prefixes.
 */

#define	INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	FAKE_MCOUNT(13*4(%esp)) ; \
	maybe_extra_ipending ; \
	MASK_IRQ(icu, irq_num) ; \
	enable_icus ; \
	movl	PCPU(curthread),%ebx ; \
	movl	TD_CPL(%ebx), %eax ; \
	pushl	%eax ;			/* push CPL for doreti */ \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	1f ; \
	testl	$IRQ_LBIT(irq_num), %eax ; \
	jz	2f ; \
1: ; \
	/* set the pending bit and return, leave interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ; \
	orl	$RQF_INTPEND, PCPU(reqflags) ; \
	jmp	5f ; \
2: ; \
	/* set running bit, clear pending bit, run handler */ \
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ; \
	sti ; \
	pushl	$irq_num ; \
	call	sched_ithd ; \
	addl	$4,%esp ; \
	incl	PCPU(cnt)+V_INTR ;	/* book-keeping YYY make per-cpu */ \
	movl	intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
5: ; \
	MEXITCOUNT ; \
	jmp	doreti ; \

/*
 * Unmask a slow interrupt.  This function is used by interrupt threads
 * after they have descheduled themselves to reenable interrupts and
 * possibly cause a reschedule to occur.
 */

#define	INTR_UNMASK(irq_num, vec_name, icu) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%ebp ;		/* frame for ddb backtrace */ \
	movl	%esp, %ebp ; \
	UNMASK_IRQ(icu, irq_num) ; \
	popl	%ebp ; \
	ret ; \

MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1,)
	FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
	FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)

	INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al,)
	INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
	INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
	INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
	INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
	INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
	INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
	INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
	INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)

	FAST_UNPEND(0,fastunpend0, IO_ICU1)
	FAST_UNPEND(1,fastunpend1, IO_ICU1)
	FAST_UNPEND(2,fastunpend2, IO_ICU1)
	FAST_UNPEND(3,fastunpend3, IO_ICU1)
	FAST_UNPEND(4,fastunpend4, IO_ICU1)
	FAST_UNPEND(5,fastunpend5, IO_ICU1)
	FAST_UNPEND(6,fastunpend6, IO_ICU1)
	FAST_UNPEND(7,fastunpend7, IO_ICU1)
	FAST_UNPEND(8,fastunpend8, IO_ICU2)
	FAST_UNPEND(9,fastunpend9, IO_ICU2)
	FAST_UNPEND(10,fastunpend10, IO_ICU2)
	FAST_UNPEND(11,fastunpend11, IO_ICU2)
	FAST_UNPEND(12,fastunpend12, IO_ICU2)
	FAST_UNPEND(13,fastunpend13, IO_ICU2)
	FAST_UNPEND(14,fastunpend14, IO_ICU2)
	FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)

	.data

	.text