/*
 * sys/platform/pc32/isa/icu_vector.s
 *
 * (From commit "Finish migrating the cpl into the thread structure.")
 *
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
 * $DragonFly: src/sys/platform/pc32/isa/Attic/icu_vector.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
 */

/*
 * modified for PC98 by Kakefuda
 */
10
/*
 * ICU (8259A PIC) register layout and IRQ-mask helpers.
 *
 * ICU_IMR_OFFSET: byte offset of the interrupt-mask register from the
 * ICU's base I/O port (PC98 hardware decodes it at +2, standard AT at +1).
 */
#ifdef PC98
#define	ICU_IMR_OFFSET		2	/* IO_ICU{1,2} + 2 */
#else
#define	ICU_IMR_OFFSET		1	/* IO_ICU{1,2} + 1 */
#endif

#define	ICU_EOI			0x20	/* non-specific EOI command; XXX - define elsewhere */

/* Map an IRQ number to its bit within one 8-bit mask byte, and to the
 * byte index within a multi-byte mask array (e.g. _imen, _ipending). */
#define	IRQ_BIT(irq_num)	(1 << ((irq_num) % 8))
#define	IRQ_BYTE(irq_num)	((irq_num) >> 3)
21
/*
 * EOI macros.  With AUTO_EOI_n configured the ICU acknowledges interrupts
 * by itself and no I/O is needed; otherwise we must write an explicit
 * non-specific EOI command to each ICU involved.
 */
#ifdef AUTO_EOI_1
#define	ENABLE_ICU1		/* use auto-EOI to reduce i/o */
#define	OUTB_ICU1
#else
#define	ENABLE_ICU1 \
	movb	$ICU_EOI,%al ;	/* as soon as possible send EOI ... */ \
	OUTB_ICU1		/* ... to clear in service bit */
#define	OUTB_ICU1 \
	outb	%al,$IO_ICU1
#endif

#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define	ENABLE_ICU1_AND_2	ENABLE_ICU1
#else
#define	ENABLE_ICU1_AND_2 \
	movb	$ICU_EOI,%al ;	/* as above */ \
	outb	%al,$IO_ICU2 ;	/* but do second icu first ... */ \
	OUTB_ICU1		/* ... then first icu (if !AUTO_EOI_1) */
#endif
44
/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR(irq_num, vec_name, enable_icus):
 *	Entry for a "fast" interrupt.  Saves only the call-used registers
 *	(%eax/%ecx/%edx) plus %ds (and optionally %es), loads the kernel
 *	data selector, calls the handler immediately, then EOIs the ICU(s)
 *	via enable_icus.  On the way out it checks the current thread: if
 *	it is in a critical section (TD_PRI >= TDPRI_CRIT) or no masked
 *	interrupts became pending, it returns with a plain iret; otherwise
 *	it converts the thin frame into a full trap frame and jumps to
 *	_doreti to process pending interrupts.
 */

#define	FAST_INTR(irq_num, vec_name, enable_icus) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	%eax ;		/* save only call-used registers */ \
	pushl	%ecx ; \
	pushl	%edx ; \
	pushl	%ds ; \
	MAYBE_PUSHL_ES ; \
	mov	$KDSEL,%ax ; \
	mov	%ax,%ds ; \
	MAYBE_MOVW_AX_ES ; \
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
	pushl	_intr_unit + (irq_num) * 4 ; \
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	enable_icus ;		/* (re)enable ASAP (helps edge trigger?) */ \
	addl	$4,%esp ; \
	incl	_cnt+V_INTR ;	/* book-keeping can wait */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	_curthread, %ecx ; /* are we in a critical section? */ \
	cmpl	$TDPRI_CRIT,TD_PRI(%ecx) ; \
	jge	1f ; \
	movl	TD_MACH+MTD_CPL(%ecx),%eax ; /* unmasking pending ints? */ \
	notl	%eax ; \
	andl	_ipending,%eax ; \
	jne	2f ;		/* yes, maybe handle them */ \
1: ; \
	MEXITCOUNT ; \
	MAYBE_POPL_ES ; \
	popl	%ds ; \
	popl	%edx ; \
	popl	%ecx ; \
	popl	%eax ; \
	iret ; \
; \
	ALIGN_TEXT ; \
2: ; \
	cmpb	$3,_intr_nesting_level ; /* is there enough stack? */ \
	jae	1b ;		/* no, return */ \
	movl	TD_MACH+MTD_CPL(%ecx),%eax ; \
	/* XXX next line is probably unnecessary now. */ \
	movl	$HWI_MASK|SWI_MASK,TD_MACH+MTD_CPL(%ecx) ; /* limit nesting ... */ \
	incb	_intr_nesting_level ;	/* ... really limit it ... */ \
	sti ;			/* ... to do this as early as possible */ \
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */ \
	popl	%ecx ;		/* ... original %ds ... */ \
	popl	%edx ; \
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */ \
	pushal ;		/* build fat frame (grrr) ... */ \
	pushl	%ecx ;		/* ... actually %ds ... */ \
	pushl	%es ; \
	pushl	%fs ; \
	mov	$KDSEL,%ax ; \
	mov	%ax,%es ; \
	mov	%ax,%fs ; \
	movl	(3+8+0)*4(%esp),%ecx ;	/* ... %ecx from thin frame ... */ \
	movl	%ecx,(3+6)*4(%esp) ;	/* ... to fat frame ... */ \
	movl	(3+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */ \
	pushl	%eax ; \
	subl	$4,%esp ;	/* junk for unit number */ \
	MEXITCOUNT ; \
	jmp	_doreti
112
/*
 * INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending):
 *	Entry for a normal (non-fast) interrupt.  Builds a full trap frame
 *	(dummy error code + trap type, pushal, segment registers), masks
 *	this IRQ in _imen and the ICU's IMR, then EOIs.  If the current
 *	thread is in a critical section or the cpl masks this IRQ, the
 *	interrupt is only marked pending in _ipending and we iret; otherwise
 *	the cpl is widened with this handler's _intr_mask, interrupts are
 *	re-enabled, the handler is called, the IRQ is unmasked again
 *	(cli-protected so _imen and the ICU IMR stay consistent), and we
 *	exit through _doreti.  Xresume<irq_num> is the resumption point
 *	used when a pending interrupt is replayed later.
 */
#define	INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl	$0 ;		/* dummy error code */ \
	pushl	$0 ;		/* dummy trap type */ \
	pushal ; \
	pushl	%ds ;		/* save our data and extra segments ... */ \
	pushl	%es ; \
	pushl	%fs ; \
	mov	$KDSEL,%ax ;	/* ... and reload with kernel's own ... */ \
	mov	%ax,%ds ;	/* ... early for obsolete reasons */ \
	mov	%ax,%es ; \
	mov	%ax,%fs ; \
	maybe_extra_ipending ; \
	movb	_imen + IRQ_BYTE(irq_num),%al ; \
	orb	$IRQ_BIT(irq_num),%al ; \
	movb	%al,_imen + IRQ_BYTE(irq_num) ; \
	outb	%al,$icu+ICU_IMR_OFFSET ; \
	enable_icus ; \
	movl	_curthread, %ebx ; /* are we in a critical section? */ \
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ; \
	jge	2f ; \
	movl	TD_MACH+MTD_CPL(%ebx),%eax ; /* is this interrupt masked by the cpl? */ \
	testb	$IRQ_BIT(irq_num),%reg ; \
	jne	2f ; \
	incb	_intr_nesting_level ; \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX late to avoid double count */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */ \
	movl	_intr_countp + (irq_num) * 4,%eax ; \
	incl	(%eax) ; \
	movl	TD_MACH+MTD_CPL(%ebx),%eax ; \
	pushl	%eax ; \
	pushl	_intr_unit + (irq_num) * 4 ; \
	orl	_intr_mask + (irq_num) * 4,%eax ; \
	movl	%eax,TD_MACH+MTD_CPL(%ebx) ; \
	sti ; \
	call	*_intr_handler + (irq_num) * 4 ; \
	cli ;			/* must unmask _imen and icu atomically */ \
	movb	_imen + IRQ_BYTE(irq_num),%al ; \
	andb	$~IRQ_BIT(irq_num),%al ; \
	movb	%al,_imen + IRQ_BYTE(irq_num) ; \
	outb	%al,$icu+ICU_IMR_OFFSET ; \
	sti ;			/* XXX _doreti repeats the cli/sti */ \
	MEXITCOUNT ; \
	/* We could usually avoid the following jmp by inlining some of */ \
	/* _doreti, but it's probably better to use less cache. */ \
	jmp	_doreti ; \
; \
	ALIGN_TEXT ; \
2: ; \
	/* XXX skip mcounting here to avoid double count */ \
	orb	$IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
	movl	$TDPRI_CRIT,_reqpri ; \
	popl	%fs ; \
	popl	%es ; \
	popl	%ds ; \
	popal ; \
	addl	$4+4,%esp ;	/* drop dummy trap type + error code */ \
	iret
174
/*
 * Instantiate the 16 fast and 16 normal interrupt entry points.
 * IRQs 0-7 live on the master ICU (IO_ICU1, mask bits in %al);
 * IRQs 8-15 on the slave (IO_ICU2, mask bits in %ah) and must EOI
 * both ICUs.  IRQ0 (the clock) additionally latches clkintr_pending.
 */
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0, ENABLE_ICU1)
	FAST_INTR(1,fastintr1, ENABLE_ICU1)
	FAST_INTR(2,fastintr2, ENABLE_ICU1)
	FAST_INTR(3,fastintr3, ENABLE_ICU1)
	FAST_INTR(4,fastintr4, ENABLE_ICU1)
	FAST_INTR(5,fastintr5, ENABLE_ICU1)
	FAST_INTR(6,fastintr6, ENABLE_ICU1)
	FAST_INTR(7,fastintr7, ENABLE_ICU1)
	FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
	FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
	FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
	FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
	FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
	FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
	FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
	FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
	INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
	INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
	INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
	INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
	INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
	INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
	INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
	INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
210
211 .data
212 .globl _ihandlers
213_ihandlers: /* addresses of interrupt handlers */
214 /* actually resumption addresses for HWI's */
215 .long Xresume0, Xresume1, Xresume2, Xresume3
216 .long Xresume4, Xresume5, Xresume6, Xresume7
217 .long Xresume8, Xresume9, Xresume10, Xresume11
218 .long Xresume12, Xresume13, Xresume14, Xresume15
219 .long _swi_null, swi_net, _swi_null, _swi_null
220 .long _swi_vm, _swi_null, _softclock
221
222imasks: /* masks for interrupt handlers */
223 .space NHWI*4 /* padding; HWI masks are elsewhere */
224
225 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
226 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
227
228 .text