/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.17 2005/02/27 12:44:43 asmodai Exp $
 */

#define ICU_IMR_OFFSET          1       /* IO_ICU{1,2} + 1 */

#define ICU_EOI                 0x20    /* XXX - define elsewhere */

#define IRQ_LBIT(irq_num)       (1 << (irq_num))
#define IRQ_BIT(irq_num)        (1 << ((irq_num) % 8))
#define IRQ_BYTE(irq_num)       ((irq_num) >> 3)
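
/*
 * Worked example of the bitmask helpers: for irq_num 11 (a slave-ICU
 * interrupt), IRQ_BYTE(11) = 11 >> 3 = 1 (second byte of imen),
 * IRQ_BIT(11) = 1 << (11 % 8) = 0x08 (bit within that byte), and
 * IRQ_LBIT(11) = 1 << 11 = 0x800 (bit within the 32 bit
 * fpending/ipending words used below).
 */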

#ifdef AUTO_EOI_1
#define ENABLE_ICU1             /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
#else
#define ENABLE_ICU1                                                     \
        movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */  \
        OUTB_ICU1 ;             /* ... to clear in service bit */       \

#define OUTB_ICU1                                                       \
        outb    %al,$IO_ICU1 ;                                          \

#endif

#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define ENABLE_ICU1_AND_2       ENABLE_ICU1
#else
#define ENABLE_ICU1_AND_2                                               \
        movb    $ICU_EOI,%al ;  /* as above */                          \
        outb    %al,$IO_ICU2 ;  /* but do second icu first ... */       \
        OUTB_ICU1 ;     /* ... then first icu (if !AUTO_EOI_1) */       \

#endif
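
/*
 * For reference, with neither AUTO_EOI option defined, a slave-ICU
 * interrupt ends up issuing (the expansion of ENABLE_ICU1_AND_2):
 *
 *      movb    $ICU_EOI,%al            (ICU_EOI is 0x20)
 *      outb    %al,$IO_ICU2            (EOI the slave first ...)
 *      outb    %al,$IO_ICU1            (... then the master)
 */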

/*
 * Macro helpers
 */
#define PUSH_FRAME                                                      \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        pushal ;                /* 8 registers */                       \
        pushl   %ds ;                                                   \
        pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        mov     $KDSEL,%ax ;                                            \
        mov     %ax,%ds ;                                               \
        mov     %ax,%es ;                                               \
        mov     $KPSEL,%ax ;                                            \
        mov     %ax,%fs ;                                               \

#define PUSH_DUMMY                                                      \
        pushfl ;                /* phys int frame / flags */            \
        pushl %cs ;             /* phys int frame / cs */               \
        pushl   12(%esp) ;      /* original caller eip */               \
        pushl   $0 ;            /* dummy error code */                  \
        pushl   $0 ;            /* dummy trap type */                   \
        subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME                                                       \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
        addl    $2*4,%esp ;     /* dummy trap & error codes */          \

#define POP_DUMMY                                                       \
        addl    $17*4,%esp ;                                            \

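/*
 * Frame size bookkeeping: PUSH_FRAME pushes 13 dwords (dummy error code
 * and trap type, 8 registers from pushal, %ds/%es/%fs), which is why
 * FAKE_MCOUNT below uses 13*4(%esp).  PUSH_DUMMY builds 17 dwords
 * (eflags, %cs, eip, dummy error code and trap type, plus the 12
 * reserved by the subl), matching the $17*4 popped by POP_DUMMY.
 */
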
#define MASK_IRQ(icu, irq_num)                                          \
        movb    imen + IRQ_BYTE(irq_num),%al ;                          \
        orb     $IRQ_BIT(irq_num),%al ;                                 \
        movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
        outb    %al,$icu+ICU_IMR_OFFSET ;                               \

#define UNMASK_IRQ(icu, irq_num)                                        \
        movb    imen + IRQ_BYTE(irq_num),%al ;                          \
        andb    $~IRQ_BIT(irq_num),%al ;                                \
        movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
        outb    %al,$icu+ICU_IMR_OFFSET ;                               \

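/*
 * For example, MASK_IRQ(IO_ICU1, 3) expands to roughly:
 *
 *      movb    imen + 0,%al            (load the master mask byte)
 *      orb     $0x08,%al               (set bit 3)
 *      movb    %al,imen + 0            (update the shadow copy)
 *      outb    %al,$IO_ICU1+1          (write the ICU's IMR)
 *
 * i.e. the in-memory mask shadow (imen) is updated first and then
 * written out to the controller.
 */
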
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its fpending bit and
 *        doreti.
 *      - If we can take the interrupt clear its fpending bit,
 *        call the handler, then unmask the interrupt and doreti.
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */
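
/*
 * In C-like pseudocode the decision made below is approximately (a
 * sketch for illustration; names follow the assembler symbols, and the
 * real code also adjusts TD_PRI, intr_nesting_level and the counters):
 *
 *      if (in a critical section || (cpl & IRQ_LBIT(irq_num))) {
 *              fpending |= IRQ_LBIT(irq_num);          leave IRQ masked
 *              reqflags |= RQF_INTPEND;
 *      } else {
 *              fpending &= ~IRQ_LBIT(irq_num);
 *              (*intr_handler[irq_num])(intr_unit[irq_num]);
 *              UNMASK_IRQ(icu, irq_num);
 *      }
 *      jmp doreti with the saved cpl on the stack;
 */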

#define FAST_INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        FAKE_MCOUNT(13*4(%esp)) ;                                       \
        maybe_extra_ipending ;                                          \
        MASK_IRQ(icu, irq_num) ;                                        \
        enable_icus ;                                                   \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    TD_CPL(%ebx),%eax ;     /* save the cpl for doreti */   \
        pushl   %eax ;                                                  \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
        testl   $IRQ_LBIT(irq_num), %eax ;                              \
        jz      2f ;                                                    \
1: ;                                                                    \
        /* set pending bit and return, leave interrupt masked */        \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        incl    PCPU(intr_nesting_level) ;                              \
        addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushl   intr_unit + (irq_num) * 4 ;                             \
        call    *intr_handler + (irq_num) * 4 ;                         \
        addl    $4,%esp ;                                               \
        subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        decl    PCPU(intr_nesting_level) ;                              \
        incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
        movl    intr_countp + (irq_num) * 4,%eax ;                      \
        incl    (%eax) ;                                                \
        UNMASK_IRQ(icu, irq_num) ;                                      \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Restart fast interrupt held up by critical section or cpl.
 *
 *      - Push a dummy trap frame as required by doreti.
 *      - The interrupt source is already masked.
 *      - Clear the fpending bit
 *      - Run the handler
 *      - Unmask the interrupt
 *      - Pop the dummy frame and do a normal return
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */
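/*
 * Example: if fastintr3 fired while the current thread was inside a
 * critical section it left IRQ 3 masked and set bit 3 of fpending;
 * when the deferred interrupt is eventually replayed, fastunpend3
 * below runs the handler and then unmasks IRQ 3.
 */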
#define FAST_UNPEND(irq_num, vec_name, icu)                             \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl   %ebp ;                                                  \
        movl    %esp,%ebp ;                                             \
        PUSH_DUMMY ;                                                    \
        pushl   intr_unit + (irq_num) * 4 ;                             \
        call    *intr_handler + (irq_num) * 4 ;                         \
        addl    $4, %esp ;                                              \
        incl    PCPU(cnt)+V_INTR ;                                      \
        movl    intr_countp + (irq_num) * 4, %eax ;                     \
        incl    (%eax) ;                                                \
        UNMASK_IRQ(icu, irq_num) ;                                      \
        POP_DUMMY ;                                                     \
        popl %ebp ;                                                     \
        ret ;                                                           \

/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti.
 *      - Mask the interrupt and reenable its source.
 *      - If we cannot take the interrupt set its ipending bit and
 *        doreti.  In addition to checking for a critical section
 *        and cpl mask we also check to see if the thread is still
 *        running.
 *      - If we can take the interrupt clear its ipending bit
 *        and schedule its thread.  Leave interrupts masked and doreti.
 *
 *      sched_ithd() is called with interrupts enabled and outside of a
 *      critical section (so it can preempt us).
 *
 *      YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
 *
 *      Note that intr_nesting_level is not bumped during sched_ithd because
 *      blocking allocations are allowed in the preemption case.
 *
 *      YYY can cache gd base pointer instead of using hidden %fs
 *      prefixes.
 */
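
/*
 * C-like sketch of the decision below (illustration only; the real
 * code also updates the interrupt statistics):
 *
 *      if (in a critical section || (cpl & IRQ_LBIT(irq_num))) {
 *              ipending |= IRQ_LBIT(irq_num);
 *              reqflags |= RQF_INTPEND;
 *      } else {
 *              ipending &= ~IRQ_LBIT(irq_num);
 *              sti;
 *              sched_ithd(irq_num);
 *      }
 *      jmp doreti;     the IRQ itself stays masked either way
 */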

#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        PUSH_FRAME ;                                                    \
        FAKE_MCOUNT(13*4(%esp)) ;                                       \
        maybe_extra_ipending ;                                          \
        MASK_IRQ(icu, irq_num) ;                                        \
        enable_icus ;                                                   \
        movl    PCPU(curthread),%ebx ;                                  \
        movl    TD_CPL(%ebx), %eax ;                                    \
        pushl   %eax ;          /* push CPL for doreti */               \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
        testl   $IRQ_LBIT(irq_num), %eax ;                              \
        jz      2f ;                                                    \
1: ;                                                                    \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* set running bit, clear pending bit, run handler */           \
        andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        sti ;                                                           \
        pushl   $irq_num ;                                              \
        call    sched_ithd ;                                            \
        addl    $4,%esp ;                                               \
        incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
        movl    intr_countp + (irq_num) * 4,%eax ;                      \
        incl    (%eax) ;                                                \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

/*
 * Unmask a slow interrupt.  This function is used by interrupt threads
 * after they have descheduled themselves to reenable interrupts and
 * possibly cause a reschedule to occur.
 */

#define INTR_UNMASK(irq_num, vec_name, icu)                             \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        pushl %ebp ;     /* frame for ddb backtrace */                  \
        movl    %esp, %ebp ;                                            \
        UNMASK_IRQ(icu, irq_num) ;                                      \
        popl %ebp ;                                                     \
        ret ;                                                           \

MCOUNT_LABEL(bintr)
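/*
 * Vector instantiations: IRQs 0-7 live on the master ICU (IO_ICU1),
 * IRQs 8-15 on the slave (IO_ICU2), which must EOI both controllers
 * (ENABLE_ICU1_AND_2).
 */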
        FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1,)
        FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2,)
        FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2,)

        INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al,)
        INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
        INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
        INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
        INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
        INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
        INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
        INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
        INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)

        FAST_UNPEND(0,fastunpend0, IO_ICU1)
        FAST_UNPEND(1,fastunpend1, IO_ICU1)
        FAST_UNPEND(2,fastunpend2, IO_ICU1)
        FAST_UNPEND(3,fastunpend3, IO_ICU1)
        FAST_UNPEND(4,fastunpend4, IO_ICU1)
        FAST_UNPEND(5,fastunpend5, IO_ICU1)
        FAST_UNPEND(6,fastunpend6, IO_ICU1)
        FAST_UNPEND(7,fastunpend7, IO_ICU1)
        FAST_UNPEND(8,fastunpend8, IO_ICU2)
        FAST_UNPEND(9,fastunpend9, IO_ICU2)
        FAST_UNPEND(10,fastunpend10, IO_ICU2)
        FAST_UNPEND(11,fastunpend11, IO_ICU2)
        FAST_UNPEND(12,fastunpend12, IO_ICU2)
        FAST_UNPEND(13,fastunpend13, IO_ICU2)
        FAST_UNPEND(14,fastunpend14, IO_ICU2)
        FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr)

        .data

        .text