/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
 * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.6 2003/06/28 07:00:58 dillon Exp $
 */

/*
 * modified for PC98 by Kakefuda
 */

#ifdef PC98
#define ICU_IMR_OFFSET		2	/* IO_ICU{1,2} + 2 */
#else
#define ICU_IMR_OFFSET		1	/* IO_ICU{1,2} + 1 */
#endif

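/*
 * Background on the offsets above: on a standard PC/AT the 8259A's
 * interrupt mask register (OCW1) is read and written at the chip's
 * base port + 1 (e.g. 0x21 for ICU1), while PC98 machines space the
 * 8259 ports two apart, hence base + 2 there.
 */
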
#define ICU_EOI			0x20	/* XXX - define elsewhere */

#define IRQ_BIT(irq_num)	(1 << ((irq_num) % 8))
#define IRQ_BYTE(irq_num)	((irq_num) >> 3)

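/*
 * A worked example: for irq_num 11 (on the slave ICU), IRQ_BYTE(11)
 * is 11 >> 3 = 1, selecting the second mask byte, and IRQ_BIT(11) is
 * 1 << (11 % 8) = 0x08.  IRQs 0-7 thus live in byte 0 of masks such
 * as _imen and _ipending, IRQs 8-15 in byte 1.
 */
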
#ifdef AUTO_EOI_1
#define ENABLE_ICU1		/* use auto-EOI to reduce i/o */
#define OUTB_ICU1
#else
#define ENABLE_ICU1 \
	movb	$ICU_EOI,%al ;	/* as soon as possible send EOI ... */ \
	OUTB_ICU1		/* ... to clear in service bit */
#define OUTB_ICU1 \
	outb	%al,$IO_ICU1
#endif

#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define ENABLE_ICU1_AND_2	ENABLE_ICU1
#else
#define ENABLE_ICU1_AND_2 \
	movb	$ICU_EOI,%al ;	/* as above */ \
	outb	%al,$IO_ICU2 ;	/* but do second icu first ... */ \
	OUTB_ICU1		/* ... then first icu (if !AUTO_EOI_1) */
#endif

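/*
 * For reference, with neither AUTO_EOI_1 nor AUTO_EOI_2 defined,
 * ENABLE_ICU1_AND_2 expands to roughly:
 *
 *	movb	$ICU_EOI,%al
 *	outb	%al,$IO_ICU2
 *	outb	%al,$IO_ICU1
 *
 * i.e. a non-specific EOI command (0x20, OCW2) to the slave and then
 * the master, matching the ICU_EOI definition above.
 */
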
/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define FAST_INTR(irq_num, vec_name, enable_icus)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	%eax ;		/* save only call-used registers */	\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	pushl	%ds ;							\
	MAYBE_PUSHL_ES ;						\
	mov	$KDSEL,%ax ;						\
	mov	%ax,%ds ;						\
	MAYBE_MOVW_AX_ES ;						\
	FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;			\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	enable_icus ;		/* (re)enable ASAP (helps edge trigger?) */ \
	addl	$4,%esp ;						\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	_curthread, %ecx ; /* are we in a critical section? */	\
	cmpl	$TDPRI_CRIT,TD_PRI(%ecx) ;				\
	jge	1f ;							\
	movl	TD_MACH+MTD_CPL(%ecx),%eax ; /* unmasking pending ints? */ \
	notl	%eax ;							\
	andl	_ipending,%eax ;					\
	jne	2f ;		/* yes, maybe handle them */		\
1: ;									\
	MEXITCOUNT ;							\
	MAYBE_POPL_ES ;							\
	popl	%ds ;							\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax ;							\
	iret ;								\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	cmpb	$3,_intr_nesting_level ;	/* is there enough stack? */ \
	jae	1b ;		/* no, return */			\
	movl	TD_MACH+MTD_CPL(%ecx),%eax ;				\
	/* XXX next line is probably unnecessary now. */		\
	movl	$HWI_MASK|SWI_MASK,TD_MACH+MTD_CPL(%ecx) ; /* limit nesting ... */ \
	incb	_intr_nesting_level ;	/* ... really limit it ... */	\
	sti ;			/* ... to do this as early as possible */ \
	MAYBE_POPL_ES ;		/* discard most of thin frame ... */	\
	popl	%ecx ;		/* ... original %ds ... */		\
	popl	%edx ;							\
	xchgl	%eax,4(%esp) ;	/* orig %eax; save cpl */		\
	pushal ;		/* build fat frame (grrr) ... */	\
	pushl	%ecx ;		/* ... actually %ds ... */		\
	pushl	%es ;							\
	pushl	%fs ;							\
	mov	$KDSEL,%ax ;						\
	mov	%ax,%es ;						\
	mov	$KPSEL,%ax ;						\
	mov	%ax,%fs ;						\
	movl	(3+8+0)*4(%esp),%ecx ;	/* ... %ecx from thin frame ... */ \
	movl	%ecx,(3+6)*4(%esp) ;	/* ... to fat frame ... */	\
	movl	(3+8+1)*4(%esp),%eax ;	/* ... cpl from thin frame */	\
	pushl	%eax ;							\
	subl	$4,%esp ;	/* junk for unit number */		\
	MEXITCOUNT ;							\
	jmp	_doreti

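/*
 * In short, FAST_INTR saves only the call-used registers (a "thin"
 * frame), runs the handler without touching the cpl, and normally
 * irets directly; only when unmasked interrupts are pending and the
 * nesting level allows does it rebuild the full trapframe (the "fat"
 * frame) that _doreti expects.
 */
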
#define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save our data and extra segments ... */ \
	pushl	%es ;							\
	pushl	%fs ;							\
	mov	$KDSEL,%ax ;	/* ... and reload with kernel's own ... */ \
	mov	%ax,%ds ;	/* ... early for obsolete reasons */	\
	mov	%ax,%es ;						\
	mov	$KPSEL,%ax ;						\
	mov	%ax,%fs ;						\
	maybe_extra_ipending ;						\
	movb	_imen + IRQ_BYTE(irq_num),%al ;				\
	orb	$IRQ_BIT(irq_num),%al ;					\
	movb	%al,_imen + IRQ_BYTE(irq_num) ;				\
	outb	%al,$icu+ICU_IMR_OFFSET ;				\
	enable_icus ;							\
	movl	_curthread, %ebx ; /* are we in a critical section? */	\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jge	2f ;							\
	movl	TD_MACH+MTD_CPL(%ebx),%eax ; /* is this interrupt masked by the cpl? */ \
	testb	$IRQ_BIT(irq_num),%reg ;				\
	jne	2f ;							\
	incb	_intr_nesting_level ;					\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX late to avoid double count */ \
	incl	_cnt+V_INTR ;	/* tally interrupts */			\
	movl	_intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
	movl	TD_MACH+MTD_CPL(%ebx),%eax ;				\
	pushl	%eax ;							\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	orl	_intr_mask + (irq_num) * 4,%eax ;			\
	movl	%eax,TD_MACH+MTD_CPL(%ebx) ;				\
	sti ;								\
	call	*_intr_handler + (irq_num) * 4 ;			\
	cli ;			/* must unmask _imen and icu atomically */ \
	movb	_imen + IRQ_BYTE(irq_num),%al ;				\
	andb	$~IRQ_BIT(irq_num),%al ;				\
	movb	%al,_imen + IRQ_BYTE(irq_num) ;				\
	outb	%al,$icu+ICU_IMR_OFFSET ;				\
	sti ;			/* XXX _doreti repeats the cli/sti */	\
	MEXITCOUNT ;							\
	/* We could usually avoid the following jmp by inlining some of */ \
	/* _doreti, but it's probably better to use less cache. */	\
	jmp	_doreti ;						\
;									\
	ALIGN_TEXT ;							\
2: ;									\
	/* XXX skip mcounting here to avoid double count */		\
	orb	$IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ;	\
	movl	$TDPRI_CRIT,_reqpri ;					\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp ;						\
	iret

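/*
 * A note on the ordering in INTR: the IRQ is masked in _imen and in
 * the ICU's IMR before the EOI is sent, so the same (possibly
 * level-triggered) interrupt cannot recurse while its handler runs
 * with interrupts enabled; the matching unmask is done under cli so
 * the software mask in _imen and the hardware IMR cannot go out of
 * sync, as the "atomically" comment above points out.
 */
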
MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0, ENABLE_ICU1)
	FAST_INTR(1,fastintr1, ENABLE_ICU1)
	FAST_INTR(2,fastintr2, ENABLE_ICU1)
	FAST_INTR(3,fastintr3, ENABLE_ICU1)
	FAST_INTR(4,fastintr4, ENABLE_ICU1)
	FAST_INTR(5,fastintr5, ENABLE_ICU1)
	FAST_INTR(6,fastintr6, ENABLE_ICU1)
	FAST_INTR(7,fastintr7, ENABLE_ICU1)
	FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
	FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
	FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
	FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
	FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
	FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
	FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
	FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
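
/*
 * IRQ 0 is the clock; its maybe_extra_ipending hook, CLKINTR_PENDING
 * below, records the interrupt in clkintr_pending on entry (used
 * elsewhere, e.g. by the clock code, to detect a latched clock tick).
 */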
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
	INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
	INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
	INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
	INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
	INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
	INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
	INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
	INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
	INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
	INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)

	.data
	.globl	_ihandlers
_ihandlers:			/* addresses of interrupt handlers */
				/* actually resumption addresses for HWI's */
	.long	Xresume0, Xresume1, Xresume2, Xresume3
	.long	Xresume4, Xresume5, Xresume6, Xresume7
	.long	Xresume8, Xresume9, Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
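
/*
 * These SWI masks pair index-for-index with the SWI slots of
 * _ihandlers above: SWI_TTY, SWI_CAMNET, SWI_CAMBIO, and SWI_TQ
 * currently resolve to _swi_null, SWI_NET to swi_net, SWI_VM to
 * _swi_vm, and SWI_CLOCK to _softclock.
 */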

	.text