Replace magic numbers in pc64/apic_vector.s
[dragonfly.git] sys/platform/pc64/apic/apic_vector.s
/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#if 0
#include "use_npx.h"
#include "opt_auto_eoi.h"
#endif

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))
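
/*
 * Each I/O APIC pin has a 64-bit redirection entry stored as a pair of
 * 32-bit registers starting at register 0x10, so e.g. REDTBL_IDX(3)
 * yields 0x16, the low half of pin 3's entry.
 */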

#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif

#define APIC_PUSH_FRAME                                                 \
        PUSH_FRAME ;            /* 15 regs + space for 5 extras */      \
        movq $0,TF_XFLAGS(%rsp) ;                                       \
        movq $0,TF_TRAPNO(%rsp) ;                                       \
        movq $0,TF_ADDR(%rsp) ;                                         \
        movq $0,TF_FLAGS(%rsp) ;                                        \
        movq $0,TF_ERR(%rsp) ;                                          \
        cld ;                                                           \

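/*
 * APIC_PUSH_FRAME builds the trap frame doreti expects: PUSH_FRAME saves
 * the general registers, and the fields a hardware interrupt does not
 * supply (xflags, trapno, addr, flags, err) are zeroed so C code sees a
 * fully initialized frame.
 */
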
/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME POP_FRAME

#define IOAPICADDR(irq_num) \
        CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_APIC_ADDRESS
#define REDIRIDX(irq_num) \
        CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_REDIRINDEX

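/*
 * IOAPICADDR() and REDIRIDX() index the int_to_apicintpin[] table by IRQ
 * to get the routing I/O APIC's base address and the pin's redirection
 * table index.  The I/O APIC is programmed indirectly: the index is
 * written to the select register at the base address and the entry is
 * then accessed through the data register at IOAPIC_WINDOW.  apic_imen
 * is the software shadow of which IRQs are currently masked, serialized
 * by APIC_IMASK_LOCK.
 */
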
#define MASK_IRQ(irq_num)                                               \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movq    IOAPICADDR(irq_num), %rcx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%rcx) ;                  /* write the index */   \
        orl     $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */  \
7: ;                                            /* already masked */    \
        APIC_IMASK_UNLOCK ;                                             \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9: ;                                                                    \

/*
 * Test to see if the source is currently masked, clear if so.
 */
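/*
 * The fast handler's return value is expected in %eax on entry: a
 * non-zero value leaves the source masked, only a zero value performs
 * the unmask (see the "returns 0 to unmask" call in FAST_INTR below).
 */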
#define UNMASK_IRQ(irq_num)                                     \
        cmpl    $0,%eax ;                                               \
        jnz     8f ;                                                    \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movq    IOAPICADDR(irq_num),%rcx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%rcx) ;                   /* write the index */   \
        andl    $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
7: ;                                                                    \
        APIC_IMASK_UNLOCK ;                                             \
8: ;                                                                    \

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti
 *      - Mask the interrupt and reenable its source
 *      - If we cannot take the interrupt, set its fpending bit and
 *        jump to doreti.  Note that we cannot mess with mp_lock at all
 *        if we entered from a critical section!
 *      - If we can take the interrupt, clear its fpending bit,
 *        call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        APIC_PUSH_FRAME ;                                               \
        FAKE_MCOUNT(15*4(%esp)) ;                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        movq    lapic, %rax ;                                           \
        movl    $0, LA_EOI(%rax) ;                                      \
        movq    PCPU(curthread),%rbx ;                                  \
        testl   $-1,TD_NEST_COUNT(%rbx) ;                               \
        jne     1f ;                                                    \
        testl   $-1,TD_CRITCOUNT(%rbx) ;                                \
        je      2f ;                                                    \
1: ;                                                                    \
        /* in critical section, make interrupt pending */               \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;              /* trapframe -> intrframe */    \
        movq    %rsp, %rdi ;            /* pass frame by reference */   \
        incl    TD_CRITCOUNT(%rbx) ;                                    \
        call    ithread_fast_handler ;  /* returns 0 to unmask */       \
        decl    TD_CRITCOUNT(%rbx) ;                                    \
        addq    $8, %rsp ;              /* intrframe -> trapframe */    \
        UNMASK_IRQ(irq_num) ;                                           \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iretq


/*
 * Handle TLB shootdowns.
 */
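/*
 * Reloading %cr3 with its current value invalidates every non-global TLB
 * entry on this CPU, which is all this handler does besides the EOI.
 */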
        .text
        SUPERALIGN_TEXT
        .globl  Xinvltlb
Xinvltlb:
        pushq   %rax

        movq    %cr3, %rax              /* invalidate the TLB */
        movq    %rax, %cr3

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        popq    %rax
        iretq


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
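
/*
 * Unlike the vectors above, Xcpustop does not build a trapframe and does
 * not go through doreti; it only preserves the registers that the calls
 * to savectx() and lwkt_smp_stopped() may clobber and returns with a
 * plain iretq.
 */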

        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushq   %rbp
        movq    %rsp, %rbp
        /* We save registers that are not preserved across function calls. */
        /* JG can be re-written with mov's */
        pushq   %rax
        pushq   %rcx
        pushq   %rdx
        pushq   %rsi
        pushq   %rdi
        pushq   %r8
        pushq   %r9
        pushq   %r10
        pushq   %r11

#if JG
        /* JGXXX switch to kernel %gs? */
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs
#endif

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        /* JG */
        movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leaq    CNAME(stoppcbs), %rdi
        addq    %rax, %rdi
        call    CNAME(savectx)          /* Save process context */

        movl    PCPU(cpuid), %eax

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        MPLOCKED
        btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
1:
        andl    $~RQF_IPIQ,PCPU(reqflags)
        pushq   %rax
        call    lwkt_smp_stopped
        popq    %rax
        btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        MPLOCKED
        btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        MPLOCKED
        btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */

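        /*
         * Only cpu 0 runs the optional restart function, and the pointer
         * is cleared first so it fires once per stop/restart cycle.
         */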
        test    %eax, %eax
        jnz     2f

        movq    CNAME(cpustop_restartfunc), %rax
        test    %rax, %rax
        jz      2f
        movq    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%rax
2:
        popq    %r11
        popq    %r10
        popq    %r9
        popq    %r8
        popq    %rdi
        popq    %rsi
        popq    %rdx
        popq    %rcx
        popq    %rax

#if JG
        popl    %fs
        popl    %ds                     /* restore previous data segment */
#endif
        movq    %rbp, %rsp
        popq    %rbp
        iretq

        /*
         * For now just have one ipiq IPI, but what we really want is
         * to have one for each source cpu so the APICs don't get stalled
         * backlogging the requests.
         */
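        /*
         * If the current thread is in a critical section the IPI queue
         * cannot be processed here, so only RQF_IPIQ is set and the
         * queue is run later when the critical section is exited.
         * Otherwise lwkt_process_ipiq_frame() is called directly.
         */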
        .text
        SUPERALIGN_TEXT
        .globl Xipiq
Xipiq:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_IPI
        movq    PCPU(curthread),%rbx
        testl   $-1,TD_CRITCOUNT(%rbx)
        jne     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        incl    TD_CRITCOUNT(%rbx)
        call    lwkt_process_ipiq_frame
        decl    TD_CRITCOUNT(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_IPIQ,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

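/*
 * LAPIC timer interrupt.  lapic_timer_process_frame() is called directly
 * unless the current thread is in a critical section or has a non-zero
 * TD_NEST_COUNT, in which case only RQF_TIMER is set and the work is
 * deferred, mirroring the Xipiq logic above.
 */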
        .text
        SUPERALIGN_TEXT
        .globl Xtimer
Xtimer:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_TIMER
        movq    PCPU(curthread),%rbx
        testl   $-1,TD_CRITCOUNT(%rbx)
        jne     1f
        testl   $-1,TD_NEST_COUNT(%rbx)
        jne     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        incl    TD_CRITCOUNT(%rbx)
        call    lapic_timer_process_frame
        decl    TD_CRITCOUNT(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_TIMER,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

#ifdef APIC_IO

MCOUNT_LABEL(bintr)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
MCOUNT_LABEL(eintr)

#endif

        .data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long   0
started_cpus:
        .long   0

        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
        .quad 0

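/* bitmask of level-triggered IRQs, tested by MASK_LEVEL_IRQ() above */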
        .globl  apic_pin_trigger
apic_pin_trigger:
        .long   0

        .text
