Remove APIC_IO option
[dragonfly.git] sys/platform/pc64/apic/apic_vector.s
/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#if 0
#include "use_npx.h"
#include "opt_auto_eoi.h"
#endif

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))

#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif

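/*
 * Build the dummy trap frame doreti expects: push all registers and zero
 * the trap-specific fields (xflags, trapno, addr, flags, err).
 */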
#define APIC_PUSH_FRAME                                                 \
        PUSH_FRAME ;            /* 15 regs + space for 5 extras */      \
        movq $0,TF_XFLAGS(%rsp) ;                                       \
        movq $0,TF_TRAPNO(%rsp) ;                                       \
        movq $0,TF_ADDR(%rsp) ;                                         \
        movq $0,TF_FLAGS(%rsp) ;                                        \
        movq $0,TF_ERR(%rsp) ;                                          \
        cld ;                                                           \

/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME POP_FRAME

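/*
 * Accessors into the int_to_apicintpin[] array: per-IRQ IO APIC register
 * base address, redirection table index, and flags.
 */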
#define IOAPICADDR(irq_num) \
        CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_APIC_ADDRESS
#define REDIRIDX(irq_num) \
        CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_REDIRINDEX
#define IOAPICFLAGS(irq_num) \
        CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_FLAGS

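/*
 * Mask the specified IRQ at its IO APIC if it is not already masked,
 * under the APIC_IMASK lock.
 */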
#define MASK_IRQ(irq_num)                                               \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;               \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;               \
                                                /* set the mask bit */  \
        movq    IOAPICADDR(irq_num), %rcx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%rcx) ;                  /* write the index */   \
        orl     $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */  \
7: ;                                            /* already masked */    \
        APIC_IMASK_UNLOCK ;                                             \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $AIMI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ;                \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9: ;                                                                    \

/*
 * Test to see if the source is currently masked, clear if so.
 *  Entered with the handler's return code in %eax; a non-zero value
 *  leaves the source masked.
 */
#define UNMASK_IRQ(irq_num)                                     \
        cmpl    $0,%eax ;                                               \
        jnz     8f ;                                                    \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;               \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;              \
                                                /* clear mask bit */    \
        movq    IOAPICADDR(irq_num),%rcx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%rcx) ;                   /* write the index */   \
        andl    $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
7: ;                                                                    \
        APIC_IMASK_UNLOCK ;                                             \
8: ;                                                                    \

#ifdef SMP /* APIC-IO */

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti
 *      - Mask the interrupt and reenable its source
 *      - If we cannot take the interrupt, set its fpending bit and
 *        doreti.  Note that we cannot mess with mp_lock at all
 *        if we entered from a critical section!
 *      - If we can take the interrupt, clear its fpending bit,
 *        call the handler, then unmask and doreti.
 *
 * YYY can cache the gd base pointer instead of using hidden %fs prefixes.
 */

#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        APIC_PUSH_FRAME ;                                               \
        FAKE_MCOUNT(15*4(%esp)) ;                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        movq    lapic, %rax ;                                           \
        movl    $0, LA_EOI(%rax) ;                                      \
        movq    PCPU(curthread),%rbx ;                                  \
        testl   $-1,TD_NEST_COUNT(%rbx) ;                               \
        jne     1f ;                                                    \
        testl   $-1,TD_CRITCOUNT(%rbx) ;                                \
        je      2f ;                                                    \
1: ;                                                                    \
        /* in critical section, make interrupt pending */               \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;              /* trapframe -> intrframe */    \
        movq    %rsp, %rdi ;            /* pass frame by reference */   \
        incl    TD_CRITCOUNT(%rbx) ;                                    \
        call    ithread_fast_handler ;  /* returns 0 to unmask */       \
        decl    TD_CRITCOUNT(%rbx) ;                                    \
        addq    $8, %rsp ;              /* intrframe -> trapframe */    \
        UNMASK_IRQ(irq_num) ;                                           \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iretq


/*
 * Handle TLB shootdowns.
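 * Reloading %cr3 with its current value invalidates all non-global TLB
 * entries on this CPU.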
 */
        .text
        SUPERALIGN_TEXT
        .globl  Xinvltlb
Xinvltlb:
        pushq   %rax

        movq    %cr3, %rax              /* invalidate the TLB */
        movq    %rax, %cr3

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        popq    %rax
        iretq


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */

        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushq   %rbp
        movq    %rsp, %rbp
        /* We save registers that are not preserved across function calls. */
        /* JG can be re-written with mov's */
        pushq   %rax
        pushq   %rcx
        pushq   %rdx
        pushq   %rsi
        pushq   %rdi
        pushq   %r8
        pushq   %r9
        pushq   %r10
        pushq   %r11

#if JG
        /* JGXXX switch to kernel %gs? */
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs
#endif

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        /* JG */
        movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leaq    CNAME(stoppcbs), %rdi
        addq    %rax, %rdi
        call    CNAME(savectx)          /* Save process context */

        movl    PCPU(cpuid), %eax

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        MPLOCKED
        btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
1:
        andl    $~RQF_IPIQ,PCPU(reqflags)
        pushq   %rax
        call    lwkt_smp_stopped
        popq    %rax
        btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        MPLOCKED
        btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        MPLOCKED
        btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */

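        /* only cpu 0 (%eax == 0) runs the restart function, if any */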
        test    %eax, %eax
        jnz     2f

        movq    CNAME(cpustop_restartfunc), %rax
        test    %rax, %rax
        jz      2f
        movq    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%rax
2:
        popq    %r11
        popq    %r10
        popq    %r9
        popq    %r8
        popq    %rdi
        popq    %rsi
        popq    %rdx
        popq    %rcx
        popq    %rax

#if JG
        popl    %fs
        popl    %ds                     /* restore previous data segment */
#endif
        movq    %rbp, %rsp
        popq    %rbp
        iretq

        /*
         * For now just have one ipiq IPI, but what we really want is
         * to have one for each source cpu so the APICs don't get stalled
         * backlogging the requests.
         */
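        /*
         * Xipiq: process this cpu's incoming IPI message queue, or just
         * set RQF_IPIQ and return if we are already in a critical section.
         */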
        .text
        SUPERALIGN_TEXT
        .globl Xipiq
Xipiq:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_IPI
        movq    PCPU(curthread),%rbx
        testl   $-1,TD_CRITCOUNT(%rbx)
        jne     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        incl    TD_CRITCOUNT(%rbx)
        call    lwkt_process_ipiq_frame
        decl    TD_CRITCOUNT(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_IPIQ,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

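/*
 * Xtimer: local APIC timer interrupt.  Calls lapic_timer_process_frame
 * unless we are in a critical section or a nested interrupt, in which
 * case it just sets RQF_TIMER and returns.
 */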
        .text
        SUPERALIGN_TEXT
        .globl Xtimer
Xtimer:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_TIMER
        movq    PCPU(curthread),%rbx
        testl   $-1,TD_CRITCOUNT(%rbx)
        jne     1f
        testl   $-1,TD_NEST_COUNT(%rbx)
        jne     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        incl    TD_CRITCOUNT(%rbx)
        call    lapic_timer_process_frame
        decl    TD_CRITCOUNT(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_TIMER,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

#ifdef SMP /* APIC-IO */

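/*
 * Instantiate the fast interrupt vectors apic_fastintr0 through
 * apic_fastintr23, one per supported IO APIC IRQ.
 */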
MCOUNT_LABEL(bintr)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
MCOUNT_LABEL(eintr)

#endif

        .data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long   0
started_cpus:
        .long   0

        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
        .quad 0

        .text