/*
 *      from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#if 0
#include "use_npx.h"
#include "opt_auto_eoi.h"
#endif

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)       (1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)     (0x10 + ((irq_num) * 2))

#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif

#define APIC_PUSH_FRAME                                                 \
        PUSH_FRAME ;            /* 15 regs + space for 5 extras */      \
        movq $0,TF_XFLAGS(%rsp) ;                                       \
        movq $0,TF_TRAPNO(%rsp) ;                                       \
        movq $0,TF_ADDR(%rsp) ;                                         \
        movq $0,TF_FLAGS(%rsp) ;                                        \
        movq $0,TF_ERR(%rsp) ;                                          \
        cld ;                                                           \

/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME POP_FRAME

/* sizeof(struct apic_intmapinfo) == 24 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 16

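/*
 * A rough C-level view of the layout the two offsets above assume; the
 * authoritative definition of struct apic_intmapinfo lives in the MD
 * headers and the field names below are illustrative only.  All this
 * file relies on is that the structure is 24 bytes long, with the IO
 * APIC register base at byte offset 8 and the redirection-table index
 * at byte offset 16:
 *
 *      struct apic_intmapinfo {
 *              int             ioapic_num;     (offset 0, not used here)
 *              int             int_pin;        (offset 4, not used here)
 *              volatile void   *apic_address;  (offset 8  -> IOAPICADDR())
 *              int             redirindex;     (offset 16 -> REDIRIDX())
 *      };                                      (padded to 24 bytes)
 */
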
#define MASK_IRQ(irq_num)                                               \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
        orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movq    IOAPICADDR(irq_num), %rcx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%rcx) ;                  /* write the index */   \
        movl    IOAPIC_WINDOW(%rcx), %eax ;     /* current value */     \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%rcx) ;     /* new value */         \
7: ;                                            /* already masked */    \
        APIC_IMASK_UNLOCK ;                                             \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)                                         \
        testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
9: ;                                                                    \

/*
 * Test to see if the source is currently masked, and clear the mask if so.
 */
#define UNMASK_IRQ(irq_num)                                             \
        cmpl    $0,%eax ;                                               \
        jnz     8f ;                                                    \
        APIC_IMASK_LOCK ;                       /* into critical reg */ \
        testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
        andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movq    IOAPICADDR(irq_num),%rcx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%rcx) ;                   /* write the index */   \
        movl    IOAPIC_WINDOW(%rcx),%eax ;      /* current value */     \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%rcx) ;      /* new value */         \
7: ;                                                                    \
        APIC_IMASK_UNLOCK ;                                             \
8: ;                                                                    \

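/*
 * MASK_IRQ/UNMASK_IRQ above implement the standard IO APIC indirect
 * register protocol: write the redirection-entry index to the register
 * select word at the IOAPIC base, then read-modify-write the data
 * window at IOAPIC_WINDOW to set or clear IOART_INTMASK, all while
 * holding the imask lock and keeping the apic_imen software image in
 * sync.  As a sketch only (ioapic_set_mask() is a hypothetical helper,
 * not something defined elsewhere in the kernel), the hardware half of
 * that sequence in C looks roughly like:
 *
 *      static void
 *      ioapic_set_mask(volatile uint32_t *ioapic, uint32_t redirindex,
 *                      int masked)
 *      {
 *              uint32_t val;
 *
 *              ioapic[0] = redirindex;                 (register select)
 *              val = ioapic[IOAPIC_WINDOW / 4];        (data window)
 *              if (masked)
 *                      val |= IOART_INTMASK;
 *              else
 *                      val &= ~IOART_INTMASK;
 *              ioapic[IOAPIC_WINDOW / 4] = val;
 *      }
 */
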
#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *      - Push the trap frame required by doreti
 *      - Mask the interrupt and reenable its source
 *      - If we cannot take the interrupt set its fpending bit and
 *        doreti.  Note that we cannot mess with mp_lock at all
 *        if we entered from a critical section!
 *      - If we can take the interrupt clear its fpending bit,
 *        call the handler, then unmask and doreti.
 *
 * YYY can cache the gd base pointer instead of using hidden %fs prefixes.
 */

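/*
 * As a sketch only (the macro below is authoritative), the per-vector
 * logic amounts to the following C-like pseudocode.  fpending, reqflags,
 * td_nest_count and td_pri correspond to the PCPU/thread fields used
 * below; the lower-case helper names are pseudocode, not real kernel
 * functions.
 *
 *      mask_level_irq(irq);                    (level ints stay masked)
 *      lapic_eoi();
 *      if (td->td_nest_count != 0 || td->td_pri >= TDPRI_CRIT) {
 *              fpending |= 1 << irq;           (defer, leave masked)
 *              reqflags |= RQF_INTPEND;
 *      } else {
 *              fpending &= ~(1 << irq);
 *              td->td_pri += TDPRI_CRIT;
 *              r = ithread_fast_handler(frame); (returns 0 to unmask)
 *              td->td_pri -= TDPRI_CRIT;
 *              if (r == 0)
 *                      unmask_irq(irq);
 *      }
 *      doreti();
 */
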
#define FAST_INTR(irq_num, vec_name)                                    \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
IDTVEC(vec_name) ;                                                      \
        APIC_PUSH_FRAME ;                                               \
        FAKE_MCOUNT(15*4(%esp)) ;                                       \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        movq    lapic, %rax ;                                           \
        movl    $0, LA_EOI(%rax) ;                                      \
        movq    PCPU(curthread),%rbx ;                                  \
        testl   $-1,TD_NEST_COUNT(%rbx) ;                               \
        jne     1f ;                                                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
        jl      2f ;                                                    \
1: ;                                                                    \
        /* in critical section, make interrupt pending */               \
        /* set the pending bit and return, leave interrupt masked */    \
        orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
        orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
        jmp     5f ;                                                    \
2: ;                                                                    \
        /* clear pending bit, run handler */                            \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;              /* trapframe -> intrframe */    \
        movq    %rsp, %rdi ;            /* pass frame by reference */   \
        addl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
        call    ithread_fast_handler ;  /* returns 0 to unmask */       \
        subl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
        addq    $8, %rsp ;              /* intrframe -> trapframe */    \
        UNMASK_IRQ(irq_num) ;                                           \
5: ;                                                                    \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
Xspuriousint:

        /* No EOI cycle used here */

        iretq


/*
 * Handle TLB shootdowns.
 */
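/*
 * Note: reloading %cr3 with its current value flushes all non-global
 * TLB entries, which is the entire body of this handler.  In C this is
 * essentially what the MD cpu_invltlb() inline does; a sketch only,
 * not the literal header definition:
 *
 *      static __inline void
 *      cpu_invltlb(void)
 *      {
 *              __asm __volatile("movq %%cr3,%%rax; movq %%rax,%%cr3"
 *                               : : : "rax", "memory");
 *      }
 */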
        .text
        SUPERALIGN_TEXT
        .globl  Xinvltlb
Xinvltlb:
        pushq   %rax

        movq    %cr3, %rax              /* invalidate the TLB */
        movq    %rax, %cr3

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        popq    %rax
        iretq


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */

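/*
 * The handshake with stop_cpus()/restart_cpus() is carried in the
 * stopped_cpus/started_cpus bitmasks defined in the .data section at
 * the end of this file.  A sketch of the protocol from the stopped
 * cpu's point of view (savectx, lwkt_smp_stopped and
 * cpustop_restartfunc are the symbols actually used below; everything
 * else is pseudocode):
 *
 *      savectx(&stoppcbs[mycpuid]);
 *      atomic_set_bit(&stopped_cpus, mycpuid);
 *      while ((started_cpus & (1 << mycpuid)) == 0)
 *              lwkt_smp_stopped();             (keep draining IPIQs)
 *      atomic_clear_bit(&started_cpus, mycpuid);
 *      atomic_clear_bit(&stopped_cpus, mycpuid);
 *      if (mycpuid == 0 && cpustop_restartfunc != NULL) {
 *              func = cpustop_restartfunc;
 *              cpustop_restartfunc = NULL;     (one-shot)
 *              func();
 *      }
 */
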
        .text
        SUPERALIGN_TEXT
        .globl Xcpustop
Xcpustop:
        pushq   %rbp
        movq    %rsp, %rbp
        /* We save registers that are not preserved across function calls. */
        /* JG can be re-written with mov's */
        pushq   %rax
        pushq   %rcx
        pushq   %rdx
        pushq   %rsi
        pushq   %rdi
        pushq   %r8
        pushq   %r9
        pushq   %r10
        pushq   %r11

#if JG
        /* JGXXX switch to kernel %gs? */
        pushl   %ds                     /* save current data segment */
        pushl   %fs

        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
        movl    $KPSEL, %eax
        mov     %ax, %fs
#endif

        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */

        /* JG */
        movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leaq    CNAME(stoppcbs), %rdi
        addq    %rax, %rdi
        call    CNAME(savectx)          /* Save process context */

        movl    PCPU(cpuid), %eax

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        MPLOCKED
        btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
1:
        andl    $~RQF_IPIQ,PCPU(reqflags)
        pushq   %rax
        call    lwkt_smp_stopped
        popq    %rax
        btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b

        MPLOCKED
        btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        MPLOCKED
        btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */

        test    %eax, %eax
        jnz     2f

        movq    CNAME(cpustop_restartfunc), %rax
        test    %rax, %rax
        jz      2f
        movq    $0, CNAME(cpustop_restartfunc)  /* One-shot */

        call    *%rax
2:
        popq    %r11
        popq    %r10
        popq    %r9
        popq    %r8
        popq    %rdi
        popq    %rsi
        popq    %rdx
        popq    %rcx
        popq    %rax

#if JG
        popl    %fs
        popl    %ds                     /* restore previous data segment */
#endif
        movq    %rbp, %rsp
        popq    %rbp
        iretq

        /*
         * For now we just have one ipiq IPI, but what we really want is
         * one per source cpu so the APICs don't get stalled backlogging
         * the requests.
         */
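        /*
         * Xipiq (and Xtimer below) follow the same defer-or-run pattern
         * as the FAST_INTR vectors: if the interrupted thread is already
         * in a critical section the request is only recorded in reqflags
         * and picked up later when the critical section ends, otherwise
         * it is handled immediately inside a temporary critical section.
         * A sketch only; the vector code below is authoritative:
         *
         *      lapic_eoi();
         *      if (td->td_pri >= TDPRI_CRIT) {
         *              reqflags |= RQF_IPIQ;   (serviced later)
         *      } else {
         *              td->td_pri += TDPRI_CRIT;
         *              lwkt_process_ipiq_frame(frame);
         *              td->td_pri -= TDPRI_CRIT;
         *              doreti();
         *      }
         */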
        .text
        SUPERALIGN_TEXT
        .globl Xipiq
Xipiq:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_IPI
        movq    PCPU(curthread),%rbx
        cmpl    $TDPRI_CRIT,TD_PRI(%rbx)
        jge     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        addl    $TDPRI_CRIT,TD_PRI(%rbx)
        call    lwkt_process_ipiq_frame
        subl    $TDPRI_CRIT,TD_PRI(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_IPIQ,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

        .text
        SUPERALIGN_TEXT
        .globl Xtimer
Xtimer:
        APIC_PUSH_FRAME
        movq    lapic, %rax
        movl    $0, LA_EOI(%rax)        /* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl    PCPU(cnt) + V_TIMER
        movq    PCPU(curthread),%rbx
        cmpl    $TDPRI_CRIT,TD_PRI(%rbx)
        jge     1f
        testl   $-1,TD_NEST_COUNT(%rbx)
        jne     1f
        subq    $8,%rsp                 /* make same as interrupt frame */
        movq    %rsp,%rdi               /* pass frame by reference */
        incl    PCPU(intr_nesting_level)
        addl    $TDPRI_CRIT,TD_PRI(%rbx)
        call    lapic_timer_process_frame
        subl    $TDPRI_CRIT,TD_PRI(%rbx)
        decl    PCPU(intr_nesting_level)
        addq    $8,%rsp                 /* turn into trapframe */
        MEXITCOUNT
        jmp     doreti
1:
        orl     $RQF_TIMER,PCPU(reqflags)
        MEXITCOUNT
        APIC_POP_FRAME
        iretq

#ifdef APIC_IO

MCOUNT_LABEL(bintr)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
MCOUNT_LABEL(eintr)

#endif

        .data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus
stopped_cpus:
        .long   0
started_cpus:
        .long   0

        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
        .quad 0

        .globl  apic_pin_trigger
apic_pin_trigger:
        .long   0

        .text