2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
7 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14 #include <machine/segments.h>
16 #include <machine_base/icu/icu.h>
17 #include <bus/isa/isa.h>
23 #include <machine/smp.h>
24 #include <machine_base/isa/intr_machdep.h>
26 /* convert an absolute IRQ# into a bitmask */
27 #define IRQ_LBIT(irq_num) (1 << (irq_num))
29 /* make an index into the IO APIC from the IRQ# */
30 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/*
 * x86 LOCK prefix for atomic read-modify-write instructions; the trailing
 * ';' lets it directly prefix the following instruction inside macros.
 */
33 #define MPLOCKED lock ;
/*
 * Build the trap frame that doreti expects: PUSH_FRAME saves the register
 * set, then the extra trapframe fields (xflags/trapno/addr/flags/err) are
 * zeroed so the frame is fully initialized for C code.
 * NOTE(review): the '\'-continuation appears truncated in this extract —
 * confirm the macro's tail against the full source.
 */
38 #define APIC_PUSH_FRAME \
39 PUSH_FRAME ; /* 15 regs + space for 5 extras */ \
40 movq $0,TF_XFLAGS(%rsp) ; \
41 movq $0,TF_TRAPNO(%rsp) ; \
42 movq $0,TF_ADDR(%rsp) ; \
43 movq $0,TF_FLAGS(%rsp) ; \
44 movq $0,TF_ERR(%rsp) ; \
48 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
49 * segment register being changed (e.g. by procfs), which is why syscalls
/* Inverse of APIC_PUSH_FRAME: restore the register set saved above. */
52 #define APIC_POP_FRAME POP_FRAME
/*
 * Accessors into the int_to_apicintpin[] table: each AIMI_SIZE-byte entry
 * holds, for one IRQ, the IO APIC register base address, the redirection
 * table index for the pin, and per-pin flag bits (masked/level).
 */
54 #define IOAPICADDR(irq_num) \
55 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_APIC_ADDRESS
56 #define REDIRIDX(irq_num) \
57 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_REDIRINDEX
58 #define IOAPICFLAGS(irq_num) \
59 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_FLAGS
/*
 * Mask an IRQ at its IO APIC pin, under the imask lock, unless the AIMI
 * flags already record it as masked.  The redirection index is written to
 * the IO APIC's select register (at the cached base address) and the
 * IOART_INTMASK bit is set through the data window register.
 * NOTE(review): the continuation after label 7: (presumably the matching
 * APIC_IMASK_UNLOCK) is not visible in this extract — confirm.
 */
61 #define MASK_IRQ(irq_num) \
62 APIC_IMASK_LOCK ; /* into critical reg */ \
63 testl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
64 jne 7f ; /* masked, don't mask */ \
65 orl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
66 /* set the mask bit */ \
67 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
68 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
69 movl %eax, (%rcx) ; /* write the index */ \
70 orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */ \
71 7: ; /* already masked */ \
75 * Test to see whether we are handling an edge or level triggered INT.
76 * Level-triggered INTs must still be masked as we don't clear the source,
77 * and the EOI cycle would cause redundant INTs to occur.
/*
 * NOTE(review): truncated in this extract — for the level-triggered case
 * this presumably falls into a MASK_IRQ-style body before label 9:;
 * confirm against the full source.
 */
79 #define MASK_LEVEL_IRQ(irq_num) \
80 testl $AIMI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
81 jz 9f ; /* edge, don't mask */ \
86 * Test to see if the source is currently masked, clear if so.
/*
 * Unmask an IRQ at its IO APIC pin, under the imask lock, if the AIMI
 * flags record it as masked: clear the soft flag, select the redirection
 * entry via the index register, and clear IOART_INTMASK through the data
 * window.  Label 7: is the "not masked" fast path.
 */
88 #define UNMASK_IRQ(irq_num) \
91 APIC_IMASK_LOCK ; /* into critical reg */ \
92 testl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
93 je 7f ; /* bit clear, not masked */ \
94 andl $~AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
95 /* clear mask bit */ \
96 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
97 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
98 movl %eax,(%rcx) ; /* write the index */ \
99 andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
101 APIC_IMASK_UNLOCK ; \
104 #ifdef SMP /* APIC-IO */
107 * Fast interrupt call handlers run in the following sequence:
109 * - Push the trap frame required by doreti
110 * - Mask the interrupt and reenable its source
111 * - If we cannot take the interrupt set its fpending bit and
113 * - If we can take the interrupt clear its fpending bit,
114 * call the handler, then unmask and doreti.
116 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * Per-IRQ fast-interrupt stub generator.  Visible flow: mask a
 * level-triggered source, EOI the local APIC (LA_EOI via %rax), then if
 * the current thread is in a critical section or nested interrupt
 * (TD_CRITCOUNT / TD_NEST_COUNT non-zero) record the IRQ in the per-cpu
 * fpending mask and RQF_INTPEND and leave it masked; otherwise clear the
 * pending bit and call ithread_fast_handler inside a critical section,
 * unmasking afterward when it returns 0.
 * NOTE(review): the IDTVEC entry, APIC_PUSH_FRAME, the conditional branch
 * targets (1:/2: labels) and the doreti tail are missing from this
 * extract — the branches below reference labels not visible here.
 */
119 #define FAST_INTR(irq_num, vec_name) \
124 FAKE_MCOUNT(TF_RIP(%rsp)) ; \
125 MASK_LEVEL_IRQ(irq_num) ; \
127 movl $0, LA_EOI(%rax) ; \
128 movq PCPU(curthread),%rbx ; \
129 testl $-1,TD_NEST_COUNT(%rbx) ; \
131 testl $-1,TD_CRITCOUNT(%rbx) ; \
134 /* in critical section, make interrupt pending */ \
135 /* set the pending bit and return, leave interrupt masked */ \
136 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
137 orl $RQF_INTPEND,PCPU(reqflags) ; \
140 /* clear pending bit, run handler */ \
141 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
142 pushq $irq_num ; /* trapframe -> intrframe */ \
143 movq %rsp, %rdi ; /* pass frame by reference */ \
144 incl TD_CRITCOUNT(%rbx) ; \
146 call ithread_fast_handler ; /* returns 0 to unmask */ \
147 decl TD_CRITCOUNT(%rbx) ; \
148 addq $8, %rsp ; /* intrframe -> trapframe */ \
149 UNMASK_IRQ(irq_num) ; \
157 * Handle "spurious INTerrupts".
159 * This is different than the "spurious INTerrupt" generated by an
160 * 8259 PIC for missing INTs. See the APIC documentation for details.
161 * This routine should NOT do an 'EOI' cycle.
168 /* No EOI cycle used here */
174 * Handle TLB shootdowns.
176 * NOTE: interrupts are left disabled.
/*
 * NOTE(review): the IDTVEC entry label and the frame/APIC-base setup for
 * this handler are not visible in this extract; the code below assumes
 * %rax already holds the local APIC base (LA_EOI write) — confirm.
 */
184 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
185 FAKE_MCOUNT(TF_RIP(%rsp))
/* Push a dummy vector slot so the trapframe looks like an intrframe. */
186 subq $8,%rsp /* make same as interrupt frame */
187 movq %rsp,%rdi /* pass frame by reference */
188 call smp_invltlb_intr
189 addq $8,%rsp /* turn into trapframe */
195 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
197 * - We cannot call doreti
198 * - Signals its receipt.
199 * - Waits for permission to restart.
200 * - Processing pending IPIQ events while waiting.
201 * - Signals its restart.
/*
 * NOTE(review): the IDTVEC entry, frame setup, and several loop labels /
 * conditional branches of this handler are missing from this extract;
 * the instructions below are a fragment of the stop/restart protocol.
 */
210 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
/* Save this cpu's context into stoppcbs[cpuid] (entry size PCB_SIZE).
 * NOTE(review): the add of the scaled offset into %rdi is not visible
 * here — confirm against the full source. */
212 movl PCPU(cpuid), %eax
213 imull $PCB_SIZE, %eax
214 leaq CNAME(stoppcbs), %rdi
216 call CNAME(savectx) /* Save process context */
/* %rax = cpuid, used as the bit index into the cpu masks below. */
218 movslq PCPU(cpuid), %rax
221 * Indicate that we have stopped and loop waiting for permission
222 * to start again. We must still process IPI events while in a
225 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
226 * (e.g. Xtimer, Xinvltlb).
229 btsq %rax, stopped_cpus /* stopped_cpus |= (1<<id) */
232 andl $~RQF_IPIQ,PCPU(reqflags)
234 call lwkt_smp_stopped
237 btq %rax, started_cpus /* while (!(started_cpus & (1<<id))) */
241 btrq %rax, started_cpus /* started_cpus &= ~(1<<id) */
243 btrq %rax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
/* Run the optional restart hook exactly once, then clear it. */
248 movq CNAME(cpustop_restartfunc), %rax
251 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
260 * For now just have one ipiq IPI, but what we really want is
261 * to have one for each source cpu so the APICs don't get stalled
262 * backlogging the requests.
/*
 * IPI-queue interrupt fragment: EOI the local APIC, bump the V_IPI
 * counter, and — depending on the TD_CRITCOUNT test whose branch target
 * is not visible here — either process the queue via
 * lwkt_process_ipiq_frame inside a critical section, or just set
 * RQF_IPIQ so it is processed later.
 * NOTE(review): entry label, frame setup, and branch/label lines are
 * missing from this extract.
 */
270 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
271 FAKE_MCOUNT(TF_RIP(%rsp))
273 incl PCPU(cnt) + V_IPI
274 movq PCPU(curthread),%rbx
275 testl $-1,TD_CRITCOUNT(%rbx)
277 subq $8,%rsp /* make same as interrupt frame */
278 movq %rsp,%rdi /* pass frame by reference */
279 incl PCPU(intr_nesting_level)
280 incl TD_CRITCOUNT(%rbx)
282 call lwkt_process_ipiq_frame
283 decl TD_CRITCOUNT(%rbx)
284 decl PCPU(intr_nesting_level)
285 addq $8,%rsp /* turn into trapframe */
289 orl $RQF_IPIQ,PCPU(reqflags)
/*
 * LAPIC timer interrupt fragment: EOI, bump the V_TIMER counter, and —
 * depending on the TD_CRITCOUNT / TD_NEST_COUNT tests whose branch
 * targets are not visible here — either call lapic_timer_process_frame
 * inside a critical section, or just set RQF_TIMER for later processing.
 * NOTE(review): entry label, frame setup, and branch/label lines are
 * missing from this extract.
 */
300 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
301 FAKE_MCOUNT(TF_RIP(%rsp))
303 incl PCPU(cnt) + V_TIMER
304 movq PCPU(curthread),%rbx
305 testl $-1,TD_CRITCOUNT(%rbx)
307 testl $-1,TD_NEST_COUNT(%rbx)
309 subq $8,%rsp /* make same as interrupt frame */
310 movq %rsp,%rdi /* pass frame by reference */
311 incl PCPU(intr_nesting_level)
312 incl TD_CRITCOUNT(%rbx)
314 call lapic_timer_process_frame
315 decl TD_CRITCOUNT(%rbx)
316 decl PCPU(intr_nesting_level)
317 addq $8,%rsp /* turn into trapframe */
321 orl $RQF_TIMER,PCPU(reqflags)
326 #ifdef SMP /* APIC-IO */
/* Instantiate one fast-interrupt IDT vector stub per IO APIC IRQ 0-23. */
329 FAST_INTR(0,apic_fastintr0)
330 FAST_INTR(1,apic_fastintr1)
331 FAST_INTR(2,apic_fastintr2)
332 FAST_INTR(3,apic_fastintr3)
333 FAST_INTR(4,apic_fastintr4)
334 FAST_INTR(5,apic_fastintr5)
335 FAST_INTR(6,apic_fastintr6)
336 FAST_INTR(7,apic_fastintr7)
337 FAST_INTR(8,apic_fastintr8)
338 FAST_INTR(9,apic_fastintr9)
339 FAST_INTR(10,apic_fastintr10)
340 FAST_INTR(11,apic_fastintr11)
341 FAST_INTR(12,apic_fastintr12)
342 FAST_INTR(13,apic_fastintr13)
343 FAST_INTR(14,apic_fastintr14)
344 FAST_INTR(15,apic_fastintr15)
345 FAST_INTR(16,apic_fastintr16)
346 FAST_INTR(17,apic_fastintr17)
347 FAST_INTR(18,apic_fastintr18)
348 FAST_INTR(19,apic_fastintr19)
349 FAST_INTR(20,apic_fastintr20)
350 FAST_INTR(21,apic_fastintr21)
351 FAST_INTR(22,apic_fastintr22)
352 FAST_INTR(23,apic_fastintr23)
359 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
360 .globl stopped_cpus, started_cpus
/*
 * Optional hook run by Xcpustop on restart; cleared after one use.
 * NOTE(review): the storage directive following the label is not visible
 * in this extract.
 */
366 .globl CNAME(cpustop_restartfunc)
367 CNAME(cpustop_restartfunc):