2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
7 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14 #include <machine/segments.h>
16 #include <machine_base/icu/icu.h>
17 #include <bus/isa/isa.h>
23 #include <machine/smp.h>
24 #include <machine_base/isa/intr_machdep.h>
26 /* convert an absolute IRQ# into a bitmask */
/* NOTE(review): 32-bit shift — presumably only meaningful for irq_num 0..31
 * (the fpending mask it is OR'd into below is manipulated with orl/andl) */
27 #define IRQ_LBIT(irq_num) (1 << (irq_num))
29 /* make an index into the IO APIC from the IRQ# */
/* Redirection-table entries start at IOAPIC register 0x10 and each entry
 * occupies two 32-bit registers, hence base 0x10 + irq*2 */
30 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
33 #define MPLOCKED lock ;
/*
 * Build the trap frame that doreti expects: PUSH_FRAME saves the general
 * registers, then the software-defined trapframe fields are zeroed (an
 * external interrupt delivers no hardware trapno/err/addr).
 * NOTE(review): the trailing continuation on the TF_ERR line indicates
 * this macro has more lines than are visible in this excerpt.
 */
38 #define APIC_PUSH_FRAME \
39 PUSH_FRAME ; /* 15 regs + space for 5 extras */ \
40 movq $0,TF_XFLAGS(%rsp) ; \
41 movq $0,TF_TRAPNO(%rsp) ; \
42 movq $0,TF_ADDR(%rsp) ; \
43 movq $0,TF_FLAGS(%rsp) ; \
44 movq $0,TF_ERR(%rsp) ; \
48 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
49 * segment register being changed (e.g. by procfs), which is why syscalls
/* Tear down the frame built by APIC_PUSH_FRAME (plain POP_FRAME) */
52 #define APIC_POP_FRAME POP_FRAME
/*
 * Field accessors for the int_to_apicintpin[] table: given an absolute
 * IRQ#, compute the address of that entry's IOAPIC base address,
 * redirection-register index, and flags fields respectively.
 */
54 #define IOAPICADDR(irq_num) \
55 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_APIC_ADDRESS
56 #define REDIRIDX(irq_num) \
57 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_REDIRINDEX
58 #define IOAPICFLAGS(irq_num) \
59 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_FLAGS
/*
 * Mask an IRQ at its IOAPIC pin unless it is already masked: under the
 * imask lock, set AIMI_FLAG_MASKED, write the redirection index to the
 * IOAPIC index register, then set IOART_INTMASK via the window register.
 * NOTE(review): the continuation after the '7:' label indicates the
 * unlock tail of this macro is not visible in this excerpt.
 */
61 #define MASK_IRQ(irq_num) \
62 APIC_IMASK_LOCK ; /* into critical reg */ \
63 testl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
64 jne 7f ; /* masked, don't mask */ \
65 orl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
66 /* set the mask bit */ \
67 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
68 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
69 movl %eax, (%rcx) ; /* write the index */ \
70 orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */ \
71 7: ; /* already masked */ \
75 * Test to see whether we are handling an edge or level triggered INT.
76 * Level-triggered INTs must still be masked as we don't clear the source,
77 * and the EOI cycle would cause redundant INTs to occur.
/* NOTE(review): the '9f' target and the level-triggered masking body are
 * not visible in this excerpt (the macro continues past the jz line). */
79 #define MASK_LEVEL_IRQ(irq_num) \
80 testl $AIMI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
81 jz 9f ; /* edge, don't mask */ \
86 * Test to see if the source is currently masked, clear if so.
/*
 * Unmask an IRQ at its IOAPIC pin if (and only if) it is currently
 * masked: under the imask lock, clear AIMI_FLAG_MASKED, select the
 * redirection register via the index register, and clear IOART_INTMASK
 * through the window register.
 */
88 #define UNMASK_IRQ(irq_num) \
91 APIC_IMASK_LOCK ; /* into critical reg */ \
92 testl $AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
93 je 7f ; /* bit clear, not masked */ \
94 andl $~AIMI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
95 /* clear mask bit */ \
96 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
97 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
98 movl %eax,(%rcx) ; /* write the index */ \
99 andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
101 APIC_IMASK_UNLOCK ; \
104 #ifdef SMP /* APIC-IO */
107 * Fast interrupt call handlers run in the following sequence:
109 * - Push the trap frame required by doreti
110 * - Mask the interrupt and reenable its source
111 * - If we cannot take the interrupt set its fpending bit and
112 * doreti. Note that we cannot mess with mp_lock at all
113 * if we entered from a critical section!
114 * - If we can take the interrupt clear its fpending bit,
115 * call the handler, then unmask and doreti.
117 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * Fast interrupt entry for IRQ 'irq_num'; instantiated once per IRQ
 * further below.  Masks a level-triggered source, EOIs the local APIC,
 * then either runs ithread_fast_handler inside a critical section and
 * unmasks on return, or — when entered with TD_NEST_COUNT/TD_CRITCOUNT
 * nonzero — leaves the source masked and records it in the per-cpu
 * fpending mask (plus RQF_INTPEND) for later replay.
 * NOTE(review): the ENTRY label, the branch targets of the two testl
 * lines, and the doreti exit path are elided from this excerpt.
 */
120 #define FAST_INTR(irq_num, vec_name) \
125 FAKE_MCOUNT(TF_RIP(%rsp)) ; \
126 MASK_LEVEL_IRQ(irq_num) ; \
128 movl $0, LA_EOI(%rax) ; \
129 movq PCPU(curthread),%rbx ; \
130 testl $-1,TD_NEST_COUNT(%rbx) ; \
132 testl $-1,TD_CRITCOUNT(%rbx) ; \
135 /* in critical section, make interrupt pending */ \
136 /* set the pending bit and return, leave interrupt masked */ \
137 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
138 orl $RQF_INTPEND,PCPU(reqflags) ; \
141 /* clear pending bit, run handler */ \
142 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
143 pushq $irq_num ; /* trapframe -> intrframe */ \
144 movq %rsp, %rdi ; /* pass frame by reference */ \
145 incl TD_CRITCOUNT(%rbx) ; \
147 call ithread_fast_handler ; /* returns 0 to unmask */ \
148 decl TD_CRITCOUNT(%rbx) ; \
149 addq $8, %rsp ; /* intrframe -> trapframe */ \
150 UNMASK_IRQ(irq_num) ; \
158 * Handle "spurious INTerrupts".
160 * This is different than the "spurious INTerrupt" generated by an
161 * 8259 PIC for missing INTs. See the APIC documentation for details.
162 * This routine should NOT do an 'EOI' cycle.
169 /* No EOI cycle used here */
175 * Handle TLB shootdowns.
177 * NOTE: interrupts are left disabled.
/* NOTE(review): the handler's ENTRY label and frame setup are elided; by
 * this point %rax presumably holds the local APIC mapping — confirm. */
185 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
186 FAKE_MCOUNT(TF_RIP(%rsp))
187 subq $8,%rsp /* make same as interrupt frame */
188 movq %rsp,%rdi /* pass frame by reference */
/* smp_invltlb_intr() performs the actual per-cpu TLB invalidation */
189 call smp_invltlb_intr
190 addq $8,%rsp /* turn into trapframe */
196 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
198 * - We cannot call doreti
199 * - Signals its receipt.
200 * - Waits for permission to restart.
201 * - Processing pending IPIQ events while waiting.
202 * - Signals its restart.
/* NOTE(review): entry label, frame setup and several branch/loop labels
 * are elided from this excerpt. */
211 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
/* Compute &stoppcbs[cpuid] and save this cpu's context there */
213 movl PCPU(cpuid), %eax
214 imull $PCB_SIZE, %eax
215 leaq CNAME(stoppcbs), %rdi
217 call CNAME(savectx) /* Save process context */
/* Reload cpuid: %eax is the bit index into the stop/start masks below */
219 movl PCPU(cpuid), %eax
222 * Indicate that we have stopped and loop waiting for permission
223 * to start again. We must still process IPI events while in a
226 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
227 * (e.g. Xtimer, Xinvltlb).
230 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
233 andl $~RQF_IPIQ,PCPU(reqflags)
/* Drain pending IPIQ work while parked (loop labels elided) */
235 call lwkt_smp_stopped
238 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
242 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
244 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
/* Run the optional restart callback exactly once, then clear it */
249 movq CNAME(cpustop_restartfunc), %rax
252 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
261 * For now just have one ipiq IPI, but what we really want is
262 * to have one for each source cpu so the APICs don't get stalled
263 * backlogging the requests.
/* NOTE(review): entry label and the branch target of the TD_CRITCOUNT
 * test are elided; the orl at the bottom is the deferred (already in a
 * critical section) path. */
271 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
272 FAKE_MCOUNT(TF_RIP(%rsp))
/* Account the IPI, then process the queue inside a critical section */
274 incl PCPU(cnt) + V_IPI
275 movq PCPU(curthread),%rbx
276 testl $-1,TD_CRITCOUNT(%rbx)
278 subq $8,%rsp /* make same as interrupt frame */
279 movq %rsp,%rdi /* pass frame by reference */
280 incl PCPU(intr_nesting_level)
281 incl TD_CRITCOUNT(%rbx)
283 call lwkt_process_ipiq_frame
284 decl TD_CRITCOUNT(%rbx)
285 decl PCPU(intr_nesting_level)
286 addq $8,%rsp /* turn into trapframe */
/* Deferred path: flag the request so it is replayed at crit exit */
290 orl $RQF_IPIQ,PCPU(reqflags)
/* NOTE(review): entry label and the branch targets of the two testl
 * lines are elided; the orl at the bottom is the deferred path. */
301 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
302 FAKE_MCOUNT(TF_RIP(%rsp))
/* Account the timer tick, then run the handler in a critical section
 * unless we interrupted a critical section or a nested interrupt */
304 incl PCPU(cnt) + V_TIMER
305 movq PCPU(curthread),%rbx
306 testl $-1,TD_CRITCOUNT(%rbx)
308 testl $-1,TD_NEST_COUNT(%rbx)
310 subq $8,%rsp /* make same as interrupt frame */
311 movq %rsp,%rdi /* pass frame by reference */
312 incl PCPU(intr_nesting_level)
313 incl TD_CRITCOUNT(%rbx)
315 call lapic_timer_process_frame
316 decl TD_CRITCOUNT(%rbx)
317 decl PCPU(intr_nesting_level)
318 addq $8,%rsp /* turn into trapframe */
/* Deferred path: flag the timer request for replay at crit exit */
322 orl $RQF_TIMER,PCPU(reqflags)
327 #ifdef SMP /* APIC-IO */
/* Instantiate a fast-interrupt handler for each of the 24 IOAPIC pins */
330 FAST_INTR(0,apic_fastintr0)
331 FAST_INTR(1,apic_fastintr1)
332 FAST_INTR(2,apic_fastintr2)
333 FAST_INTR(3,apic_fastintr3)
334 FAST_INTR(4,apic_fastintr4)
335 FAST_INTR(5,apic_fastintr5)
336 FAST_INTR(6,apic_fastintr6)
337 FAST_INTR(7,apic_fastintr7)
338 FAST_INTR(8,apic_fastintr8)
339 FAST_INTR(9,apic_fastintr9)
340 FAST_INTR(10,apic_fastintr10)
341 FAST_INTR(11,apic_fastintr11)
342 FAST_INTR(12,apic_fastintr12)
343 FAST_INTR(13,apic_fastintr13)
344 FAST_INTR(14,apic_fastintr14)
345 FAST_INTR(15,apic_fastintr15)
346 FAST_INTR(16,apic_fastintr16)
347 FAST_INTR(17,apic_fastintr17)
348 FAST_INTR(18,apic_fastintr18)
349 FAST_INTR(19,apic_fastintr19)
350 FAST_INTR(20,apic_fastintr20)
351 FAST_INTR(21,apic_fastintr21)
352 FAST_INTR(22,apic_fastintr22)
353 FAST_INTR(23,apic_fastintr23)
360 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
361 .globl stopped_cpus, started_cpus
/* Optional restart callback run by Xcpustop on resume; cleared after
 * one use ("one-shot").  Storage directive elided from this excerpt. */
367 .globl CNAME(cpustop_restartfunc)
368 CNAME(cpustop_restartfunc):