2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
9 #include "opt_auto_eoi.h"
12 #include <machine/asmacros.h>
13 #include <machine/lock.h>
14 #include <machine/psl.h>
15 #include <machine/trap.h>
16 #include <machine/segments.h>
18 #include <machine_base/icu/icu.h>
19 #include <bus/isa/isa.h>
25 #include <machine/smp.h>
26 #include <machine_base/isa/intr_machdep.h>
28 /* convert an absolute IRQ# into a bitmask */
29 #define IRQ_LBIT(irq_num) (1 << (irq_num))
31 /* make an index into the IO APIC from the IRQ# */
/* redirection-table entries are 64-bit register pairs starting at index 0x10 */
32 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/* bus-lock prefix for atomic read-modify-write; the trailing ';' ends the statement inside macros */
35 #define MPLOCKED lock ;
/*
 * Build the trap frame doreti expects for an APIC interrupt: PUSH_FRAME
 * saves the general registers, then the trap-specific fields are zeroed
 * since a hardware interrupt supplies no trapno/err/fault address.
 * NOTE(review): this macro continues past the last line shown here
 * (continuation '\'); its tail is not visible in this chunk.
 */
40 #define APIC_PUSH_FRAME \
41 PUSH_FRAME ; /* 15 regs + space for 5 extras */ \
42 movq $0,TF_XFLAGS(%rsp) ; \
43 movq $0,TF_TRAPNO(%rsp) ; \
44 movq $0,TF_ADDR(%rsp) ; \
45 movq $0,TF_FLAGS(%rsp) ; \
46 movq $0,TF_ERR(%rsp) ; \
50 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
51 * segment register being changed (e.g. by procfs), which is why syscalls
54 #define APIC_POP_FRAME POP_FRAME
56 /* sizeof(struct apic_intmapinfo) == 24 */
/* +8 / +16 appear to be field offsets within apic_intmapinfo (ioapic */
/* register base and redirection index) -- TODO confirm against the struct */
57 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 8
58 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 16
/*
 * Mask the hardware pin for irq_num: under the IMASK lock, set the IRQ's
 * bit in apic_imen and set IOART_INTMASK in the pin's IO APIC redirection
 * entry (written via the indirect index/window register pair).  No-op if
 * the bit is already set.
 * NOTE(review): the unlock tail of this macro is not visible in this chunk.
 */
60 #define MASK_IRQ(irq_num) \
61 APIC_IMASK_LOCK ; /* into critical reg */ \
62 testl $IRQ_LBIT(irq_num), apic_imen ; \
63 jne 7f ; /* masked, don't mask */ \
64 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
65 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
66 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
67 movl %eax, (%rcx) ; /* write the index */ \
68 movl IOAPIC_WINDOW(%rcx), %eax ; /* current value */ \
69 orl $IOART_INTMASK, %eax ; /* set the mask */ \
70 movl %eax, IOAPIC_WINDOW(%rcx) ; /* new value */ \
71 7: ; /* already masked */ \
75 * Test to see whether we are handling an edge or level triggered INT.
76 * Level-triggered INTs must still be masked as we don't clear the source,
77 * and the EOI cycle would cause redundant INTs to occur.
/* NOTE(review): the level-triggered mask path and the 9: label are not visible in this chunk. */
79 #define MASK_LEVEL_IRQ(irq_num) \
80 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level-triggered pin? */ \
81 jz 9f ; /* edge, don't mask */ \
86 * Test to see if the source is currently masked; if so, clear the mask.
/*
 * Unmask the hardware pin for irq_num: under the IMASK lock, clear the
 * IRQ's bit in apic_imen and clear IOART_INTMASK in the pin's IO APIC
 * redirection entry.  No-op if the bit is already clear.
 * NOTE(review): some macro lines (e.g. the 7: label and the final tail)
 * are not visible in this chunk.
 */
88 #define UNMASK_IRQ(irq_num) \
91 APIC_IMASK_LOCK ; /* into critical reg */ \
92 testl $IRQ_LBIT(irq_num), apic_imen ; \
93 je 7f ; /* bit clear, not masked */ \
94 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
95 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
96 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
97 movl %eax,(%rcx) ; /* write the index */ \
98 movl IOAPIC_WINDOW(%rcx),%eax ; /* current value */ \
99 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
100 movl %eax,IOAPIC_WINDOW(%rcx) ; /* new value */ \
102 APIC_IMASK_UNLOCK ; \
108 * Fast interrupt call handlers run in the following sequence:
110 * - Push the trap frame required by doreti
111 * - Mask the interrupt and reenable its source
112 * - If we cannot take the interrupt set its fpending bit and
113 * doreti. Note that we cannot mess with mp_lock at all
114 * if we entered from a critical section!
115 * - If we can take the interrupt clear its fpending bit,
116 * call the handler, then unmask and doreti.
118 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * FAST_INTR(irq_num, vec_name) -- per-IRQ interrupt vector body.
 * Visible flow: mask the (level-triggered) source, EOI the local APIC,
 * then either mark the IRQ pending (when nested or inside a critical
 * section) or clear the pending bit, run the handler, and unmask.
 * NOTE(review): several macro lines (vector entry, APIC_PUSH_FRAME, the
 * conditional branch targets, and the doreti tail) fall outside this
 * chunk.
 */
121 #define FAST_INTR(irq_num, vec_name) \
126 FAKE_MCOUNT(TF_RIP(%rsp)) ; \
127 MASK_LEVEL_IRQ(irq_num) ; \
129 movl $0, LA_EOI(%rax) ; /* EOI the local APIC (%rax loaded in unseen lines) */ \
130 movq PCPU(curthread),%rbx ; \
131 testl $-1,TD_NEST_COUNT(%rbx) ; /* nested interrupt? */ \
133 testl $-1,TD_CRITCOUNT(%rbx) ; /* inside a critical section? */ \
136 /* in critical section, make interrupt pending */ \
137 /* set the pending bit and return, leave interrupt masked */ \
138 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
139 orl $RQF_INTPEND,PCPU(reqflags) ; \
142 /* clear pending bit, run handler */ \
143 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
144 pushq $irq_num ; /* trapframe -> intrframe */ \
145 movq %rsp, %rdi ; /* pass frame by reference */ \
146 incl TD_CRITCOUNT(%rbx) ; /* hold a critical section over the handler */ \
147 call ithread_fast_handler ; /* returns 0 to unmask */ \
148 decl TD_CRITCOUNT(%rbx) ; \
149 addq $8, %rsp ; /* intrframe -> trapframe */ \
150 UNMASK_IRQ(irq_num) ; \
158 * Handle "spurious INTerrupts".
160 * This is different than the "spurious INTerrupt" generated by an
161 * 8259 PIC for missing INTs. See the APIC documentation for details.
162 * This routine should NOT do an 'EOI' cycle.
169 /* No EOI cycle used here */
175 * Handle TLB shootdowns.
 * NOTE(review): fragment -- the vector entry, the write of %rax back to
 * %cr3 (which performs the actual flush), and the reload of %rax with
 * the local APIC base are outside this chunk.
183 movq %cr3, %rax /* invalidate the TLB */
187 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
194 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
196 * - We cannot call doreti
197 * - Signals its receipt.
198 * - Waits for permission to restart.
199 * - Processing pending IPIQ events while waiting.
200 * - Signals its restart.
 * NOTE(review): fragment -- the vector entry, labels, loop branches and
 * the final return path are outside this chunk.
209 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
211 movl PCPU(cpuid), %eax
212 imull $PCB_SIZE, %eax /* %eax = cpuid * PCB_SIZE: byte offset into stoppcbs[] */
213 leaq CNAME(stoppcbs), %rdi /* base added to offset in unseen lines, presumably */
215 call CNAME(savectx) /* Save process context */
217 movl PCPU(cpuid), %eax /* %eax = this cpu's id, used as bit index below */
220 * Indicate that we have stopped and loop waiting for permission
221 * to start again. We must still process IPI events while in a
225 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
227 andl $~RQF_IPIQ,PCPU(reqflags) /* acknowledge pending-IPIQ request flag */
229 call lwkt_smp_stopped
232 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
236 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
238 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
243 movq CNAME(cpustop_restartfunc), %rax /* optional restart hook */
246 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
255 * For now just have one ipiq IPI, but what we really want is
256 * to have one for each source cpu so the APICs don't get stalled
257 * backlogging the requests.
/* NOTE(review): Xipiq fragment -- the vector entry, APIC_PUSH_FRAME,
 * branch targets and the doreti tail are outside this chunk. */
265 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
266 FAKE_MCOUNT(TF_RIP(%rsp))
268 incl PCPU(cnt) + V_IPI /* statistics: count received IPIs */
269 movq PCPU(curthread),%rbx
270 testl $-1,TD_CRITCOUNT(%rbx) /* inside a critical section? */
272 subq $8,%rsp /* make same as interrupt frame */
273 movq %rsp,%rdi /* pass frame by reference */
274 incl PCPU(intr_nesting_level)
275 incl TD_CRITCOUNT(%rbx) /* critical section around IPIQ processing */
276 call lwkt_process_ipiq_frame
277 decl TD_CRITCOUNT(%rbx)
278 decl PCPU(intr_nesting_level)
279 addq $8,%rsp /* turn into trapframe */
283 orl $RQF_IPIQ,PCPU(reqflags) /* presumably the deferred path: flag IPIQ pending */
/* NOTE(review): Xtimer (local APIC timer) fragment -- the vector entry,
 * APIC_PUSH_FRAME, branch targets and the doreti tail are outside this
 * chunk. */
294 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
295 FAKE_MCOUNT(TF_RIP(%rsp))
297 incl PCPU(cnt) + V_TIMER /* statistics: count timer interrupts */
298 movq PCPU(curthread),%rbx
299 testl $-1,TD_CRITCOUNT(%rbx) /* inside a critical section? */
301 testl $-1,TD_NEST_COUNT(%rbx) /* nested interrupt? */
303 subq $8,%rsp /* make same as interrupt frame */
304 movq %rsp,%rdi /* pass frame by reference */
305 incl PCPU(intr_nesting_level)
306 incl TD_CRITCOUNT(%rbx) /* critical section around timer processing */
307 call lapic_timer_process_frame
308 decl TD_CRITCOUNT(%rbx)
309 decl PCPU(intr_nesting_level)
310 addq $8,%rsp /* turn into trapframe */
314 orl $RQF_TIMER,PCPU(reqflags) /* presumably the deferred path: flag timer pending */
/*
 * Instantiate one fast-interrupt vector per IRQ (0-23, one per IO APIC
 * pin) via the FAST_INTR macro above.
 */
322 FAST_INTR(0,apic_fastintr0)
323 FAST_INTR(1,apic_fastintr1)
324 FAST_INTR(2,apic_fastintr2)
325 FAST_INTR(3,apic_fastintr3)
326 FAST_INTR(4,apic_fastintr4)
327 FAST_INTR(5,apic_fastintr5)
328 FAST_INTR(6,apic_fastintr6)
329 FAST_INTR(7,apic_fastintr7)
330 FAST_INTR(8,apic_fastintr8)
331 FAST_INTR(9,apic_fastintr9)
332 FAST_INTR(10,apic_fastintr10)
333 FAST_INTR(11,apic_fastintr11)
334 FAST_INTR(12,apic_fastintr12)
335 FAST_INTR(13,apic_fastintr13)
336 FAST_INTR(14,apic_fastintr14)
337 FAST_INTR(15,apic_fastintr15)
338 FAST_INTR(16,apic_fastintr16)
339 FAST_INTR(17,apic_fastintr17)
340 FAST_INTR(18,apic_fastintr18)
341 FAST_INTR(19,apic_fastintr19)
342 FAST_INTR(20,apic_fastintr20)
343 FAST_INTR(21,apic_fastintr21)
344 FAST_INTR(22,apic_fastintr22)
345 FAST_INTR(23,apic_fastintr23)
352 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
/* cpu-id bitmasks manipulated with bts/btr in Xcpustop above */
/* NOTE(review): the storage directives for these symbols are outside this chunk */
353 .globl stopped_cpus, started_cpus
359 .globl CNAME(cpustop_restartfunc)
/* optional restart hook; cleared after use by Xcpustop ("One-shot") */
360 CNAME(cpustop_restartfunc):
/* per-pin edge/level trigger bitmask tested by MASK_LEVEL_IRQ */
363 .globl apic_pin_trigger