2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
7 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14 #include <machine/segments.h>
16 #include <machine_base/icu/icu.h>
17 #include <bus/isa/isa.h>
22 #include <machine_base/apic/ioapic_ipl.h>
23 #include <machine/intr_machdep.h>
26 /* convert an absolute IRQ# into bitmask */
27 #define IRQ_LBIT(irq_num) (1UL << (irq_num & 0x3f))
/* shift count (0-63) of the IRQ's bit within its 64-bit ipending word */
30 #define IRQ_SBITS(irq_num) ((irq_num) & 0x3f)
32 /* convert an absolute IRQ# into gd_ipending index */
33 #define IRQ_LIDX(irq_num) ((irq_num) >> 6)
/* "lock" prefix: make the following read-modify-write atomic (MP safe) */
35 #define MPLOCKED lock ;
/*
 * Build the trap frame expected by doreti for an APIC interrupt entry:
 * PUSH_FRAME saves the registers, then the trap-specific fields that the
 * hardware did not supply (xflags/trapno/addr/flags/err) are zeroed.
 * NOTE(review): the macro continues on lines not visible in this chunk.
 */
37 #define APIC_PUSH_FRAME \
38 PUSH_FRAME ; /* 15 regs + space for 5 extras */ \
39 movq $0,TF_XFLAGS(%rsp) ; \
40 movq $0,TF_TRAPNO(%rsp) ; \
41 movq $0,TF_ADDR(%rsp) ; \
42 movq $0,TF_FLAGS(%rsp) ; \
43 movq $0,TF_ERR(%rsp) ; \
47 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
48 * segment register being changed (e.g. by procfs), which is why syscalls
51 #define APIC_POP_FRAME \
54 #define IOAPICADDR(irq_num) \
55 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_ADDR
/* address of the cached redirection-table index slot for this IRQ */
56 #define REDIRIDX(irq_num) \
57 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_IDX
/* address of the software flags word (LEVEL/MASKED bits) for this IRQ */
58 #define IOAPICFLAGS(irq_num) \
59 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_FLAGS
/*
 * MASK_IRQ(irq_num): under the imask spinlock, set the IOAPIC mask bit
 * for this IRQ unless the software MASKED flag says it is already set.
 * The IOAPIC is programmed through its index/window register pair.
 */
61 #define MASK_IRQ(irq_num) \
62 IOAPIC_IMASK_LOCK ; /* into critical reg */ \
63 testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
64 jne 7f ; /* masked, don't mask */ \
65 orl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
66 /* set the mask bit */ \
67 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
68 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
69 movl %eax, (%rcx) ; /* write the index */ \
70 orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */ \
71 7: ; /* already masked */ \
72 IOAPIC_IMASK_UNLOCK ; \
75 * Test to see whether we are handling an edge or level triggered INT.
76 * Level-triggered INTs must still be masked as we don't clear the source,
77 * and the EOI cycle would cause redundant INTs to occur.
79 #define MASK_LEVEL_IRQ(irq_num) \
80 testl $IOAPIC_IRQI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; /* level triggered? */ \
81 jz 9f ; /* edge, don't mask */ \
86 * Test to see if the source is currently masked, clear if so.
88 #define UNMASK_IRQ(irq_num) \
91 IOAPIC_IMASK_LOCK ; /* into critical reg */ \
92 testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
93 je 7f ; /* bit clear, not masked */ \
94 andl $~IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
95 /* clear mask bit */ \
96 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
97 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
98 movl %eax,(%rcx) ; /* write the index */ \
99 andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
101 IOAPIC_IMASK_UNLOCK ; /* release imask spinlock */ \
105 * Interrupt call handlers run in the following sequence:
107 * - Push the trap frame required by doreti
108 * - Mask the interrupt and reenable its source
109 * - If we cannot take the interrupt set its ipending bit and
111 * - If we can take the interrupt clear its ipending bit,
112 * call the handler, then unmask and doreti.
114 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
117 #define INTR_HANDLER(irq_num) \
120 IDTVEC(ioapic_intr##irq_num) ; \
122 FAKE_MCOUNT(TF_RIP(%rsp)) ; \
123 MASK_LEVEL_IRQ(irq_num) ; \
125 movl $0, LA_EOI(%rax) ; /* APIC EOI (End Of Interrupt) */ \
126 movq PCPU(curthread),%rbx ; /* %rbx = current thread */ \
127 testl $-1,TD_NEST_COUNT(%rbx) ; \
129 testl $-1,TD_CRITCOUNT(%rbx) ; \
132 /* in critical section, make interrupt pending */ \
133 /* set the pending bit and return, leave interrupt masked */ \
135 shlq $IRQ_SBITS(irq_num),%rcx ; \
136 movq $IRQ_LIDX(irq_num),%rdx ; \
137 orq %rcx,PCPU_E8(ipending,%rdx) ; \
138 orl $RQF_INTPEND,PCPU(reqflags) ; \
141 /* clear pending bit, run handler */ \
143 shlq $IRQ_SBITS(irq_num),%rcx ; \
145 movq $IRQ_LIDX(irq_num),%rdx ; \
146 andq %rcx,PCPU_E8(ipending,%rdx) ; \
147 pushq $irq_num ; /* trapframe -> intrframe */ \
148 movq %rsp, %rdi ; /* pass frame by reference */ \
149 incl TD_CRITCOUNT(%rbx) ; \
151 call ithread_fast_handler ; /* returns 0 to unmask */ \
152 decl TD_CRITCOUNT(%rbx) ; \
153 addq $8, %rsp ; /* intrframe -> trapframe */ \
154 UNMASK_IRQ(irq_num) ; \
160 * Handle "spurious INTerrupts".
162 * NOTE: This is different than the "spurious INTerrupt" generated by an
163 * 8259 PIC for missing INTs. See the APIC documentation for details.
164 * This routine should NOT do an 'EOI' cycle.
166 * NOTE: Even though we don't do anything here we must still swapgs if
167 * coming from a user frame in case the iretq faults... just use
168 * the nominal APIC_PUSH_FRAME sequence to get it done.
175 /* No EOI cycle used here */
/* profiling hook (mcount) only */
176 FAKE_MCOUNT(TF_RIP(%rsp))
182 * Handle TLB shootdowns.
184 * NOTE: interrupts are left disabled.
 * Acks the local APIC, then calls smp_invltlb_intr with the frame.
192 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
193 FAKE_MCOUNT(TF_RIP(%rsp))
194 subq $8,%rsp /* make same as interrupt frame */
195 movq %rsp,%rdi /* pass frame by reference */
196 call smp_invltlb_intr
197 addq $8,%rsp /* turn into trapframe */
203 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
205 * - We cannot call doreti
206 * - Signals its receipt.
207 * - Waits for permission to restart.
208 * - Processes pending IPIQ events while waiting.
209 * - Signals its restart.
218 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
220 movl PCPU(cpuid), %eax
221 imull $PCB_SIZE, %eax
222 leaq CNAME(stoppcbs), %rdi
/* save this cpu's context into its stoppcbs[] slot */
224 call CNAME(savectx) /* Save process context */
227 * Indicate that we have stopped and loop waiting for permission
228 * to start again. We must still process IPI events while in a
231 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
232 * (e.g. Xtimer, Xinvltlb).
234 #if CPUMASK_ELEMENTS != 4
235 #error "assembly incompatible with cpumask_t"
237 movq PCPU(cpumask)+0,%rax /* stopped_cpus |= 1 << cpuid */
238 MPLOCKED orq %rax, stopped_cpus+0
239 movq PCPU(cpumask)+8,%rax
240 MPLOCKED orq %rax, stopped_cpus+8
241 movq PCPU(cpumask)+16,%rax
242 MPLOCKED orq %rax, stopped_cpus+16
243 movq PCPU(cpumask)+24,%rax
244 MPLOCKED orq %rax, stopped_cpus+24
/* clear the pending-IPIQ request flag and process queued IPIs while stopped */
247 andl $~RQF_IPIQ,PCPU(reqflags)
248 call lwkt_smp_stopped
252 movq started_cpus+0,%rax /* while (!(started_cpus & (1<<id))) */
253 andq PCPU(cpumask)+0,%rax
255 movq started_cpus+8,%rax
256 andq PCPU(cpumask)+8,%rax
258 movq started_cpus+16,%rax
259 andq PCPU(cpumask)+16,%rax
261 movq started_cpus+24,%rax
262 andq PCPU(cpumask)+24,%rax
267 movq PCPU(other_cpus)+0,%rax /* started_cpus &= ~(1 << cpuid) */
268 MPLOCKED andq %rax, started_cpus+0
269 movq PCPU(other_cpus)+8,%rax
270 MPLOCKED andq %rax, started_cpus+8
271 movq PCPU(other_cpus)+16,%rax
272 MPLOCKED andq %rax, started_cpus+16
273 movq PCPU(other_cpus)+24,%rax
274 MPLOCKED andq %rax, started_cpus+24
276 movq PCPU(other_cpus)+0,%rax /* stopped_cpus &= ~(1 << cpuid) */
277 MPLOCKED andq %rax, stopped_cpus+0
278 movq PCPU(other_cpus)+8,%rax
279 MPLOCKED andq %rax, stopped_cpus+8
280 movq PCPU(other_cpus)+16,%rax
281 MPLOCKED andq %rax, stopped_cpus+16
282 movq PCPU(other_cpus)+24,%rax
283 MPLOCKED andq %rax, stopped_cpus+24
/* if a restart function was registered, fetch it (cleared below, one-shot) */
288 movq CNAME(cpustop_restartfunc), %rax
291 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
300 * For now just have one ipiq IPI, but what we really want is
301 * to have one for each source cpu so the APICs don't get stalled
302 * backlogging the requests.
310 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
311 FAKE_MCOUNT(TF_RIP(%rsp))
/* statistics: count received IPIs */
313 incl PCPU(cnt) + V_IPI
314 movq PCPU(curthread),%rbx
315 testl $-1,TD_CRITCOUNT(%rbx)
317 subq $8,%rsp /* make same as interrupt frame */
318 movq %rsp,%rdi /* pass frame by reference */
319 incl PCPU(intr_nesting_level)
320 incl TD_CRITCOUNT(%rbx)
322 call lwkt_process_ipiq_frame
323 decl TD_CRITCOUNT(%rbx)
324 decl PCPU(intr_nesting_level)
325 addq $8,%rsp /* turn into trapframe */
/* deferred path: flag pending IPIQ work (presumably processed by doreti) */
329 orl $RQF_IPIQ,PCPU(reqflags)
340 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
341 FAKE_MCOUNT(TF_RIP(%rsp))
343 subq $8,%rsp /* make same as interrupt frame */
344 movq %rsp,%rdi /* pass frame by reference */
345 call lapic_timer_always
346 addq $8,%rsp /* turn into trapframe */
/* statistics: count timer interrupts */
348 incl PCPU(cnt) + V_TIMER
349 movq TF_RIP(%rsp),%rbx /* sample addr before checking crit */
350 movq %rbx,PCPU(sample_pc)
351 movq PCPU(curthread),%rbx
352 testl $-1,TD_CRITCOUNT(%rbx)
354 testl $-1,TD_NEST_COUNT(%rbx)
356 subq $8,%rsp /* make same as interrupt frame */
357 movq %rsp,%rdi /* pass frame by reference */
358 incl PCPU(intr_nesting_level)
359 incl TD_CRITCOUNT(%rbx)
361 call lapic_timer_process_frame
362 decl TD_CRITCOUNT(%rbx)
363 decl PCPU(intr_nesting_level)
364 addq $8,%rsp /* turn into trapframe */
/* deferred path: flag pending timer work (presumably processed by doreti) */
368 orl $RQF_TIMER,PCPU(reqflags)
570 #if CPUMASK_ELEMENTS != 4
571 #error "assembly incompatible with cpumask_t"
573 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
574 .globl stopped_cpus, started_cpus
/* optional restart hook: fetched and zeroed (one-shot) by Xcpustop */
586 .globl CNAME(cpustop_restartfunc)
587 CNAME(cpustop_restartfunc):