2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
7 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
14 #include <machine/segments.h>
16 #include <machine_base/icu/icu.h>
17 #include <bus/isa/isa.h>
22 #include <machine_base/apic/ioapic_ipl.h>
23 #include <machine/intr_machdep.h>
26 /* convert an absolute IRQ# into bitmask */
/*
 * NOTE: the argument must be fully parenthesized before masking so that
 * compound expressions (e.g. IRQ_LBIT(base + n)) bind correctly, matching
 * the hygiene of IRQ_SBITS()/IRQ_LIDX() below.
 */
27 #define IRQ_LBIT(irq_num)	(1UL << ((irq_num) & 0x3f))
/* convert an absolute IRQ# into a shift count (bit position within one 64-bit word) */
30 #define IRQ_SBITS(irq_num) ((irq_num) & 0x3f)
32 /* convert an absolute IRQ# into gd_ipending index */
/* (selects which 64-bit word of the per-cpu ipending array holds this IRQ) */
33 #define IRQ_LIDX(irq_num) ((irq_num) >> 6)
/* 'lock' prefix for atomic read-modify-write ops on memory shared between cpus */
35 #define MPLOCKED lock ;
/*
 * Build the trapframe doreti expects on interrupt entry: push the
 * register frame, then zero the trap-related fields (xflags, trapno,
 * addr, flags, err) that a hardware interrupt does not supply.
 */
37 #define APIC_PUSH_FRAME_TFRIP \
38 PUSH_FRAME_TFRIP ; /* 15 regs + space for 5 extras */ \
39 movq $0,TF_XFLAGS(%rsp) ; \
40 movq $0,TF_TRAPNO(%rsp) ; \
41 movq $0,TF_ADDR(%rsp) ; \
42 movq $0,TF_FLAGS(%rsp) ; \
43 movq $0,TF_ERR(%rsp) ; \
47 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
48 * segment register being changed (e.g. by procfs), which is why syscalls
/*
 * Tear down the frame built by APIC_PUSH_FRAME_TFRIP and leave via
 * 'lastinsn' (callers in this file pass "jmp doreti_iret").
 */
51 #define APIC_POP_FRAME(lastinsn) \
/*
 * Address-generation helpers: each yields the address of one field
 * (ADDR, IDX, or FLAGS) of the ioapic_irqs[] entry for an absolute IRQ#.
 */
54 #define IOAPICADDR(irq_num) \
55 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_ADDR
56 #define REDIRIDX(irq_num) \
57 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_IDX
58 #define IOAPICFLAGS(irq_num) \
59 CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_FLAGS
/*
 * Mask an IRQ at its IOAPIC redirection entry unless the cached software
 * MASKED flag says it is already masked.  Writes the redirection index
 * through the IOAPIC's indirect index register, then sets the mask bit in
 * the window register.  Clobbers %rcx, %eax, and the CPU flags.  Callers
 * in this file wrap it in IOAPIC_IMASK_LOCK/UNLOCK.
 */
61 #define MASK_IRQ(irq_num) \
62 testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
63 jne 7f ; /* masked, don't mask */ \
64 orl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
65 /* set the mask bit */ \
66 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
67 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
68 movl %eax, (%rcx) ; /* write the index */ \
69 orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */ \
70 7: ; /* already masked */ \
73 * Test to see whether we are handling an edge or level triggered INT.
74 * Level-triggered INTs must still be masked as we don't clear the source,
75 * and the EOI cycle would cause redundant INTs to occur.
/*
 * Mask only level-triggered sources: edge-triggered IRQs branch past the
 * masking sequence (9f), level-triggered IRQs fall through to be masked
 * (per the comment block above, their source is not cleared so an EOI
 * without masking would retrigger).  Clobbers flags; see MASK_IRQ for
 * additional clobbers on the level path.
 */
77 #define MASK_LEVEL_IRQ(irq_num) \
78 testl $IOAPIC_IRQI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
79 jz 9f ; /* edge, don't mask */ \
84 * Test to see if the source is currently masked, clear if so.
/*
 * Clear the IOAPIC redirection-entry mask bit if the cached software
 * MASKED flag shows the source masked, clearing the flag as well.
 * Clobbers %rcx, %eax, and the CPU flags; drops the imask lock on exit.
 */
86 #define UNMASK_IRQ(irq_num) \
90 testl $IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
91 je 7f ; /* bit clear, not masked */ \
92 andl $~IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
93 /* clear mask bit */ \
94 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
95 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
96 movl %eax,(%rcx) ; /* write the index */ \
97 andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
99 IOAPIC_IMASK_UNLOCK ; \
103 * Interrupt call handlers run in the following sequence:
105 * - Push the trap frame required by doreti
106 * - Mask the interrupt and reenable its source
107 * - If we cannot take the interrupt set its ipending bit and
109 * - If we can take the interrupt clear its ipending bit,
110 * call the handler, then unmask and doreti.
112 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * Per-IRQ interrupt entry point, instantiated as IDTVEC(ioapic_intrN).
 *
 * Pushes a doreti-compatible trapframe, masks the source if it is
 * level-triggered, and EOIs the local APIC under the imask lock.  Then:
 *  - if the current thread is nested or in a critical section, the IRQ
 *    bit is set in the per-cpu ipending array and RQF_INTPEND is raised,
 *    leaving the source masked for later processing;
 *  - otherwise the pending bit is cleared and ithread_fast_handler is
 *    called inside a critical section with the frame passed by reference
 *    (irq_num pushed to convert trapframe -> intrframe); a 0 return
 *    means the source should be unmasked again (UNMASK_IRQ).
 */
115 #define INTR_HANDLER(irq_num) \
118 IDTVEC(ioapic_intr##irq_num) ; \
119 APIC_PUSH_FRAME_TFRIP ; \
120 FAKE_MCOUNT(TF_RIP(%rsp)) ; \
121 IOAPIC_IMASK_LOCK ; \
122 MASK_LEVEL_IRQ(irq_num) ; \
123 movq lapic_eoi, %rax ; \
124 IOAPIC_IMASK_UNLOCK ; \
126 movq PCPU(curthread),%rbx ; \
127 testl $-1,TD_NEST_COUNT(%rbx) ; \
129 testl $-1,TD_CRITCOUNT(%rbx) ; \
132 /* in critical section, make interrupt pending */ \
133 /* set the pending bit and return, leave interrupt masked */ \
135 shlq $IRQ_SBITS(irq_num),%rcx ; \
136 movq $IRQ_LIDX(irq_num),%rdx ; \
137 orq %rcx,PCPU_E8(ipending,%rdx) ; \
138 orl $RQF_INTPEND,PCPU(reqflags) ; \
141 /* clear pending bit, run handler */ \
143 shlq $IRQ_SBITS(irq_num),%rcx ; \
145 movq $IRQ_LIDX(irq_num),%rdx ; \
146 andq %rcx,PCPU_E8(ipending,%rdx) ; \
147 pushq $irq_num ; /* trapframe -> intrframe */ \
148 movq %rsp, %rdi ; /* pass frame by reference */ \
149 incl TD_CRITCOUNT(%rbx) ; \
151 call ithread_fast_handler ; /* returns 0 to unmask */ \
152 cli ; /* interlock avoid stacking */ \
153 decl TD_CRITCOUNT(%rbx) ; \
154 addq $8, %rsp ; /* intrframe -> trapframe */ \
155 UNMASK_IRQ(irq_num) ; \
161 * Handle "spurious INTerrupts".
163 * NOTE: This is different than the "spurious INTerrupt" generated by an
164 * 8259 PIC for missing INTs. See the APIC documentation for details.
165 * This routine should NOT do an 'EOI' cycle.
167 * NOTE: Even though we don't do anything here we must still swapgs if
168 * coming from a user frame in case the iretq faults... just use
169 * the nominal APIC_PUSH_FRAME sequence to get it done.
175 APIC_PUSH_FRAME_TFRIP
176 /* No EOI cycle used here */
177 FAKE_MCOUNT(TF_RIP(%rsp))
179 APIC_POP_FRAME(jmp doreti_iret)
182 * Handle TLB shootdowns.
184 * NOTE: interrupts are left disabled.
190 APIC_PUSH_FRAME_TFRIP
192 callq *%rax /* End Of Interrupt to APIC */
193 FAKE_MCOUNT(TF_RIP(%rsp))
194 incl PCPU(cnt) + V_IPI
195 movq PCPU(curthread),%rbx
196 incl PCPU(intr_nesting_level)
197 incl TD_CRITCOUNT(%rbx)
198 subq $8,%rsp /* make same as interrupt frame */
199 movq %rsp,%rdi /* pass frame by reference */
200 orl $RQF_XINVLTLB,PCPU(reqflags) /* HVM interlock */
201 call smp_inval_intr /* called w/interrupts disabled */
202 addq $8,%rsp /* turn into trapframe */
203 decl TD_CRITCOUNT(%rbx)
204 decl PCPU(intr_nesting_level)
207 jmp doreti /* doreti b/c intrs enabled */
210 * Handle sniffs - sniff %rip and %rsp.
216 APIC_PUSH_FRAME_TFRIP
218 callq *%rax /* End Of Interrupt to APIC */
219 FAKE_MCOUNT(TF_RIP(%rsp))
220 incl PCPU(cnt) + V_IPI
222 call CNAME(hard_sniff) /* systat -pv and flame sniff */
224 APIC_POP_FRAME(jmp doreti_iret)
227 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
229 * - We cannot call doreti
230 * - Signals its receipt.
231 * - Waits for permission to restart.
232 * - Processing pending IPIQ events while waiting.
233 * - Signals its restart.
240 APIC_PUSH_FRAME_TFRIP
242 callq *%rax /* End Of Interrupt to APIC */
244 movl PCPU(cpuid), %eax
245 imull $PCB_SIZE, %eax
246 leaq CNAME(stoppcbs), %rdi
248 call CNAME(savectx) /* Save process context */
251 * Indicate that we have stopped and loop waiting for permission
252 * to start again. We must still process IPI events while in a
255 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
256 * (e.g. Xtimer, Xinvltlb).
258 #if CPUMASK_ELEMENTS != 4
259 #error "assembly incompatible with cpumask_t"
261 movq PCPU(cpumask)+0,%rax /* stopped_cpus |= 1 << cpuid */
262 MPLOCKED orq %rax, stopped_cpus+0
263 movq PCPU(cpumask)+8,%rax
264 MPLOCKED orq %rax, stopped_cpus+8
265 movq PCPU(cpumask)+16,%rax
266 MPLOCKED orq %rax, stopped_cpus+16
267 movq PCPU(cpumask)+24,%rax
268 MPLOCKED orq %rax, stopped_cpus+24
270 movq PCPU(curthread),%rbx
271 incl PCPU(intr_nesting_level)
272 incl TD_CRITCOUNT(%rbx)
275 andl $~RQF_IPIQ,PCPU(reqflags)
276 call lwkt_smp_stopped
280 movq started_cpus+0,%rax /* while (!(started_cpus & (1<<id))) */
281 andq PCPU(cpumask)+0,%rax
283 movq started_cpus+8,%rax
284 andq PCPU(cpumask)+8,%rax
286 movq started_cpus+16,%rax
287 andq PCPU(cpumask)+16,%rax
289 movq started_cpus+24,%rax
290 andq PCPU(cpumask)+24,%rax
295 movq PCPU(other_cpus)+0,%rax /* started_cpus &= ~(1 << cpuid) */
296 MPLOCKED andq %rax, started_cpus+0
297 movq PCPU(other_cpus)+8,%rax
298 MPLOCKED andq %rax, started_cpus+8
299 movq PCPU(other_cpus)+16,%rax
300 MPLOCKED andq %rax, started_cpus+16
301 movq PCPU(other_cpus)+24,%rax
302 MPLOCKED andq %rax, started_cpus+24
304 movq PCPU(other_cpus)+0,%rax /* stopped_cpus &= ~(1 << cpuid) */
305 MPLOCKED andq %rax, stopped_cpus+0
306 movq PCPU(other_cpus)+8,%rax
307 MPLOCKED andq %rax, stopped_cpus+8
308 movq PCPU(other_cpus)+16,%rax
309 MPLOCKED andq %rax, stopped_cpus+16
310 movq PCPU(other_cpus)+24,%rax
311 MPLOCKED andq %rax, stopped_cpus+24
316 movq CNAME(cpustop_restartfunc), %rax
319 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
323 decl TD_CRITCOUNT(%rbx)
324 decl PCPU(intr_nesting_level)
330 * For now just have one ipiq IPI, but what we really want is
331 * to have one for each source cpu so the APICs don't get stalled
332 * backlogging the requests.
338 APIC_PUSH_FRAME_TFRIP
340 callq *%rax /* End Of Interrupt to APIC */
341 FAKE_MCOUNT(TF_RIP(%rsp))
343 incl PCPU(cnt) + V_IPI
344 movq PCPU(curthread),%rbx
345 testl $-1,TD_CRITCOUNT(%rbx)
347 subq $8,%rsp /* make same as interrupt frame */
348 movq %rsp,%rdi /* pass frame by reference */
349 incl PCPU(intr_nesting_level)
350 incl TD_CRITCOUNT(%rbx)
353 xchgl %eax,PCPU(npoll) /* (atomic op) allow another Xipi */
354 call lwkt_process_ipiq_frame
355 cli /* interlock avoid stacking */
356 decl TD_CRITCOUNT(%rbx)
357 decl PCPU(intr_nesting_level)
358 addq $8,%rsp /* turn into trapframe */
362 orl $RQF_IPIQ,PCPU(reqflags)
364 APIC_POP_FRAME(jmp doreti_iret)
370 APIC_PUSH_FRAME_TFRIP
372 callq *%rax /* End Of Interrupt to APIC */
373 FAKE_MCOUNT(TF_RIP(%rsp))
375 subq $8,%rsp /* make same as interrupt frame */
376 movq %rsp,%rdi /* pass frame by reference */
377 call pcpu_timer_always
378 addq $8,%rsp /* turn into trapframe */
380 incl PCPU(cnt) + V_TIMER
381 movq TF_RIP(%rsp),%rbx /* sample addr before checking crit */
382 movq %rbx,PCPU(sample_pc)
383 movq PCPU(curthread),%rbx
384 testl $-1,TD_CRITCOUNT(%rbx)
386 testl $-1,TD_NEST_COUNT(%rbx)
388 subq $8,%rsp /* make same as interrupt frame */
389 movq %rsp,%rdi /* pass frame by reference */
390 incl PCPU(intr_nesting_level)
391 incl TD_CRITCOUNT(%rbx)
393 call pcpu_timer_process_frame
394 cli /* interlock avoid stacking */
395 decl TD_CRITCOUNT(%rbx)
396 decl PCPU(intr_nesting_level)
397 addq $8,%rsp /* turn into trapframe */
401 orl $RQF_TIMER,PCPU(reqflags)
403 APIC_POP_FRAME(jmp doreti_iret)
602 #if CPUMASK_ELEMENTS != 4
603 #error "assembly incompatible with cpumask_t"
605 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
606 .globl stopped_cpus, started_cpus
618 .globl CNAME(cpustop_restartfunc)
619 CNAME(cpustop_restartfunc):