2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
9 #include "opt_auto_eoi.h"
12 #include <machine/asmacros.h>
13 #include <machine/lock.h>
14 #include <machine/psl.h>
15 #include <machine/trap.h>
16 #include <machine/segments.h>
18 #include <machine_base/icu/icu.h>
19 #include <bus/isa/isa.h>
25 #include <machine/smp.h>
26 #include <machine_base/isa/intr_machdep.h>
28 /* convert an absolute IRQ# into a bitmask */
29 #define IRQ_LBIT(irq_num) (1 << (irq_num))
31 /* make an index into the IO APIC from the IRQ# (redirection table starts at reg 0x10, 2 regs per entry) */
32 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
35 #define MPLOCKED lock ; /* "lock" prefix for SMP-safe read-modify-write ops */
40 #define APIC_PUSH_FRAME /* build the trapframe doreti expects */ \
41 PUSH_FRAME ; /* 15 regs + space for 5 extras */ \
42 movq $0,TF_XFLAGS(%rsp) ; /* zero the 5 extra fields -- no */ \
43 movq $0,TF_TRAPNO(%rsp) ; /* trap/fault info for an APIC */ \
44 movq $0,TF_ADDR(%rsp) ; /* interrupt entry */ \
45 movq $0,TF_FLAGS(%rsp) ; \
46 movq $0,TF_ERR(%rsp) ; \
50 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
51 * segment register being changed (e.g. by procfs), which is why syscalls
54 #define APIC_POP_FRAME POP_FRAME /* undo APIC_PUSH_FRAME */
56 #define IOAPICADDR(irq_num) \
57 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_APIC_ADDRESS /* per-IRQ I/O APIC base address */
58 #define REDIRIDX(irq_num) \
59 CNAME(int_to_apicintpin) + AIMI_SIZE * (irq_num) + AIMI_REDIRINDEX /* per-IRQ redirection-table index */
61 #define MASK_IRQ(irq_num) /* set INTMASK in the IRQ's I/O APIC redirection entry (idempotent) */ \
62 APIC_IMASK_LOCK ; /* into critical reg */ \
63 testl $IRQ_LBIT(irq_num), apic_imen ; /* already masked per shadow mask? */ \
64 jne 7f ; /* masked, don't mask */ \
65 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
66 movq IOAPICADDR(irq_num), %rcx ; /* ioapic addr */ \
67 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
68 movl %eax, (%rcx) ; /* write the index */ \
69 orl $IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */ \
70 7: ; /* already masked */ \
74 * Test to see whether we are handling an edge or level triggered INT.
75 * Level-triggered INTs must still be masked as we don't clear the source,
76 * and the EOI cycle would cause redundant INTs to occur.
78 #define MASK_LEVEL_IRQ(irq_num) /* mask only if the pin is level-triggered */ \
79 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level-triggered pin? */ \
80 jz 9f ; /* edge, don't mask */ \
85 * Test to see if the source is currently masked, clear if so.
87 #define UNMASK_IRQ(irq_num) /* clear INTMASK in the IRQ's redirection entry if set */ \
90 APIC_IMASK_LOCK ; /* into critical reg */ \
91 testl $IRQ_LBIT(irq_num), apic_imen ; /* masked per shadow mask? */ \
92 je 7f ; /* bit clear, not masked */ \
93 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
94 movq IOAPICADDR(irq_num),%rcx ; /* ioapic addr */ \
95 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
96 movl %eax,(%rcx) ; /* write the index */ \
97 andl $~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
105 * Fast interrupt call handlers run in the following sequence:
107 * - Push the trap frame required by doreti
108 * - Mask the interrupt and reenable its source
109 * - If we cannot take the interrupt set its fpending bit and
110 * doreti. Note that we cannot mess with mp_lock at all
111 * if we entered from a critical section!
112 * - If we can take the interrupt clear its fpending bit,
113 * call the handler, then unmask and doreti.
115 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
118 #define FAST_INTR(irq_num, vec_name) \
123 FAKE_MCOUNT(15*4(%esp)) ; /* NOTE(review): 15*4(%esp) looks like a stale 32-bit offset -- verify (15*8(%rsp) on x86_64) */ \
124 MASK_LEVEL_IRQ(irq_num) ; \
126 movl $0, LA_EOI(%rax) ; /* ack lapic; assumes %rax = local APIC base -- TODO confirm */ \
127 movq PCPU(curthread),%rbx ; \
128 testl $-1,TD_NEST_COUNT(%rbx) ; /* nested interrupt thread? */ \
130 testl $-1,TD_CRITCOUNT(%rbx) ; /* inside a critical section? */ \
133 /* in critical section, make interrupt pending */ \
134 /* set the pending bit and return, leave interrupt masked */ \
135 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
136 orl $RQF_INTPEND,PCPU(reqflags) ; /* tell doreti an int is pending */ \
139 /* clear pending bit, run handler */ \
140 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
141 pushq $irq_num ; /* trapframe -> intrframe */ \
142 movq %rsp, %rdi ; /* pass frame by reference */ \
143 incl TD_CRITCOUNT(%rbx) ; /* hold critical section over handler */ \
144 call ithread_fast_handler ; /* returns 0 to unmask */ \
145 decl TD_CRITCOUNT(%rbx) ; \
146 addq $8, %rsp ; /* intrframe -> trapframe */ \
147 UNMASK_IRQ(irq_num) ; \
155 * Handle "spurious INTerrupts".
157 * This is different than the "spurious INTerrupt" generated by an
158 * 8259 PIC for missing INTs. See the APIC documentation for details.
159 * This routine should NOT do an 'EOI' cycle.
166 /* No EOI cycle used here */
172 * Handle TLB shootdowns.
180 movq %cr3, %rax /* invalidate the TLB */
184 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC; assumes %rax reloaded with lapic base -- TODO confirm */
191 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
193 * - Signals its receipt.
194 * - Waits for permission to restart.
195 * - Processes pending IPIQ events while waiting.
196 * - Signals its restart.
205 /* We save registers that are not preserved across function calls. */
206 /* JG can be re-written with mov's */
218 /* JGXXX switch to kernel %gs? */
219 pushl %ds /* save current data segment */ /* NOTE(review): push/pop of %ds is not encodable in 64-bit mode -- looks like stale i386 code; verify */
223 mov %ax, %ds /* use KERNEL data segment */
229 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
232 movl PCPU(cpuid), %eax /* %eax = this cpu's id */
233 imull $PCB_SIZE, %eax /* offset into stoppcbs[] */
234 leaq CNAME(stoppcbs), %rdi
236 call CNAME(savectx) /* Save process context */
239 movl PCPU(cpuid), %eax
242 * Indicate that we have stopped and loop waiting for permission
243 * to start again. We must still process IPI events while in a
247 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
249 andl $~RQF_IPIQ,PCPU(reqflags)
251 call lwkt_smp_stopped
253 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
257 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
259 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
264 movq CNAME(cpustop_restartfunc), %rax /* optional restart hook */
267 movq $0, CNAME(cpustop_restartfunc) /* One-shot */
283 popl %ds /* restore previous data segment */
290 * For now just have one ipiq IPI, but what we really want is
291 * to have one for each source cpu so the APICs don't get stalled
292 * backlogging the requests.
300 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
301 FAKE_MCOUNT(15*4(%esp)) /* NOTE(review): stale 32-bit offset? verify (15*8(%rsp) on x86_64) */
303 incl PCPU(cnt) + V_IPI /* statistics: count IPIs */
304 movq PCPU(curthread),%rbx
305 testl $-1,TD_CRITCOUNT(%rbx) /* inside a critical section? */
307 subq $8,%rsp /* make same as interrupt frame */
308 movq %rsp,%rdi /* pass frame by reference */
309 incl PCPU(intr_nesting_level)
310 incl TD_CRITCOUNT(%rbx) /* hold critical section over call */
311 call lwkt_process_ipiq_frame
312 decl TD_CRITCOUNT(%rbx)
313 decl PCPU(intr_nesting_level)
314 addq $8,%rsp /* turn into trapframe */
318 orl $RQF_IPIQ,PCPU(reqflags) /* presumably deferred path: flag for doreti -- confirm against elided branch */
329 movl $0, LA_EOI(%rax) /* End Of Interrupt to APIC */
330 FAKE_MCOUNT(15*4(%esp)) /* NOTE(review): stale 32-bit offset? verify (15*8(%rsp) on x86_64) */
332 incl PCPU(cnt) + V_TIMER /* statistics: count lapic timer ints */
333 movq PCPU(curthread),%rbx
334 testl $-1,TD_CRITCOUNT(%rbx) /* inside a critical section? */
336 testl $-1,TD_NEST_COUNT(%rbx) /* nested interrupt thread? */
338 subq $8,%rsp /* make same as interrupt frame */
339 movq %rsp,%rdi /* pass frame by reference */
340 incl PCPU(intr_nesting_level)
341 incl TD_CRITCOUNT(%rbx) /* hold critical section over call */
342 call lapic_timer_process_frame
343 decl TD_CRITCOUNT(%rbx)
344 decl PCPU(intr_nesting_level)
345 addq $8,%rsp /* turn into trapframe */
349 orl $RQF_TIMER,PCPU(reqflags) /* presumably deferred path: flag for doreti -- confirm against elided branch */
/* Instantiate the fast-interrupt entry points for IRQs 0-23 (24 I/O APIC pins). */
357 FAST_INTR(0,apic_fastintr0)
358 FAST_INTR(1,apic_fastintr1)
359 FAST_INTR(2,apic_fastintr2)
360 FAST_INTR(3,apic_fastintr3)
361 FAST_INTR(4,apic_fastintr4)
362 FAST_INTR(5,apic_fastintr5)
363 FAST_INTR(6,apic_fastintr6)
364 FAST_INTR(7,apic_fastintr7)
365 FAST_INTR(8,apic_fastintr8)
366 FAST_INTR(9,apic_fastintr9)
367 FAST_INTR(10,apic_fastintr10)
368 FAST_INTR(11,apic_fastintr11)
369 FAST_INTR(12,apic_fastintr12)
370 FAST_INTR(13,apic_fastintr13)
371 FAST_INTR(14,apic_fastintr14)
372 FAST_INTR(15,apic_fastintr15)
373 FAST_INTR(16,apic_fastintr16)
374 FAST_INTR(17,apic_fastintr17)
375 FAST_INTR(18,apic_fastintr18)
376 FAST_INTR(19,apic_fastintr19)
377 FAST_INTR(20,apic_fastintr20)
378 FAST_INTR(21,apic_fastintr21)
379 FAST_INTR(22,apic_fastintr22)
380 FAST_INTR(23,apic_fastintr23)
387 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
388 .globl stopped_cpus, started_cpus
394 .globl CNAME(cpustop_restartfunc)
395 CNAME(cpustop_restartfunc): /* optional hook run by Xcpustop on restart; cleared after use ("one-shot") */
398 .globl apic_pin_trigger