2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
8 #include "opt_auto_eoi.h"
10 #include <machine/asmacros.h>
11 #include <machine/lock.h>
12 #include <machine/psl.h>
13 #include <machine/trap.h>
15 #include <machine_base/icu/icu.h>
16 #include <bus/isa/isa.h>
22 #include <machine/smp.h>
23 #include <machine_base/isa/intr_machdep.h>
25 /* convert an absolute IRQ# into a bitmask */
26 #define IRQ_LBIT(irq_num) (1 << (irq_num))
28 /* make an index into the IO APIC from the IRQ# */
/* each redirection-table entry is a pair of 32-bit registers starting at I/O APIC register 0x10 */
29 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/* instruction prefix making the following read-modify-write op atomic on SMP */
32 #define MPLOCKED lock ;
38 * Push an interrupt frame in a format acceptable to doreti, reload
39 * the segment registers for the kernel.
42 pushl $0 ; /* dummy error code */ \
43 pushl $0 ; /* dummy trap type */ \
44 pushl $0 ; /* dummy xflags type */ \
46 pushl %ds ; /* save data and extra segments ... */ \
59 * Warning: POP_FRAME can only be used if there is no chance of a
60 * segment register being changed (e.g. by procfs), which is why syscalls
69 addl $3*4,%esp ; /* dummy xflags, trap & error codes */ \
/*
 * Accessors into the int_to_apicintpin[] table, indexed by absolute IRQ#
 * (stride IOAPIC_IM_SIZE): the I/O APIC register-window base address, the
 * redirection-table register index for the pin, and the software flags word.
 */
71 #define IOAPICADDR(irq_num) \
72 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ADDR
73 #define REDIRIDX(irq_num) \
74 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_ENTIDX
75 #define IOAPICFLAGS(irq_num) \
76 CNAME(int_to_apicintpin) + IOAPIC_IM_SIZE * (irq_num) + IOAPIC_IM_FLAGS
78 #define MASK_IRQ(irq_num) \
79 APIC_IMASK_LOCK ; /* into critical reg */ \
80 testl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
81 jne 7f ; /* masked, don't mask */ \
82 orl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
83 /* set the mask bit */ \
84 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
85 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
86 movl %eax, (%ecx) ; /* write the index */ \
87 orl $IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* set the mask */ \
88 7: ; /* already masked */ \
92 * Test to see whether we are handling an edge or level triggered INT.
93 * Level-triggered INTs must still be masked as we don't clear the source,
94 * and the EOI cycle would cause redundant INTs to occur.
96 #define MASK_LEVEL_IRQ(irq_num) \
97 testl $IOAPIC_IM_FLAG_LEVEL, IOAPICFLAGS(irq_num) ; \
98 jz 9f ; /* edge, don't mask */ \
103 * Test to see if the source is currently masked, clear if so.
105 #define UNMASK_IRQ(irq_num) \
108 APIC_IMASK_LOCK ; /* into critical reg */ \
109 testl $IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
110 je 7f ; /* bit clear, not masked */ \
111 andl $~IOAPIC_IM_FLAG_MASKED, IOAPICFLAGS(irq_num) ; \
112 /* clear mask bit */ \
113 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
114 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
115 movl %eax,(%ecx) ; /* write the index */ \
116 andl $~IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* clear the mask */ \
118 APIC_IMASK_UNLOCK ; \
124 * Fast interrupt call handlers run in the following sequence:
126 * - Push the trap frame required by doreti
127 * - Mask the interrupt and reenable its source
128 * - If we cannot take the interrupt set its fpending bit and
129 * doreti. Note that we cannot mess with mp_lock at all
130 * if we entered from a critical section!
131 * - If we can take the interrupt clear its fpending bit,
132 * call the handler, then unmask and doreti.
134 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
137 #define FAST_INTR(irq_num, vec_name) \
142 FAKE_MCOUNT(15*4(%esp)) ; \
143 MASK_LEVEL_IRQ(irq_num) ; \
144 movl $0, lapic_eoi ; \
145 movl PCPU(curthread),%ebx ; \
146 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
148 testl $-1,TD_NEST_COUNT(%ebx) ; \
150 testl $-1,TD_CRITCOUNT(%ebx) ; \
153 /* in critical section, make interrupt pending */ \
154 /* set the pending bit and return, leave interrupt masked */ \
155 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
156 orl $RQF_INTPEND,PCPU(reqflags) ; \
159 /* clear pending bit, run handler */ \
160 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
162 pushl %esp ; /* pass frame by reference */ \
163 incl TD_CRITCOUNT(%ebx) ; \
165 call ithread_fast_handler ; /* returns 0 to unmask */ \
166 decl TD_CRITCOUNT(%ebx) ; \
168 UNMASK_IRQ(irq_num) ; \
176 * Handle "spurious INTerrupts".
178 * This is different from the "spurious INTerrupt" generated by an
179 * 8259 PIC for missing INTs. See the APIC documentation for details.
180 * This routine should NOT do an 'EOI' cycle.
187 /* No EOI cycle used here */
193 * Handle TLB shootdowns.
195 * NOTE: Interrupts remain disabled.
202 movl $0, lapic_eoi /* End Of Interrupt to APIC */
203 FAKE_MCOUNT(15*4(%esp))
205 subl $8,%esp /* make same as interrupt frame */
206 pushl %esp /* pass frame by reference */
207 call smp_invltlb_intr
211 jmp doreti_syscall_ret
214 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
216 * - Signals its receipt.
217 * - Waits for permission to restart.
218 * - Processing pending IPIQ events while waiting.
219 * - Signals its restart.
231 pushl %ds /* save current data segment */
235 mov %ax, %ds /* use KERNEL data segment */
239 movl $0, lapic_eoi /* End Of Interrupt to APIC */
241 movl PCPU(cpuid), %eax
242 imull $PCB_SIZE, %eax
243 leal CNAME(stoppcbs)(%eax), %eax
245 call CNAME(savectx) /* Save process context */
249 movl PCPU(cpuid), %eax
252 * Indicate that we have stopped and loop waiting for permission
253 * to start again. We must still process IPI events while in a
256 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
257 * (e.g. Xtimer, Xinvltlb).
260 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
263 andl $~RQF_IPIQ,PCPU(reqflags)
265 call lwkt_smp_stopped
267 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
271 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
273 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
278 movl CNAME(cpustop_restartfunc), %eax
281 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
286 popl %ds /* restore previous data segment */
295 * For now just have one ipiq IPI, but what we really want is
296 * to have one for each source cpu so the APICs don't get stalled
297 * backlogging the requests.
304 movl $0, lapic_eoi /* End Of Interrupt to APIC */
305 FAKE_MCOUNT(15*4(%esp))
307 incl PCPU(cnt) + V_IPI
308 movl PCPU(curthread),%ebx
309 testl $-1,TD_CRITCOUNT(%ebx)
311 subl $8,%esp /* make same as interrupt frame */
312 pushl %esp /* pass frame by reference */
313 incl PCPU(intr_nesting_level)
314 incl TD_CRITCOUNT(%ebx)
316 call lwkt_process_ipiq_frame
317 decl TD_CRITCOUNT(%ebx)
318 decl PCPU(intr_nesting_level)
320 pushl $0 /* CPL for frame (REMOVED) */
324 orl $RQF_IPIQ,PCPU(reqflags)
326 jmp doreti_syscall_ret
333 movl $0, lapic_eoi /* End Of Interrupt to APIC */
334 FAKE_MCOUNT(15*4(%esp))
336 incl PCPU(cnt) + V_TIMER
337 movl PCPU(curthread),%ebx
338 testl $-1,TD_CRITCOUNT(%ebx)
340 testl $-1,TD_NEST_COUNT(%ebx)
342 subl $8,%esp /* make same as interrupt frame */
343 pushl %esp /* pass frame by reference */
344 incl PCPU(intr_nesting_level)
345 incl TD_CRITCOUNT(%ebx)
347 call lapic_timer_process_frame
348 decl TD_CRITCOUNT(%ebx)
349 decl PCPU(intr_nesting_level)
351 pushl $0 /* CPL for frame (REMOVED) */
355 orl $RQF_TIMER,PCPU(reqflags)
357 jmp doreti_syscall_ret
/*
 * Instantiate one fast-interrupt entry point per supported IRQ (0-31);
 * each expansion of FAST_INTR() emits the vector named apic_fastintrN.
 * The 32-IRQ limit matches the width of the per-cpu fpending bitmask
 * manipulated via IRQ_LBIT() above.
 */
362 FAST_INTR(0,apic_fastintr0)
363 FAST_INTR(1,apic_fastintr1)
364 FAST_INTR(2,apic_fastintr2)
365 FAST_INTR(3,apic_fastintr3)
366 FAST_INTR(4,apic_fastintr4)
367 FAST_INTR(5,apic_fastintr5)
368 FAST_INTR(6,apic_fastintr6)
369 FAST_INTR(7,apic_fastintr7)
370 FAST_INTR(8,apic_fastintr8)
371 FAST_INTR(9,apic_fastintr9)
372 FAST_INTR(10,apic_fastintr10)
373 FAST_INTR(11,apic_fastintr11)
374 FAST_INTR(12,apic_fastintr12)
375 FAST_INTR(13,apic_fastintr13)
376 FAST_INTR(14,apic_fastintr14)
377 FAST_INTR(15,apic_fastintr15)
378 FAST_INTR(16,apic_fastintr16)
379 FAST_INTR(17,apic_fastintr17)
380 FAST_INTR(18,apic_fastintr18)
381 FAST_INTR(19,apic_fastintr19)
382 FAST_INTR(20,apic_fastintr20)
383 FAST_INTR(21,apic_fastintr21)
384 FAST_INTR(22,apic_fastintr22)
385 FAST_INTR(23,apic_fastintr23)
386 FAST_INTR(24,apic_fastintr24)
387 FAST_INTR(25,apic_fastintr25)
388 FAST_INTR(26,apic_fastintr26)
389 FAST_INTR(27,apic_fastintr27)
390 FAST_INTR(28,apic_fastintr28)
391 FAST_INTR(29,apic_fastintr29)
392 FAST_INTR(30,apic_fastintr30)
393 FAST_INTR(31,apic_fastintr31)
400 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
/* cpu bitmasks polled/updated with btsl/btrl in the Xcpustop handler above */
401 .globl stopped_cpus, started_cpus
/* optional one-shot function pointer run by the restarted cpu (cleared after use) */
407 .globl CNAME(cpustop_restartfunc)
408 CNAME(cpustop_restartfunc):