 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */
#include "opt_auto_eoi.h"

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>
/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
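/*
 * Each redirection table entry occupies two 32-bit I/O APIC registers
 * (low and high dwords), and the table starts at register index 0x10,
 * so 0x10 + irq * 2 selects the low dword for a given IRQ.
 */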
#define MPLOCKED	lock ;
#define APIC_PUSH_FRAME							\
	PUSH_FRAME ;		/* 15 regs + space for 5 extras */	\
	movq	$0,TF_XFLAGS(%rsp) ;					\
	movq	$0,TF_TRAPNO(%rsp) ;					\
	movq	$0,TF_ADDR(%rsp) ;					\
	movq	$0,TF_FLAGS(%rsp) ;					\
	movq	$0,TF_ERR(%rsp) ;
/*
 * JG stale?  Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME	POP_FRAME
/* sizeof(struct apic_intmapinfo) == 24 */
#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 24 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 24 * (irq_num) + 16
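/*
 * A rough C view of the layout the offsets above assume (field names
 * are illustrative; only the 24-byte stride and the 8/16 byte offsets
 * matter to this file):
 *
 *	struct apic_intmapinfo {
 *		int	ioapic;			// byte offset 0
 *		int	int_pin;		// byte offset 4
 *		volatile void *apic_address;	// byte offset 8
 *		int	redirindex;		// byte offset 16, padded to 24
 *	};
 */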
#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;		/* into critical reg */		\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movq	IOAPICADDR(irq_num), %rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%rcx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%rcx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%rcx) ;	/* new value */		\
7: ;					/* already masked */		\
	APIC_IMASK_UNLOCK ;
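/*
 * The same sequence in rough C (sketch only; imen_lock()/imen_unlock()
 * are illustrative names for APIC_IMASK_LOCK/UNLOCK, not kernel APIs):
 *
 *	imen_lock();
 *	if ((apic_imen & (1 << irq)) == 0) {
 *		apic_imen |= 1 << irq;
 *		volatile int *io = int_to_apicintpin[irq].apic_address;
 *		io[0] = int_to_apicintpin[irq].redirindex; // select reg
 *		io[IOAPIC_WINDOW / 4] |= IOART_INTMASK;    // mask the pin
 *	}
 *	imen_unlock();
 */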
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;
/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	cmpl	$0,%eax ;						\
	jnz	8f ;							\
	APIC_IMASK_LOCK ;		/* into critical reg */		\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ; /* clear mask bit */	\
	movq	IOAPICADDR(irq_num),%rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%rcx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%rcx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%rcx) ;	/* new value */		\
7: ;									\
	APIC_IMASK_UNLOCK ;						\
8: ;
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt set its fpending bit and
 *   doreti.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its fpending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
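/*
 * The fast-path control flow, in rough C (sketch only; the lowercase
 * helper names are illustrative, not kernel APIs):
 *
 *	mask_level_irq(irq);
 *	lapic->eoi = 0;
 *	if (td->td_nest_count || td->td_pri >= TDPRI_CRIT) {
 *		gd->gd_fpending |= 1 << irq;	// defer, doreti runs it
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		gd->gd_fpending &= ~(1 << irq);
 *		if (ithread_fast_handler(frame) == 0)
 *			unmask_irq(irq);
 *	}
 *	// fall into doreti in both cases
 */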
#define FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
	IDTVEC(vec_name) ;						\
	APIC_PUSH_FRAME ;						\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movq	lapic, %rax ;						\
	movl	$0, LA_EOI(%rax) ;					\
	movq	PCPU(curthread),%rbx ;					\
	testl	$-1,TD_NEST_COUNT(%rbx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx) ;				\
	jl	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushq	$irq_num ;		/* trapframe -> intrframe */	\
	movq	%rsp, %rdi ;		/* pass frame by reference */	\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	addq	$8, %rsp ;		/* intrframe -> trapframe */	\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;
/*
 * Slow interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 *   running.  Note that we cannot mess with mp_lock at all
 *   if we entered from a critical section!
 * - If we can take the interrupt clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */
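/*
 * The slow path, in rough C (sketch only; helper names illustrative):
 *
 *	mask_level_irq(irq);
 *	++mycpu->gd_cnt.v_intr;
 *	lapic->eoi = 0;
 *	if (td->td_nest_count || td->td_pri >= TDPRI_CRIT) {
 *		gd->gd_ipending |= 1 << irq;	// defer, doreti runs it
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		gd->gd_ipending &= ~(1 << irq);
 *		++td->td_nest_count;		// prevent doreti nesting
 *		sched_ithd(irq);		// with interrupts enabled
 *		--td->td_nest_count;
 *	}
 *	// fall into doreti; the interrupt source stays masked
 */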
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
	.text ;								\
	SUPERALIGN_TEXT ;						\
	IDTVEC(vec_name) ;						\
	APIC_PUSH_FRAME ;						\
	maybe_extra_ipending ;						\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	incl	PCPU(cnt) + V_INTR ;					\
	movq	lapic, %rax ;						\
	movl	$0, LA_EOI(%rax) ;					\
	movq	PCPU(curthread),%rbx ;					\
	testl	$-1,TD_NEST_COUNT(%rbx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx) ;				\
	jl	2f ;							\
1: ;									\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	incl	TD_NEST_COUNT(%rbx) ;					\
	sti ;								\
	movq	$irq_num,%rdi ;						\
	call	sched_ithd ;						\
	cli ;								\
	decl	TD_NEST_COUNT(%rbx) ;					\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;
/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real vector.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
	IDTVEC(vec_name) ;						\
	APIC_PUSH_FRAME ;						\
	movq	lapic, %rax ;						\
	movl	$0,LA_EOI(%rax) ;	/* End Of Interrupt to APIC */	\
	/*pushl $irq_num ;*/						\
	/*call	do_wrongintr ;*/					\
	APIC_POP_FRAME ;						\
	iretq ;
232 * Handle "spurious INTerrupts".
234 * This is different than the "spurious INTerrupt" generated by an
235 * 8259 PIC for missing INTs. See the APIC documentation for details.
236 * This routine should NOT do an 'EOI' cycle.
243 /* No EOI cycle used here */
/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushq	%rax

	movq	%cr3, %rax		/* invalidate the TLB */
	movq	%rax, %cr3

	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */

	popq	%rax
	iretq
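/*
 * Note: reloading %cr3 with its own value flushes all non-global TLB
 * entries, which is the entire effect this IPI needs; global (PG_G)
 * mappings, if enabled, are not flushed by a CR3 reload.
 */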
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
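/*
 * The stop/restart handshake, in rough C (sketch only; mymask is this
 * cpu's bit, 1 << mycpuid):
 *
 *	savectx(&stoppcbs[mycpuid]);
 *	atomic_set_int(&stopped_cpus, mymask);		// signal receipt
 *	while ((started_cpus & mymask) == 0)
 *		lwkt_smp_stopped();			// drain IPIQs
 *	atomic_clear_int(&started_cpus, mymask);
 *	atomic_clear_int(&stopped_cpus, mymask);	// signal restart
 *	if (cpustop_restartfunc != NULL) {
 *		void (*f)(void) = cpustop_restartfunc;
 *		cpustop_restartfunc = NULL;		// one-shot
 *		f();
 *	}
 */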
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	/* We save registers that are not preserved across function calls. */
	/* JG can be re-written with mov's */
	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	pushq	%rsi
	pushq	%rdi

	/* JGXXX switch to kernel %gs? */
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leaq	CNAME(stoppcbs), %rdi
	addq	%rax, %rdi
	call	CNAME(savectx)		/* Save process context */

	movl	PCPU(cpuid), %eax
	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 */
	MPLOCKED
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushq	%rax
	call	lwkt_smp_stopped
	popq	%rax
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	MPLOCKED
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	MPLOCKED
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	movq	CNAME(cpustop_restartfunc), %rax
	test	%rax, %rax
	jz	2f
	movq	$0, CNAME(cpustop_restartfunc)	/* One-shot */
	call	*%rax
2:
	popl	%fs
	popl	%ds			/* restore previous data segment */
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%rax
	iretq
/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 */
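/*
 * Xipiq, in rough C (sketch only):
 *
 *	lapic->eoi = 0;
 *	++mycpu->gd_cnt.v_ipi;
 *	if (td->td_pri < TDPRI_CRIT) {
 *		td->td_pri += TDPRI_CRIT;	// enter critical section
 *		++gd->gd_intr_nesting_level;
 *		lwkt_process_ipiq_frame(frame);
 *		--gd->gd_intr_nesting_level;
 *		td->td_pri -= TDPRI_CRIT;
 *	} else {
 *		gd->gd_reqflags |= RQF_IPIQ;	// doreti will process it
 *	}
 */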
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	APIC_PUSH_FRAME
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))

	incl	PCPU(cnt) + V_IPI
	movq	PCPU(curthread),%rbx
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx)
	jge	1f
	subq	$8,%rsp			/* make same as interrupt frame */
	movq	%rsp,%rdi		/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	call	lwkt_process_ipiq_frame
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	PCPU(intr_nesting_level)
	addq	$8,%rsp			/* turn into trapframe */
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	iretq
	.text
	SUPERALIGN_TEXT
	.globl Xtimer
Xtimer:
	APIC_PUSH_FRAME
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))

	incl	PCPU(cnt) + V_TIMER
	movq	PCPU(curthread),%rbx
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx)
	jge	1f
	testl	$-1,TD_NEST_COUNT(%rbx)
	jne	1f
	subq	$8,%rsp			/* make same as interrupt frame */
	movq	%rsp,%rdi		/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	call	lapic_timer_process_frame
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	PCPU(intr_nesting_level)
	addq	$8,%rsp			/* turn into trapframe */
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_TIMER,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	iretq
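/*
 * In the deferred case above, doreti notices RQF_TIMER when the
 * current critical section ends and runs the LAPIC timer processing
 * at that point, so the tick is not lost, only delayed.
 */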
	FAST_INTR(0,apic_fastintr0)
	FAST_INTR(1,apic_fastintr1)
	FAST_INTR(2,apic_fastintr2)
	FAST_INTR(3,apic_fastintr3)
	FAST_INTR(4,apic_fastintr4)
	FAST_INTR(5,apic_fastintr5)
	FAST_INTR(6,apic_fastintr6)
	FAST_INTR(7,apic_fastintr7)
	FAST_INTR(8,apic_fastintr8)
	FAST_INTR(9,apic_fastintr9)
	FAST_INTR(10,apic_fastintr10)
	FAST_INTR(11,apic_fastintr11)
	FAST_INTR(12,apic_fastintr12)
	FAST_INTR(13,apic_fastintr13)
	FAST_INTR(14,apic_fastintr14)
	FAST_INTR(15,apic_fastintr15)
	FAST_INTR(16,apic_fastintr16)
	FAST_INTR(17,apic_fastintr17)
	FAST_INTR(18,apic_fastintr18)
	FAST_INTR(19,apic_fastintr19)
	FAST_INTR(20,apic_fastintr20)
	FAST_INTR(21,apic_fastintr21)
	FAST_INTR(22,apic_fastintr22)
	FAST_INTR(23,apic_fastintr23)
	/* YYY what is this garbage? */

	SLOW_INTR(0,apic_slowintr0,)
	SLOW_INTR(1,apic_slowintr1,)
	SLOW_INTR(2,apic_slowintr2,)
	SLOW_INTR(3,apic_slowintr3,)
	SLOW_INTR(4,apic_slowintr4,)
	SLOW_INTR(5,apic_slowintr5,)
	SLOW_INTR(6,apic_slowintr6,)
	SLOW_INTR(7,apic_slowintr7,)
	SLOW_INTR(8,apic_slowintr8,)
	SLOW_INTR(9,apic_slowintr9,)
	SLOW_INTR(10,apic_slowintr10,)
	SLOW_INTR(11,apic_slowintr11,)
	SLOW_INTR(12,apic_slowintr12,)
	SLOW_INTR(13,apic_slowintr13,)
	SLOW_INTR(14,apic_slowintr14,)
	SLOW_INTR(15,apic_slowintr15,)
	SLOW_INTR(16,apic_slowintr16,)
	SLOW_INTR(17,apic_slowintr17,)
	SLOW_INTR(18,apic_slowintr18,)
	SLOW_INTR(19,apic_slowintr19,)
	SLOW_INTR(20,apic_slowintr20,)
	SLOW_INTR(21,apic_slowintr21,)
	SLOW_INTR(22,apic_slowintr22,)
	SLOW_INTR(23,apic_slowintr23,)
	WRONGINTR(0,apic_wrongintr0)
	WRONGINTR(1,apic_wrongintr1)
	WRONGINTR(2,apic_wrongintr2)
	WRONGINTR(3,apic_wrongintr3)
	WRONGINTR(4,apic_wrongintr4)
	WRONGINTR(5,apic_wrongintr5)
	WRONGINTR(6,apic_wrongintr6)
	WRONGINTR(7,apic_wrongintr7)
	WRONGINTR(8,apic_wrongintr8)
	WRONGINTR(9,apic_wrongintr9)
	WRONGINTR(10,apic_wrongintr10)
	WRONGINTR(11,apic_wrongintr11)
	WRONGINTR(12,apic_wrongintr12)
	WRONGINTR(13,apic_wrongintr13)
	WRONGINTR(14,apic_wrongintr14)
	WRONGINTR(15,apic_wrongintr15)
	WRONGINTR(16,apic_wrongintr16)
	WRONGINTR(17,apic_wrongintr17)
	WRONGINTR(18,apic_wrongintr18)
	WRONGINTR(19,apic_wrongintr19)
	WRONGINTR(20,apic_wrongintr20)
	WRONGINTR(21,apic_wrongintr21)
	WRONGINTR(22,apic_wrongintr22)
	WRONGINTR(23,apic_wrongintr23)
	.data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0
started_cpus:
	.long	0

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.quad	0

	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0