2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.9 2003/07/08 06:27:27 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
/*
 * Convert an absolute IRQ# into a single-bit mask (bit irq_num set).
 * Used to test and set bits in apic_imen and in the per-cpu
 * fpending/ipending interrupt-pending words.
 */
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
/*
 * Compute the IO APIC register-select index for irq_num's redirection
 * table entry: entries occupy two 32-bit register slots each, and the
 * redirection table starts at register index 0x10.
 */
15 /* make an index into the IO APIC from the IRQ# */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * Per-IRQ lookups into the int_to_apicintpin[] table (16-byte entries):
 *   IOAPICADDR(irq) - address of the IO APIC servicing this IRQ
 *   REDIRIDX(irq)   - register index of its redirection-table entry
 * (Field roles taken from their use in MASK_IRQ/UNMASK_IRQ, where the
 * first is the register-select address written with the second.
 * NOTE(review): offsets 8/12 assumed to match the C-side struct layout
 * of int_to_apicintpin — confirm against intr_machdep.h.)
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
71 #define MASK_IRQ(irq_num) \
72 IMASK_LOCK ; /* into critical reg */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
100 movl (%eax), %eax ; \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; \
117 * Test to see if the source is currently masked, clear if so.
119 #define UNMASK_IRQ(irq_num) \
120 IMASK_LOCK ; /* into critical reg */ \
121 testl $IRQ_LBIT(irq_num), apic_imen ; \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
125 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
126 movl %eax,(%ecx) ; /* write the index */ \
127 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
128 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
129 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
140 * - If we can take the interrupt clear its fpending bit,
141 * call the handler, then unmask and doreti.
143 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
146 #define FAST_INTR(irq_num, vec_name) \
151 FAKE_MCOUNT(13*4(%esp)) ; \
152 MASK_LEVEL_IRQ(irq_num) ; \
154 incl PCPU(intr_nesting_level) ; \
155 movl PCPU(curthread),%ebx ; \
156 movl TD_CPL(%ebx),%eax ; \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
160 testl $IRQ_LBIT(irq_num), %eax ; \
163 /* set the pending bit and return, leave interrupt masked */ \
164 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
165 movl $TDPRI_CRIT, PCPU(reqpri) ; \
168 /* try to get giant */ \
172 /* clear pending bit, run handler */ \
173 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
174 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
175 pushl intr_unit + (irq_num) * 4 ; \
176 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
178 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
179 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
180 movl intr_countp + (irq_num) * 4, %eax ; \
183 UNMASK_IRQ(irq_num) ; \
189 * Restart fast interrupt held up by critical section or cpl.
191 * - Push a dummy trap frame as required by doreti
192 * - The interrupt source is already masked
193 * - Clear the fpending bit
195 * - Unmask the interrupt
196 * - Pop the dummy frame and do a normal return
198 * The BGL is held on call and left held on return.
200 * YYY can cache gd base pointer instead of using hidden %fs
204 #define FAST_UNPEND(irq_num, vec_name) \
211 pushl intr_unit + (irq_num) * 4 ; \
212 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
214 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
215 movl intr_countp + (irq_num) * 4, %eax ; \
217 UNMASK_IRQ(irq_num) ; \
223 * Slow interrupt call handlers run in the following sequence:
225 * - Push the trap frame required by doreti.
226 * - Mask the interrupt and reenable its source.
227 * - If we cannot take the interrupt set its ipending bit and
228 * doreti. In addition to checking for a critical section
229 * and cpl mask we also check to see if the thread is still
231 * - If we can take the interrupt clear its ipending bit
232 * and schedule the thread. Leave interrupts masked and doreti.
234 * Note that calls to sched_ithd() are made with interrupts enabled
235 * and outside a critical section. YYY sched_ithd may preempt us
236 * synchronously (fix interrupt stacking)
238 * YYY can cache gd base pointer instead of using hidden %fs
242 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
247 maybe_extra_ipending ; \
249 MASK_LEVEL_IRQ(irq_num) ; \
251 incl PCPU(intr_nesting_level) ; \
252 movl PCPU(curthread),%ebx ; \
253 movl TD_CPL(%ebx),%eax ; \
254 pushl %eax ; /* cpl do restore */ \
255 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
257 testl $IRQ_LBIT(irq_num),%eax ; \
260 /* set the pending bit and return, leave the interrupt masked */ \
261 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
262 movl $TDPRI_CRIT, PCPU(reqpri) ; \
265 /* set running bit, clear pending bit, run handler */ \
266 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
271 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
272 movl intr_countp + (irq_num) * 4,%eax ; \
279 * Unmask a slow interrupt. This function is used by interrupt threads
280 * after they have descheduled themselves to reenable interrupts and
281 * possibly cause a reschedule to occur.
284 #define INTR_UNMASK(irq_num, vec_name, icu) \
288 pushl %ebp ; /* frame for ddb backtrace */ \
290 UNMASK_IRQ(irq_num) ; \
295 /* XXX forward_irq to cpu holding the BGL? */
298 3: ; /* other cpu has isr lock */ \
300 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
301 movl $TDPRI_CRIT,_reqpri ; \
302 testl $IRQ_LBIT(irq_num), TD_CPL(%ebx) ; \
303 jne 4f ; /* this INT masked */ \
304 call forward_irq ; /* forward irq to lock holder */ \
305 POP_FRAME ; /* and return */ \
309 POP_FRAME ; /* and return */ \
313 * Handle "spurious INTerrupts".
315 * This is different than the "spurious INTerrupt" generated by an
316 * 8259 PIC for missing INTs. See the APIC documentation for details.
317 * This routine should NOT do an 'EOI' cycle.
327 /* No EOI cycle used here */
333 * Handle TLB shootdowns.
341 #ifdef COUNT_XINVLTLB_HITS
345 movl PCPU(cpuid), %eax
349 #endif /* COUNT_XINVLTLB_HITS */
351 movl %cr3, %eax /* invalidate the TLB */
354 ss /* stack segment, avoid %ds load */
355 movl $0, lapic_eoi /* End Of Interrupt to APIC */
365 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
367 * - Stores current cpu state in checkstate_cpustate[cpuid]
368 * 0 == user, 1 == sys, 2 == intr
369 * - Stores current process in checkstate_curproc[cpuid]
371 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
373 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
378 .globl Xcpucheckstate
379 .globl checkstate_cpustate
380 .globl checkstate_curproc
385 pushl %ds /* save current data segment */
389 mov %ax, %ds /* use KERNEL data segment */
393 movl $0, lapic_eoi /* End Of Interrupt to APIC */
400 testl $PSL_VM, 24(%esp)
402 incl %ebx /* system or interrupt */
404 movl PCPU(cpuid), %eax
405 movl %ebx, checkstate_cpustate(,%eax,4)
406 movl PCPU(curthread), %ebx
407 movl TD_PROC(%ebx),%ebx
408 movl %ebx, checkstate_curproc(,%eax,4)
410 movl %ebx, checkstate_pc(,%eax,4)
412 lock /* checkstate_probed_cpus |= (1<<id) */
413 btsl %eax, checkstate_probed_cpus
416 popl %ds /* restore previous data segment */
421 #endif /* BETTER_CLOCK */
425 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
427 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
428 * - MP safe in regards to setting AST_PENDING because doreti is in
429 * a cli mode when it checks.
438 movl PCPU(cpuid), %eax
439 lock /* checkstate_need_ast &= ~(1<<id) */
440 btrl %eax, checkstate_need_ast
441 movl $0, lapic_eoi /* End Of Interrupt to APIC */
444 btsl %eax, checkstate_pending_ast
447 FAKE_MCOUNT(13*4(%esp))
449 movl PCPU(curthread), %eax
450 pushl TD_CPL(%eax) /* cpl restored by doreti */
452 orl $AST_PENDING, PCPU(astpending) /* XXX */
453 incl PCPU(intr_nesting_level)
456 movl PCPU(cpuid), %eax
458 btrl %eax, checkstate_pending_ast
460 btrl %eax, CNAME(resched_cpus)
462 orl $AST_PENDING+AST_RESCHED,PCPU(astpending)
467 /* We are already in the process of delivering an ast for this CPU */
473 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
482 movl $0, lapic_eoi /* End Of Interrupt to APIC */
484 FAKE_MCOUNT(13*4(%esp))
487 testl %eax,%eax /* Did we get the lock ? */
490 incl PCPU(cnt)+V_FORWARDED_HITS
492 movl PCPU(curthread), %eax
493 pushl TD_CPL(%eax) /* cpl restored by doreti */
495 incl PCPU(intr_nesting_level)
499 jmp doreti /* Handle forwarded interrupt */
501 incl PCPU(cnt)+V_FORWARDED_MISSES
502 call forward_irq /* Oops, we've lost the isr lock */
520 cmpl $0, CNAME(forward_irq_enabled)
524 cmpl $MP_FREE_LOCK,%eax
526 movl $0, %eax /* Pick CPU #0 if noone has lock */
529 movl cpu_num_to_apic_id(,%eax,4),%ecx
531 movl lapic_icr_hi, %eax
532 andl $~APIC_ID_MASK, %eax
534 movl %eax, lapic_icr_hi
537 movl lapic_icr_lo, %eax
538 andl $APIC_DELSTAT_MASK,%eax
540 movl lapic_icr_lo, %eax
541 andl $APIC_RESV2_MASK, %eax
542 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
543 movl %eax, lapic_icr_lo
545 movl lapic_icr_lo, %eax
546 andl $APIC_DELSTAT_MASK,%eax
552 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
554 * - Signals its receipt.
555 * - Waits for permission to restart.
556 * - Signals its restart.
568 pushl %ds /* save current data segment */
572 mov %ax, %ds /* use KERNEL data segment */
576 movl $0, lapic_eoi /* End Of Interrupt to APIC */
578 movl PCPU(cpuid), %eax
579 imull $PCB_SIZE, %eax
580 leal CNAME(stoppcbs)(%eax), %eax
582 call CNAME(savectx) /* Save process context */
586 movl PCPU(cpuid), %eax
589 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
591 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
595 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
597 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
602 movl CNAME(cpustop_restartfunc), %eax
605 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
610 popl %ds /* restore previous data segment */
619 * For now just have one ipiq IPI, but what we really want is
620 * to have one for each source cpu so the APICs don't get stalled
621 * backlogging the requests.
628 movl $0, lapic_eoi /* End Of Interrupt to APIC */
629 FAKE_MCOUNT(13*4(%esp))
631 movl PCPU(curthread),%ebx
632 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
634 addl $TDPRI_CRIT,TD_PRI(%ebx)
635 call lwkt_process_ipiq
636 subl $TDPRI_CRIT,TD_PRI(%ebx)
638 incl PCPU(intr_nesting_level)
642 movl $TDPRI_CRIT,PCPU(reqpri)
643 orl $AST_IPIQ,PCPU(astpending)
/*
 * Instantiate the fast-interrupt entry points (Xfastintr vectors) for
 * IRQs 0-23, one FAST_INTR expansion per IRQ. Each handler pushes a
 * doreti-compatible frame, masks/EOIs the source, and either runs the
 * handler immediately or defers it via the per-cpu fpending bit (see
 * the FAST_INTR macro above).
 */
649 FAST_INTR(0,fastintr0)
650 FAST_INTR(1,fastintr1)
651 FAST_INTR(2,fastintr2)
652 FAST_INTR(3,fastintr3)
653 FAST_INTR(4,fastintr4)
654 FAST_INTR(5,fastintr5)
655 FAST_INTR(6,fastintr6)
656 FAST_INTR(7,fastintr7)
657 FAST_INTR(8,fastintr8)
658 FAST_INTR(9,fastintr9)
659 FAST_INTR(10,fastintr10)
660 FAST_INTR(11,fastintr11)
661 FAST_INTR(12,fastintr12)
662 FAST_INTR(13,fastintr13)
663 FAST_INTR(14,fastintr14)
664 FAST_INTR(15,fastintr15)
665 FAST_INTR(16,fastintr16)
666 FAST_INTR(17,fastintr17)
667 FAST_INTR(18,fastintr18)
668 FAST_INTR(19,fastintr19)
669 FAST_INTR(20,fastintr20)
670 FAST_INTR(21,fastintr21)
671 FAST_INTR(22,fastintr22)
672 FAST_INTR(23,fastintr23)
674 /* YYY what is this garbage? */
675 #define CLKINTR_PENDING \
677 movl $1,CNAME(clkintr_pending) ; \
678 call clock_unlock ; \
/*
 * Slow-interrupt vector for IRQ0 (the clock). The CLKINTR_PENDING hook
 * additionally records the event in clkintr_pending (see the macro
 * above); the remaining IRQs use a plain INTR expansion.
 */
680 INTR(0,intr0, CLKINTR_PENDING)
/*
 * Instantiate the fast-interrupt restart entry points for IRQs 0-23.
 * These rerun a fast interrupt that was previously deferred (its
 * fpending bit set while the cpu was in a critical section or the IRQ
 * was cpl-masked); the source is already masked on entry and is
 * unmasked after the handler runs (see the FAST_UNPEND macro above).
 */
705 FAST_UNPEND(0,fastunpend0)
706 FAST_UNPEND(1,fastunpend1)
707 FAST_UNPEND(2,fastunpend2)
708 FAST_UNPEND(3,fastunpend3)
709 FAST_UNPEND(4,fastunpend4)
710 FAST_UNPEND(5,fastunpend5)
711 FAST_UNPEND(6,fastunpend6)
712 FAST_UNPEND(7,fastunpend7)
713 FAST_UNPEND(8,fastunpend8)
714 FAST_UNPEND(9,fastunpend9)
715 FAST_UNPEND(10,fastunpend10)
716 FAST_UNPEND(11,fastunpend11)
717 FAST_UNPEND(12,fastunpend12)
718 FAST_UNPEND(13,fastunpend13)
719 FAST_UNPEND(14,fastunpend14)
720 FAST_UNPEND(15,fastunpend15)
721 FAST_UNPEND(16,fastunpend16)
722 FAST_UNPEND(17,fastunpend17)
723 FAST_UNPEND(18,fastunpend18)
724 FAST_UNPEND(19,fastunpend19)
725 FAST_UNPEND(20,fastunpend20)
726 FAST_UNPEND(21,fastunpend21)
727 FAST_UNPEND(22,fastunpend22)
728 FAST_UNPEND(23,fastunpend23)
732 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
734 * - Calls the generic rendezvous action function.
742 mov %ax, %ds /* use KERNEL data segment */
747 call smp_rendezvous_action
749 movl $0, lapic_eoi /* End Of Interrupt to APIC */
758 * Addresses of interrupt handlers.
759 * XresumeNN: Resumption addresses for HWIs.
765 * ipl.s: doreti_unpend
767 .long Xresume0, Xresume1, Xresume2, Xresume3
768 .long Xresume4, Xresume5, Xresume6, Xresume7
769 .long Xresume8, Xresume9, Xresume10, Xresume11
770 .long Xresume12, Xresume13, Xresume14, Xresume15
771 .long Xresume16, Xresume17, Xresume18, Xresume19
772 .long Xresume20, Xresume21, Xresume22, Xresume23
775 * ipl.s: doreti_unpend
776 * apic_ipl.s: splz_unpend
778 .long _swi_null, swi_net, _swi_null, _swi_null
779 .long _swi_vm, _swi_null, _softclock
781 imasks: /* masks for interrupt handlers */
782 .space NHWI*4 /* padding; HWI masks are elsewhere */
784 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
785 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
789 #ifdef COUNT_XINVLTLB_HITS
793 #endif /* COUNT_XINVLTLB_HITS */
795 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
796 .globl stopped_cpus, started_cpus
803 .globl checkstate_probed_cpus
804 checkstate_probed_cpus:
806 #endif /* BETTER_CLOCK */
807 .globl checkstate_need_ast
810 checkstate_pending_ast:
812 .globl CNAME(resched_cpus)
813 .globl CNAME(cpustop_restartfunc)
816 CNAME(cpustop_restartfunc):
819 .globl apic_pin_trigger