 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $

#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
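/*
 * Each I/O APIC redirection-table entry is 64 bits wide and is accessed as
 * two consecutive 32-bit registers starting at register index 0x10, so
 * pin N's low dword sits at index 0x10 + 2*N (e.g. the entry for IRQ 3
 * begins at index 0x16).
 */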
 * Macros for interrupt entry, call to handler, and exit.

#define FAST_INTR(irq_num, vec_name) \
	pushl %eax ; /* save only call-used registers */ \
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
	pushl _intr_unit + (irq_num) * 4 ; \
	call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl $0, lapic_eoi ; \
	incl _cnt+V_INTR ; /* book-keeping can wait */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	pushl $0 ; /* dummy error code */ \
	pushl $0 ; /* dummy trap type */ \
	pushl %ds ; /* save data and extra segments ... */ \
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
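/*
 * Layout implied by the two macros above: each int_to_apicintpin[] entry
 * is 16 bytes wide; the dword at offset 8 holds the memory-mapped address
 * of the owning I/O APIC's index register and the dword at offset 12 holds
 * the redirection-table index for that pin.
 */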
#define MASK_IRQ(irq_num) \
	IMASK_LOCK ; /* into critical reg */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	jne 7f ; /* masked, don't mask */ \
	orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax, (%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
	orl $IOART_INTMASK, %eax ; /* set the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
7: ; /* already masked */ \

 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.

#define MASK_LEVEL_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
	jz 9f ; /* edge, don't mask */ \
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl (%eax), %eax ; \
	testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \

#define EOI_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), lapic_isr1 ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
 * Test to see if the source is currently masked, clear if so.

#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ; /* into critical reg */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	je 7f ; /* bit clear, not masked */ \
	andl $~IRQ_BIT(irq_num), _apic_imen ; /* clear mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax, (%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
	andl $~IOART_INTMASK, %eax ; /* clear the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_lock_np)
	movl CNAME(apic_itrace_debugbuffer_idx), %ecx
	movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_unlock_np)

#define APIC_ITRACE(name, irq_num, id) \
	lock ; /* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4 ; \
	movl $(irq_num), %eax ; \
	cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
	call log_intr_event ; \

#define APIC_ITRACE(name, irq_num, id) \
	lock ; /* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#define APIC_ITRACE(name, irq_num, id)
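/*
 * Note on the INTR() entry points below: the iactive word implements the
 * "lazy masking" scheme.  A CPU entering the handler sets the IRQ's bit
 * atomically with btsl; if the bit was already set, the source is
 * re-masked at the I/O APIC and the IRQ is only recorded in ipending,
 * to be replayed later from the doreti_unpend/Xresume path instead of
 * being serviced again immediately.
 */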
#define INTR(irq_num, vec_name, maybe_extra_ipending) \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
	movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
	movl $KPSEL, %eax ; \
	maybe_extra_ipending ; \
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
	lock ; /* MP-safe */ \
	btsl $(irq_num), iactive ; /* lazy masking */ \
	jc 1f ; /* already active */ \
	MASK_LEVEL_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ; \
	MP_TRYLOCK ; /* XXX this is going away... */ \
	testl %eax, %eax ; /* did we get it? */ \
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ; \
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 2f ; /* this INT masked */ \
	incb _intr_nesting_level ; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
	lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	lock ; incl (%eax) ; \
	orl _intr_mask + (irq_num) * 4, %eax ; \
	andl $~IRQ_BIT(irq_num), _ipending ; \
	pushl _intr_unit + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
	call *_intr_handler + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
	lock ; andl $~IRQ_BIT(irq_num), iactive ; \
	UNMASK_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
	sti ; /* doreti repeats cli/sti */ \
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
	MASK_IRQ(irq_num) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	btsl $(irq_num), iactive ; /* still active */ \
	jnc 0b ; /* retry */ \
	iret ; /* XXX: iactive bit might be 0 now */ \
2: ; /* masked by cpl, leave iactive set */ \
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
3: ; /* other cpu has isr lock */ \
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 4f ; /* this INT masked */ \
	call forward_irq ; /* forward irq to lock holder */ \
	POP_FRAME ; /* and return */ \
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ; \
	POP_FRAME ; /* and return */ \
 * Handle "spurious INTerrupts".
 * This is different from the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs.  See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.

	/* No EOI cycle used here */
 * Handle TLB shootdowns.

#ifdef COUNT_XINVLTLB_HITS
#endif /* COUNT_XINVLTLB_HITS */

	movl %cr3, %eax /* invalidate the TLB */
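	/*
	 * The flush works by writing the value just read back into %cr3;
	 * on the i386 a %cr3 reload discards all non-global TLB entries.
	 */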
	ss /* stack segment, avoid %ds load */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags

	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc

	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	testl $PSL_VM, 24(%esp)
	incl %ebx /* system or interrupt */
	movl %ebx, _checkstate_cpustate(,%eax,4)
	movl %ebx, _checkstate_curproc(,%eax,4)
	movl %ebx, _checkstate_pc(,%eax,4)
	lock /* checkstate_probed_cpus |= (1<<id) */
	btsl %eax, _checkstate_probed_cpus
	popl %ds /* restore previous data segment */

#endif /* BETTER_CLOCK */
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *  - We need a better method of triggering asts on other cpus.

	mov %ax, %ds /* use KERNEL data segment */
	lock /* checkstate_need_ast &= ~(1<<id) */
	btrl %eax, _checkstate_need_ast
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	btsl %eax, _checkstate_pending_ast
	FAKE_MCOUNT(13*4(%esp))

 * Giant locks do not come cheap.
 * A lot of cycles are going to be wasted here.

	orl $AST_PENDING, _astpending /* XXX */
	incb _intr_nesting_level
	btrl %eax, _checkstate_pending_ast
	btrl %eax, CNAME(resched_cpus)
	orl $AST_PENDING+AST_RESCHED, _astpending
	incl CNAME(want_resched_cnt)
	incl CNAME(cpuast_cnt)
	/* We are already in the process of delivering an ast for this CPU */
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.

	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))
	testl %eax, %eax /* Did we get the lock ? */
	incl CNAME(forward_irq_hitcnt)
	cmpb $4, _intr_nesting_level
	incb _intr_nesting_level
	jmp _doreti /* Handle forwarded interrupt */
	incl CNAME(forward_irq_misscnt)
	call forward_irq /* Oops, we've lost the isr lock */
	incl CNAME(forward_irq_toodeepcnt)
	cmpl $0, CNAME(forward_irq_enabled)
	movl $0, %eax /* Pick CPU #0 if no one has the lock */
	movl _cpu_num_to_apic_id(,%eax,4), %ecx
	movl lapic_icr_hi, %eax
	andl $~APIC_ID_MASK, %eax
	movl %eax, lapic_icr_hi
	movl lapic_icr_lo, %eax
	andl $APIC_DELSTAT_MASK, %eax
	movl lapic_icr_lo, %eax
	andl $APIC_RESV2_MASK, %eax
	orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl %eax, lapic_icr_lo
	movl lapic_icr_lo, %eax
	andl $APIC_DELSTAT_MASK, %eax
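	/*
	 * The two read-and-mask sequences on lapic_icr_lo above poll the
	 * ICR delivery-status bit: the first waits for any previous IPI to
	 * be accepted before the command word is written, the second waits
	 * for this forwarded-IRQ IPI itself to be accepted.
	 */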
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.

	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	imull $PCB_SIZE, %eax
	leal CNAME(stoppcbs)(%eax), %eax
	call CNAME(savectx) /* Save process context */
	btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
	btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
	btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
	btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
	movl CNAME(cpustop_restartfunc), %eax
	movl $0, CNAME(cpustop_restartfunc) /* One-shot */
	popl %ds /* restore previous data segment */
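/*
 * The handler above implements the stop/restart handshake: the stopped CPU
 * saves its context into stoppcbs[cpuid], advertises itself in stopped_cpus,
 * spins until its bit appears in started_cpus, clears both bits, and then,
 * if cpustop_restartfunc is set, clears and calls it as a one-shot hook
 * before resuming.
 */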
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
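/*
 * One fast-interrupt entry point is instantiated per I/O APIC pin handled
 * here (pins 0 through 23); the INTR() instantiations that follow generate
 * the corresponding entry points for the normal (non-fast) handlers.
 */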
#define CLKINTR_PENDING \
	pushl $clock_lock ; \
	movl $1, CNAME(clkintr_pending) ; \

	INTR(0,intr0, CLKINTR_PENDING)
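/*
 * IRQ 0 (the clock) passes CLKINTR_PENDING as its maybe_extra_ipending
 * hook: it records in clkintr_pending, under clock_lock, that a clock
 * interrupt is pending before the normal INTR() bookkeeping runs.
 */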
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *  - Calls the generic rendezvous action function.

	mov %ax, %ds /* use KERNEL data segment */
	call _smp_rendezvous_action
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.

 * ipl.s: doreti_unpend
	.long Xresume0, Xresume1, Xresume2, Xresume3
	.long Xresume4, Xresume5, Xresume6, Xresume7
	.long Xresume8, Xresume9, Xresume10, Xresume11
	.long Xresume12, Xresume13, Xresume14, Xresume15
	.long Xresume16, Xresume17, Xresume18, Xresume19
	.long Xresume20, Xresume21, Xresume22, Xresume23

 * ipl.s: doreti_unpend
 * apic_ipl.s: splz_unpend
	.long _swi_null, swi_net, _swi_null, _swi_null
	.long _swi_vm, _swi_null, _softclock

imasks: /* masks for interrupt handlers */
	.space NHWI*4 /* padding; HWI masks are elsewhere */
	.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
/* active flag for lazy masking */

#ifdef COUNT_XINVLTLB_HITS
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus

	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
_checkstate_pending_ast:
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
CNAME(forward_irq_hitcnt):
CNAME(forward_irq_toodeepcnt):
CNAME(want_resched_cnt):
CNAME(cpustop_restartfunc):
	.globl _apic_pin_trigger