/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.2 2003/06/17 04:28:36 dillon Exp $
 */

#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
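
/*
 * Worked example: for IRQ 2, IRQ_BIT(2) == 0x00000004 and
 * REDTBL_IDX(2) == 0x14, the I/O APIC select index of the low 32 bits of
 * that IRQ's 64-bit redirection entry (the high half is at index 0x15).
 */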

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define FAST_INTR(irq_num, vec_name) \
	pushl %eax ; /* save only call-used registers */ \
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
	pushl _intr_unit + (irq_num) * 4 ; \
	call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	movl $0, lapic_eoi ; \
	incl _cnt+V_INTR ; /* book-keeping can wait */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	pushl $0 ; /* dummy error code */ \
	pushl $0 ; /* dummy trap type */ \
	pushl %ds ; /* save data and extra segments ... */ \

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
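
/*
 * Layout note (inferred from the offsets used above and in MASK_IRQ /
 * UNMASK_IRQ below): each int_to_apicintpin[] entry is 16 bytes, with the
 * I/O APIC register base for this IRQ's pin at offset 8 and the pin's
 * redirection-register select index at offset 12.
 */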

#define MASK_IRQ(irq_num) \
	IMASK_LOCK ; /* enter critical region */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	jne 7f ; /* already masked, don't mask again */ \
	orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax, (%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
	orl $IOART_INTMASK, %eax ; /* set the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
7: ; /* already masked */ \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
	jz 9f ; /* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \

#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl (%eax), %eax ; \
	testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \

#define EOI_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), lapic_isr1 ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \

/*
 * Test to see if the source is currently masked, and clear the mask if so.
 */
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ; /* enter critical region */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	je 7f ; /* bit clear, not masked */ \
	andl $~IRQ_BIT(irq_num), _apic_imen ; /* clear mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax, (%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
	andl $~IOART_INTMASK, %eax ; /* clear the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_lock_np)
	movl CNAME(apic_itrace_debugbuffer_idx), %ecx
	movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_unlock_np)

#define APIC_ITRACE(name, irq_num, id) \
	lock ; /* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4 ; \
	movl $(irq_num), %eax ; \
	cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
	call log_intr_event ; \

#define APIC_ITRACE(name, irq_num, id) \
	lock ; /* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4

#define APIC_ITRACE_ENTER	1
#define APIC_ITRACE_EOI		2
#define APIC_ITRACE_TRYISRLOCK	3
#define APIC_ITRACE_GOTISRLOCK	4
#define APIC_ITRACE_ENTER2	5
#define APIC_ITRACE_LEAVE	6
#define APIC_ITRACE_UNMASK	7
#define APIC_ITRACE_ACTIVE	8
#define APIC_ITRACE_MASKED	9
#define APIC_ITRACE_NOISRLOCK	10
#define APIC_ITRACE_MASKED2	11
#define APIC_ITRACE_SPLZ	12
#define APIC_ITRACE_DORETI	13

#define APIC_ITRACE(name, irq_num, id)

#define INTR(irq_num, vec_name, maybe_extra_ipending) \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
	movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
	movl $KPSEL, %eax ; \
	maybe_extra_ipending ; \
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
	lock ; /* MP-safe */ \
	btsl $(irq_num), iactive ; /* lazy masking */ \
	jc 1f ; /* already active */ \
	MASK_LEVEL_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ; \
	MP_TRYLOCK ; /* XXX this is going away... */ \
	testl %eax, %eax ; /* did we get it? */ \
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ; \
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 2f ; /* this INT masked */ \
	incb _intr_nesting_level ; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
	lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	lock ; incl (%eax) ; \
	orl _intr_mask + (irq_num) * 4, %eax ; \
	andl $~IRQ_BIT(irq_num), _ipending ; \
	pushl _intr_unit + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
	call *_intr_handler + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
	lock ; andl $~IRQ_BIT(irq_num), iactive ; \
	UNMASK_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
	sti ; /* doreti repeats cli/sti */ \
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
	MASK_IRQ(irq_num) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	btsl $(irq_num), iactive ; /* still active */ \
	jnc 0b ; /* retry */ \
	iret ; /* XXX: iactive bit might be 0 now */ \
2: ; /* masked by cpl, leave iactive set */ \
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
3: ; /* other cpu has isr lock */ \
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 4f ; /* this INT masked */ \
	call forward_irq ; /* forward irq to lock holder */ \
	POP_FRAME ; /* and return */ \
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ; \
	POP_FRAME ; /* and return */ \

/*
 * Handle "spurious INTerrupts".
 *
 * This is different from the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs. See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.
 */
	/* No EOI cycle used here */

/*
 * Handle TLB shootdowns.
 */
#ifdef COUNT_XINVLTLB_HITS
#endif /* COUNT_XINVLTLB_HITS */
	movl %cr3, %eax /* invalidate the TLB */
	movl %eax, %cr3
	ss /* stack segment, avoid %ds load */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *    0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc

	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	testl $PSL_VM, 24(%esp) /* interrupted while in vm86 mode? */
	incl %ebx /* system or interrupt */
	movl %ebx, _checkstate_cpustate(,%eax,4)
	movl %ebx, _checkstate_curproc(,%eax,4)
	movl %ebx, _checkstate_pc(,%eax,4)
	lock /* checkstate_probed_cpus |= (1<<id) */
	btsl %eax, _checkstate_probed_cpus
	popl %ds /* restore previous data segment */
#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *  - We need a better method of triggering asts on other cpus.
 */

	mov %ax, %ds /* use KERNEL data segment */
	lock /* checkstate_need_ast &= ~(1<<id) */
	btrl %eax, _checkstate_need_ast
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	btsl %eax, _checkstate_pending_ast /* mark AST delivery in progress */
	FAKE_MCOUNT(13*4(%esp))

/*
 * Giant locks do not come cheap.
 * A lot of cycles are going to be wasted here.
 */
	orl $AST_PENDING, _astpending /* XXX */
	incb _intr_nesting_level
	btrl %eax, _checkstate_pending_ast
	btrl %eax, CNAME(resched_cpus)
	orl $AST_PENDING+AST_RESCHED, _astpending
	incl CNAME(want_resched_cnt)
	incl CNAME(cpuast_cnt)
	/* We are already in the process of delivering an ast for this CPU */

/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))
	testl %eax, %eax /* Did we get the lock? */
	incl CNAME(forward_irq_hitcnt)
	cmpb $4, _intr_nesting_level
	incb _intr_nesting_level
	jmp _doreti /* Handle forwarded interrupt */
	incl CNAME(forward_irq_misscnt)
	call forward_irq /* Oops, we've lost the isr lock */
	incl CNAME(forward_irq_toodeepcnt)

	cmpl $0, CNAME(forward_irq_enabled)
	movl $0, %eax /* pick CPU #0 if no one holds the lock */
	movl _cpu_num_to_apic_id(,%eax,4), %ecx
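
	/*
	 * The sequence below programs the local APIC ICR to deliver the
	 * forwarded-IRQ vector to the CPU chosen above: ICR_HI carries the
	 * destination APIC ID, and ICR_LO is written with fixed delivery
	 * mode and the XFORWARD_IRQ vector only after the delivery-status
	 * bit shows that any previous IPI has completed.
	 */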
	movl lapic_icr_hi, %eax
	andl $~APIC_ID_MASK, %eax
	movl %eax, lapic_icr_hi

	movl lapic_icr_lo, %eax
	andl $APIC_DELSTAT_MASK, %eax

	movl lapic_icr_lo, %eax
	andl $APIC_RESV2_MASK, %eax
	orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl %eax, lapic_icr_lo

	movl lapic_icr_lo, %eax
	andl $APIC_DELSTAT_MASK, %eax

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	imull $PCB_SIZE, %eax
	leal CNAME(stoppcbs)(%eax), %eax /* %eax = &stoppcbs[cpuid] */
	call CNAME(savectx) /* Save process context */
	btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
	btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
	btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
	btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
	movl CNAME(cpustop_restartfunc), %eax
	movl $0, CNAME(cpustop_restartfunc) /* One-shot */
	popl %ds /* restore previous data segment */

	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
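
/*
 * CLKINTR_PENDING is passed as INTR()'s maybe_extra_ipending hook for the
 * clock interrupt (IRQ 0): it notes a pending clock tick in clkintr_pending,
 * taking clock_lock around the update.
 */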
#define CLKINTR_PENDING \
	pushl $clock_lock ; \
	movl $1, CNAME(clkintr_pending) ; \

	INTR(0,intr0, CLKINTR_PENDING)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 *  - Calls the generic rendezvous action function.
 */

	mov %ax, %ds /* use KERNEL data segment */
	call _smp_rendezvous_action
	movl $0, lapic_eoi /* End Of Interrupt to APIC */

/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 *
 * Used by:
 *  ipl.s: doreti_unpend
 */
	.long Xresume0, Xresume1, Xresume2, Xresume3
	.long Xresume4, Xresume5, Xresume6, Xresume7
	.long Xresume8, Xresume9, Xresume10, Xresume11
	.long Xresume12, Xresume13, Xresume14, Xresume15
	.long Xresume16, Xresume17, Xresume18, Xresume19
	.long Xresume20, Xresume21, Xresume22, Xresume23

/*
 * Used by:
 *  ipl.s: doreti_unpend
 *  apic_ipl.s: splz_unpend
 */
	.long _swi_null, swi_net, _swi_null, _swi_null
	.long _swi_vm, _swi_null, _softclock

imasks: /* masks for interrupt handlers */
	.space NHWI*4 /* padding; HWI masks are elsewhere */

	.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking */

#ifdef COUNT_XINVLTLB_HITS
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus

	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
#endif /* BETTER_CLOCK */

	.globl _checkstate_need_ast
_checkstate_need_ast:
_checkstate_pending_ast:

	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
CNAME(forward_irq_hitcnt):
CNAME(forward_irq_toodeepcnt):
CNAME(want_resched_cnt):
CNAME(cpustop_restartfunc):

	.globl _apic_pin_trigger