2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.7 2003/07/01 20:31:38 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
11 #include "i386/isa/intr_machdep.h"
13 /* convert an absolute IRQ# into a bitmask */
/* IRQ_BIT(n) == (1 << n); tested against apic_imen / iactive / ipending below */
14 #define IRQ_BIT(irq_num) (1 << (irq_num))
16 /* make an index into the IO APIC from the IRQ# */
/* two 32-bit redirection-table register slots per IRQ, starting at I/O APIC reg 0x10 */
17 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
21 * Macros for interrupt entry, call to handler, and exit.
/*
 * Fast interrupt entry: call the handler immediately, then EOI the local
 * APIC.  NOTE(review): interior lines of this macro are missing from this
 * excerpt; only surviving lines are annotated.
 */
24 #define FAST_INTR(irq_num, vec_name) \
28 pushl %eax ; /* save only call-used registers */ \
39 FAKE_MCOUNT(6*4(%esp)) ; /* profiling hook (opaque macro) */ \
40 pushl intr_unit + (irq_num) * 4 ; /* unit argument for the handler */ \
41 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
43 movl $0, lapic_eoi ; /* ack: End Of Interrupt to local APIC */ \
45 incl cnt+V_INTR ; /* book-keeping can wait */ \
46 movl intr_countp + (irq_num) * 4, %eax ; /* per-IRQ counter pointer */ \
62 pushl $0 ; /* dummy error code */ \
63 pushl $0 ; /* dummy trap type */ \
65 pushl %ds ; /* save data and extra segments ... */ \
76 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8 /* 16-byte entries; +8 appears to be the ioapic base-address field -- TODO confirm struct layout */
77 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12 /* +12: redirection-table index field */
/*
 * Mask irq_num: set its bit in apic_imen and the mask bit in its I/O APIC
 * redirection entry, unless it is already masked.
 */
79 #define MASK_IRQ(irq_num) \
80 IMASK_LOCK ; /* into critical reg */ \
81 testl $IRQ_BIT(irq_num), apic_imen ; /* already masked? */ \
82 jne 7f ; /* masked, don't mask */ \
83 orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
84 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
85 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
86 movl %eax, (%ecx) ; /* write the index */ \
87 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
88 orl $IOART_INTMASK, %eax ; /* set the mask */ \
89 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
90 7: ; /* already masked */ \
93 * Test to see whether we are handling an edge or level triggered INT.
94 * Level-triggered INTs must still be masked as we don't clear the source,
95 * and the EOI cycle would cause redundant INTs to occur.
97 #define MASK_LEVEL_IRQ(irq_num) \
98 testl $IRQ_BIT(irq_num), apic_pin_trigger ; /* level-triggered pin? */ \
99 jz 9f ; /* edge, don't mask */ \
100 MASK_IRQ(irq_num) ; \
104 #ifdef APIC_INTR_REORDER
105 #define EOI_IRQ(irq_num) \
106 movl apic_isrbit_location + 8 * (irq_num), %eax ; /* 8-byte table entry: +0 = ptr to ISR word */ \
107 movl (%eax), %eax ; /* fetch ISR bits */ \
108 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* +4 = bit mask for this irq */ \
109 jz 9f ; /* not active */ \
110 movl $0, lapic_eoi ; /* ack: End Of Interrupt */ \
111 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
115 #define EOI_IRQ(irq_num) \
116 testl $IRQ_BIT(irq_num), lapic_isr1; /* in service? */ \
117 jz 9f ; /* not active */ \
118 movl $0, lapic_eoi; /* ack: End Of Interrupt */ \
119 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
125 * Test to see if the source is currently masked, clear if so.
127 #define UNMASK_IRQ(irq_num) \
128 IMASK_LOCK ; /* into critical reg */ \
129 testl $IRQ_BIT(irq_num), apic_imen ; /* currently masked? */ \
130 je 7f ; /* bit clear, not masked */ \
131 andl $~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */ \
132 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
133 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
134 movl %eax,(%ecx) ; /* write the index */ \
135 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
136 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
137 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
141 #ifdef APIC_INTR_DIAGNOSTIC
142 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event body fragment (entry label not visible in this excerpt):
 * appends a 16-bit record containing at least the cpuid to a debug ring
 * buffer, under the apic_itrace_debuglock spinlock.
 */
146 pushl $CNAME(apic_itrace_debuglock)
147 call CNAME(s_lock_np) /* take debug-buffer spinlock */
149 movl CNAME(apic_itrace_debugbuffer_idx), %ecx
151 movl PCPU(cpuid), %eax
154 movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2) /* 16-bit entries */
157 movl %ecx, CNAME(apic_itrace_debugbuffer_idx) /* store advanced index */
158 pushl $CNAME(apic_itrace_debuglock)
159 call CNAME(s_unlock_np) /* release spinlock */
/*
 * Trace an APIC interrupt event: bump a per-IRQ event counter and, in the
 * per-IRQ diagnostic build, log the event via log_intr_event.
 */
165 #define APIC_ITRACE(name, irq_num, id) \
166 lock ; /* MP-safe */ \
167 incl CNAME(name) + (irq_num) * 4 ; /* per-IRQ event counter */ \
171 movl $(irq_num), %eax ; \
172 cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; /* only log the chosen IRQ */ \
175 call log_intr_event ; \
182 #define APIC_ITRACE(name, irq_num, id) \
183 lock ; /* MP-safe */ \
184 incl CNAME(name) + (irq_num) * 4
/* event ids passed as the 'id' argument of APIC_ITRACE */
187 #define APIC_ITRACE_ENTER 1
188 #define APIC_ITRACE_EOI 2
189 #define APIC_ITRACE_TRYISRLOCK 3
190 #define APIC_ITRACE_GOTISRLOCK 4
191 #define APIC_ITRACE_ENTER2 5
192 #define APIC_ITRACE_LEAVE 6
193 #define APIC_ITRACE_UNMASK 7
194 #define APIC_ITRACE_ACTIVE 8
195 #define APIC_ITRACE_MASKED 9
196 #define APIC_ITRACE_NOISRLOCK 10
197 #define APIC_ITRACE_MASKED2 11
198 #define APIC_ITRACE_SPLZ 12
199 #define APIC_ITRACE_DORETI 13
/* tracing disabled: APIC_ITRACE expands to nothing */
202 #define APIC_ITRACE(name, irq_num, id)
/*
 * Heavyweight interrupt entry: lazy masking via iactive, MP (isr) lock
 * acquisition, cpl check, dispatch to handler, exit through doreti.
 * NOTE(review): many interior lines are missing from this excerpt.
 */
205 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
208 /* XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
211 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
214 movl $KPSEL, %eax ; /* per-cpu data segment selector */ \
217 maybe_extra_ipending ; \
219 APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
220 lock ; /* MP-safe */ \
221 btsl $(irq_num), iactive ; /* lazy masking */ \
222 jc 1f ; /* already active */ \
224 MASK_LEVEL_IRQ(irq_num) ; \
227 APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
228 MP_TRYLOCK ; /* XXX this is going away... */ \
229 testl %eax, %eax ; /* did we get it? */ \
232 APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
233 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
234 testl $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%eax) ; /* NOTE(review): tests cpl via %eax although curthread was just loaded into %ebx; the '3:' path below uses MTD_CPL(%ebx) -- verify against repo history */ \
235 jne 2f ; /* this INT masked */ \
236 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
237 jge 2f ; /* in critical sec */ \
239 incb PCPU(intr_nesting_level) ; \
241 /* entry point used by doreti_unpend for HWIs. */ \
242 __CONCAT(Xresume,irq_num): ; \
243 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
244 lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
245 movl _intr_countp + (irq_num) * 4, %eax ; /* NOTE(review): underscored symbols here vs plain 'intr_countp' in FAST_INTR -- check CNAME convention */ \
246 lock ; incl (%eax) ; /* bump per-IRQ counter */ \
248 movl PCPU(curthread), %ebx ; \
249 movl TD_MACH+MTD_CPL(%ebx), %eax ; \
250 pushl %eax ; /* cpl restored by doreti */ \
251 orl _intr_mask + (irq_num) * 4, %eax ; /* raise cpl by this IRQ's mask */ \
252 movl %eax, TD_MACH+MTD_CPL(%ebx) ; \
254 andl $~IRQ_BIT(irq_num), PCPU(ipending) ; /* no longer pending */ \
256 pushl _intr_unit + (irq_num) * 4 ; /* unit argument for handler */ \
257 APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
259 call *_intr_handler + (irq_num) * 4 ; \
261 APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
264 lock ; andl $~IRQ_BIT(irq_num), iactive ; /* no longer active */ \
265 UNMASK_IRQ(irq_num) ; \
266 APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
267 sti ; /* doreti repeats cli/sti */ \
273 APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
274 MASK_IRQ(irq_num) ; \
277 orl $IRQ_BIT(irq_num), PCPU(ipending) ; /* remember for doreti */ \
278 movl $TDPRI_CRIT, PCPU(reqpri) ; \
280 btsl $(irq_num), iactive ; /* still active */ \
281 jnc 0b ; /* retry */ \
283 iret ; /* XXX: iactive bit might be 0 now */ \
285 2: ; /* masked by cpl, leave iactive set */ \
286 APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
288 orl $IRQ_BIT(irq_num), PCPU(ipending) ; \
289 movl $TDPRI_CRIT, PCPU(reqpri) ; \
294 3: ; /* other cpu has isr lock */ \
295 APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
297 orl $IRQ_BIT(irq_num), PCPU(ipending) ; \
298 movl $TDPRI_CRIT,_reqpri ; /* NOTE(review): '_reqpri' here vs PCPU(reqpri) elsewhere -- verify */ \
299 testl $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ; \
300 jne 4f ; /* this INT masked */ \
301 call forward_irq ; /* forward irq to lock holder */ \
302 POP_FRAME ; /* and return */ \
306 APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
307 POP_FRAME ; /* and return */ \
311 * Handle "spurious INTerrupts".
313 * This is different than the "spurious INTerrupt" generated by an
314 * 8259 PIC for missing INTs. See the APIC documentation for details.
315 * This routine should NOT do an 'EOI' cycle.
322 /* No EOI cycle used here */
328 * Handle TLB shootdowns.
336 #ifdef COUNT_XINVLTLB_HITS
340 movl PCPU(cpuid), %eax
344 #endif /* COUNT_XINVLTLB_HITS */
/* reading %cr3 back and rewriting it flushes the TLB (write-back line not visible in this excerpt) */
346 movl %cr3, %eax /* invalidate the TLB */
349 ss /* stack segment, avoid %ds load */
350 movl $0, lapic_eoi /* End Of Interrupt to APIC */
359 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
361 * - Stores current cpu state in checkstate_cpustate[cpuid]
362 * 0 == user, 1 == sys, 2 == intr
363 * - Stores current process in checkstate_curproc[cpuid]
365 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
367 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
372 .globl Xcpucheckstate
373 .globl checkstate_cpustate
374 .globl checkstate_curproc
379 pushl %ds /* save current data segment */
383 mov %ax, %ds /* use KERNEL data segment */
387 movl $0, lapic_eoi /* End Of Interrupt to APIC */
394 testl $PSL_VM, 24(%esp) /* vm86 mode? (eflags at 24, per layout above) */
396 incl %ebx /* system or interrupt */
398 movl PCPU(cpuid), %eax
399 movl %ebx, checkstate_cpustate(,%eax,4) /* record 0/1/2 state */
400 movl PCPU(curthread), %ebx
401 movl TD_PROC(%ebx),%ebx /* thread -> process */
402 movl %ebx, checkstate_curproc(,%eax,4)
404 movl %ebx, checkstate_pc(,%eax,4) /* NOTE(review): %ebx still holds curproc at this point; the load of the pc is on a line not visible here -- verify */
406 lock /* checkstate_probed_cpus |= (1<<id) */
407 btsl %eax, checkstate_probed_cpus
410 popl %ds /* restore previous data segment */
418 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
420 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
422 * - We need a better method of triggering asts on other cpus.
431 mov %ax, %ds /* use KERNEL data segment */
436 movl PCPU(cpuid), %eax
437 lock /* checkstate_need_ast &= ~(1<<id) */
438 btrl %eax, checkstate_need_ast
439 movl $0, lapic_eoi /* End Of Interrupt to APIC */
442 btsl %eax, checkstate_pending_ast /* already delivering an ast? */
445 FAKE_MCOUNT(13*4(%esp))
448 * Giant locks do not come cheap.
449 * A lot of cycles are going to be wasted here.
453 movl PCPU(curthread), %eax
454 pushl TD_MACH+MTD_CPL(%eax) /* cpl restored by doreti */
456 orl $AST_PENDING, PCPU(astpending) /* XXX */
457 incb PCPU(intr_nesting_level)
460 movl PCPU(cpuid), %eax
462 btrl %eax, checkstate_pending_ast /* delivery complete */
464 btrl %eax, CNAME(resched_cpus) /* reschedule requested for this cpu? */
466 orl $AST_PENDING+AST_RESCHED,PCPU(astpending)
468 incl CNAME(want_resched_cnt)
471 incl CNAME(cpuast_cnt)
475 /* We are already in the process of delivering an ast for this CPU */
481 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
490 mov %ax, %ds /* use KERNEL data segment */
495 movl $0, lapic_eoi /* End Of Interrupt to APIC */
497 FAKE_MCOUNT(13*4(%esp))
500 testl %eax,%eax /* Did we get the lock ? */
504 incl CNAME(forward_irq_hitcnt)
505 cmpb $4, PCPU(intr_nesting_level) /* nested too deeply? */
508 movl PCPU(curthread), %eax
509 pushl TD_MACH+MTD_CPL(%eax) /* cpl restored by doreti */
511 incb PCPU(intr_nesting_level)
515 jmp doreti /* Handle forwarded interrupt */
518 incl CNAME(forward_irq_misscnt)
519 call forward_irq /* Oops, we've lost the isr lock */
525 incl CNAME(forward_irq_toodeepcnt)
/* forward_irq: send an XFORWARD_IRQ IPI to the CPU that holds the isr lock */
540 cmpl $0, CNAME(forward_irq_enabled) /* feature enabled? */
546 movl $0, %eax /* Pick CPU #0 if no one has lock */
549 movl _cpu_num_to_apic_id(,%eax,4),%ecx /* ecx = target's APIC id */
551 movl lapic_icr_hi, %eax
552 andl $~APIC_ID_MASK, %eax /* clear old destination */
554 movl %eax, lapic_icr_hi /* set destination APIC id */
557 movl lapic_icr_lo, %eax
558 andl $APIC_DELSTAT_MASK,%eax /* poll: previous IPI delivered? */
560 movl lapic_icr_lo, %eax
561 andl $APIC_RESV2_MASK, %eax /* keep reserved bits */
562 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
563 movl %eax, lapic_icr_lo /* writing ICR low sends the IPI */
565 movl lapic_icr_lo, %eax
566 andl $APIC_DELSTAT_MASK,%eax /* poll: wait for delivery to complete */
572 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
574 * - Signals its receipt.
575 * - Waits for permission to restart.
576 * - Signals its restart.
588 pushl %ds /* save current data segment */
592 mov %ax, %ds /* use KERNEL data segment */
596 movl $0, lapic_eoi /* End Of Interrupt to APIC */
598 movl PCPU(cpuid), %eax
599 imull $PCB_SIZE, %eax /* eax = cpuid * sizeof(pcb) */
600 leal CNAME(stoppcbs)(%eax), %eax /* eax = &stoppcbs[cpuid] */
602 call CNAME(savectx) /* Save process context */
606 movl PCPU(cpuid), %eax
609 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
611 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
615 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
617 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
622 movl CNAME(cpustop_restartfunc), %eax /* optional restart callback */
625 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
630 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points, one per I/O APIC pin (0-23). */
640 FAST_INTR(0,fastintr0)
641 FAST_INTR(1,fastintr1)
642 FAST_INTR(2,fastintr2)
643 FAST_INTR(3,fastintr3)
644 FAST_INTR(4,fastintr4)
645 FAST_INTR(5,fastintr5)
646 FAST_INTR(6,fastintr6)
647 FAST_INTR(7,fastintr7)
648 FAST_INTR(8,fastintr8)
649 FAST_INTR(9,fastintr9)
650 FAST_INTR(10,fastintr10)
651 FAST_INTR(11,fastintr11)
652 FAST_INTR(12,fastintr12)
653 FAST_INTR(13,fastintr13)
654 FAST_INTR(14,fastintr14)
655 FAST_INTR(15,fastintr15)
656 FAST_INTR(16,fastintr16)
657 FAST_INTR(17,fastintr17)
658 FAST_INTR(18,fastintr18)
659 FAST_INTR(19,fastintr19)
660 FAST_INTR(20,fastintr20)
661 FAST_INTR(21,fastintr21)
662 FAST_INTR(22,fastintr22)
663 FAST_INTR(23,fastintr23)
/* IRQ0 (clock) extra work: latch clkintr_pending under clock_lock */
665 #define CLKINTR_PENDING \
666 pushl $clock_lock ; \
668 movl $1,CNAME(clkintr_pending) ; \
672 INTR(0,intr0, CLKINTR_PENDING)
699 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
701 * - Calls the generic rendezvous action function.
709 mov %ax, %ds /* use KERNEL data segment */
714 call _smp_rendezvous_action /* NOTE(review): underscored name vs plain names elsewhere in this file -- check CNAME usage */
716 movl $0, lapic_eoi /* End Of Interrupt to APIC */
725 * Addresses of interrupt handlers.
726 * XresumeNN: Resumption addresses for HWIs.
732 * ipl.s: doreti_unpend
/* hardware interrupt resume vectors, indexed by IRQ number (0-23) */
734 .long Xresume0, Xresume1, Xresume2, Xresume3
735 .long Xresume4, Xresume5, Xresume6, Xresume7
736 .long Xresume8, Xresume9, Xresume10, Xresume11
737 .long Xresume12, Xresume13, Xresume14, Xresume15
738 .long Xresume16, Xresume17, Xresume18, Xresume19
739 .long Xresume20, Xresume21, Xresume22, Xresume23
742 * ipl.s: doreti_unpend
743 * apic_ipl.s: splz_unpend
/* software interrupt vectors (NOTE(review): mixed '_swi_null'/'swi_net' underscore styles -- verify) */
745 .long _swi_null, swi_net, _swi_null, _swi_null
746 .long _swi_vm, _swi_null, _softclock
748 imasks: /* masks for interrupt handlers */
749 .space NHWI*4 /* padding; HWI masks are elsewhere */
751 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
752 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
755 /* active flag for lazy masking */
759 #ifdef COUNT_XINVLTLB_HITS
763 #endif /* COUNT_XINVLTLB_HITS */
765 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
766 .globl stopped_cpus, started_cpus
773 .globl checkstate_probed_cpus
774 checkstate_probed_cpus:
776 #endif /* BETTER_CLOCK */
777 .globl checkstate_need_ast
/* bitmask of cpus with an ast delivery in progress (set/cleared in Xcpuast) */
780 checkstate_pending_ast:
/* statistics counters and hooks for irq forwarding / cpu stop */
782 .globl CNAME(forward_irq_misscnt)
783 .globl CNAME(forward_irq_toodeepcnt)
784 .globl CNAME(forward_irq_hitcnt)
785 .globl CNAME(resched_cpus)
786 .globl CNAME(want_resched_cnt)
787 .globl CNAME(cpuast_cnt)
788 .globl CNAME(cpustop_restartfunc)
789 CNAME(forward_irq_misscnt):
791 CNAME(forward_irq_hitcnt):
793 CNAME(forward_irq_toodeepcnt):
797 CNAME(want_resched_cnt):
801 CNAME(cpustop_restartfunc):
806 .globl apic_pin_trigger