2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.5 2003/06/22 08:54:22 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
11 #include "i386/isa/intr_machdep.h"
13 /* convert an absolute IRQ# into a bitmask */
14 #define IRQ_BIT(irq_num) (1 << (irq_num))
16 /* make an index into the IO APIC from the IRQ# */
/* (each 64-bit redirection-table entry occupies two 32-bit I/O APIC
 * registers starting at register 0x10, hence the * 2 stride) */
17 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
21 * Macros for interrupt entry, call to handler, and exit.
/*
 * FAST_INTR (fragment -- several macro lines are not visible in this
 * chunk).  Fast-interrupt entry: saves the call-used registers, pushes
 * the unit cookie and calls the handler via _intr_handler[irq_num] as
 * early as possible, writes lapic_eoi to acknowledge the local APIC,
 * then does the bookkeeping (_cnt+V_INTR and the per-IRQ counter
 * pointed at by _intr_countp[irq_num]).
 */
24 #define FAST_INTR(irq_num, vec_name) \
28 pushl %eax ; /* save only call-used registers */ \
39 FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
40 pushl _intr_unit + (irq_num) * 4 ; \
41 call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
43 movl $0, lapic_eoi ; \
45 incl _cnt+V_INTR ; /* book-keeping can wait */ \
46 movl _intr_countp + (irq_num) * 4, %eax ; \
/* NOTE(review): the frame-building lines below (dummy error code and */ \
/* trap type, segment saves) appear to belong to a later entry macro  */ \
/* whose #define is outside this fragment -- confirm in full file.    */ \
62 pushl $0 ; /* dummy error code */ \
63 pushl $0 ; /* dummy trap type */ \
65 pushl %ds ; /* save data and extra segments ... */ \
/* Per-IRQ I/O APIC routing info from the int_to_apicintpin[] table
 * (16-byte entries): the servicing I/O APIC's address (offset 8) and
 * the select-register index of the IRQ's redirection entry (offset 12). */
76 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
77 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * MASK_IRQ: mask irq_num at its I/O APIC -- record the mask in
 * _apic_imen and set IOART_INTMASK in the redirection entry, unless it
 * is already masked.  Runs under IMASK_LOCK (the matching unlock is
 * outside this visible fragment).  Clobbers %eax, %ecx and flags.
 */
79 #define MASK_IRQ(irq_num) \
80 IMASK_LOCK ; /* into critical reg */ \
81 testl $IRQ_BIT(irq_num), _apic_imen ; \
82 jne 7f ; /* masked, don't mask */ \
83 orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
84 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
85 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
86 movl %eax, (%ecx) ; /* write the index */ \
87 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
88 orl $IOART_INTMASK, %eax ; /* set the mask */ \
89 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
90 7: ; /* already masked */ \
93 * Test to see whether we are handling an edge or level triggered INT.
94 * Level-triggered INTs must still be masked as we don't clear the source,
95 * and the EOI cycle would cause redundant INTs to occur.
/*
 * MASK_LEVEL_IRQ: mask irq_num only when its bit is set in
 * _apic_pin_trigger (level-triggered); edge-triggered sources are left
 * unmasked.  The edge path jumps to local label 9, which lies outside
 * this visible fragment.
 */
97 #define MASK_LEVEL_IRQ(irq_num) \
98 testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
99 jz 9f ; /* edge, don't mask */ \
100 MASK_IRQ(irq_num) ; \
104 #ifdef APIC_INTR_REORDER
/*
 * EOI_IRQ (reorder variant): _apic_isrbit_location[] holds 8-byte
 * entries -- a pointer to the relevant local APIC ISR word (+0) and
 * the bit mask within it (+4).  EOI only when the IRQ is actually in
 * service; otherwise jump to local label 9 (outside this fragment).
 */
105 #define EOI_IRQ(irq_num) \
106 movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
107 movl (%eax), %eax ; \
108 testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
109 jz 9f ; /* not active */ \
110 movl $0, lapic_eoi ; \
111 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
/*
 * EOI_IRQ (plain variant): test the IRQ's bit directly in lapic_isr1
 * and EOI only when in service.  NOTE(review): this assumes every
 * irq_num's in-service bit lives in ISR word 1 -- confirm for the IRQ
 * range this macro is instantiated with.
 */
115 #define EOI_IRQ(irq_num) \
116 testl $IRQ_BIT(irq_num), lapic_isr1; \
117 jz 9f ; /* not active */ \
118 movl $0, lapic_eoi; \
119 APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
125 * Test to see if the source is currently masked, clear if so.
/*
 * UNMASK_IRQ: undo MASK_IRQ -- clear irq_num's bit in _apic_imen and
 * clear IOART_INTMASK in its redirection entry; no-op when the source
 * is not currently masked.  Runs under IMASK_LOCK (the matching unlock
 * is outside this visible fragment).  Clobbers %eax, %ecx and flags.
 */
127 #define UNMASK_IRQ(irq_num) \
128 IMASK_LOCK ; /* into critical reg */ \
129 testl $IRQ_BIT(irq_num), _apic_imen ; \
130 je 7f ; /* bit clear, not masked */ \
131 andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \
132 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
133 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
134 movl %eax,(%ecx) ; /* write the index */ \
135 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
136 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
137 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
141 #ifdef APIC_INTR_DIAGNOSTIC
142 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
/*
 * log_intr_event fragment (entry/exit lines are not visible in this
 * chunk): appends a 16-bit event word to apic_itrace_debugbuffer and
 * advances apic_itrace_debugbuffer_idx, all under the
 * apic_itrace_debuglock spinlock (s_lock_np/s_unlock_np).
 */
146 pushl $CNAME(apic_itrace_debuglock)
147 call CNAME(s_lock_np)
149 movl CNAME(apic_itrace_debugbuffer_idx), %ecx
154 movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
157 movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
158 pushl $CNAME(apic_itrace_debuglock)
159 call CNAME(s_unlock_np)
/*
 * APIC_ITRACE (diagnostic-IRQ variant, fragment): bump the per-IRQ
 * event counter, and additionally log the event via log_intr_event
 * when irq_num matches APIC_INTR_DIAGNOSTIC_IRQ.
 */
165 #define APIC_ITRACE(name, irq_num, id) \
166 lock ; /* MP-safe */ \
167 incl CNAME(name) + (irq_num) * 4 ; \
171 movl $(irq_num), %eax ; \
172 cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
175 call log_intr_event ; \
/* APIC_ITRACE (counting-only variant): just bump the event counter. */
182 #define APIC_ITRACE(name, irq_num, id) \
183 lock ; /* MP-safe */ \
184 incl CNAME(name) + (irq_num) * 4
/* Event ids passed to APIC_ITRACE (the 'id' argument). */
187 #define APIC_ITRACE_ENTER 1
188 #define APIC_ITRACE_EOI 2
189 #define APIC_ITRACE_TRYISRLOCK 3
190 #define APIC_ITRACE_GOTISRLOCK 4
191 #define APIC_ITRACE_ENTER2 5
192 #define APIC_ITRACE_LEAVE 6
193 #define APIC_ITRACE_UNMASK 7
194 #define APIC_ITRACE_ACTIVE 8
195 #define APIC_ITRACE_MASKED 9
196 #define APIC_ITRACE_NOISRLOCK 10
197 #define APIC_ITRACE_MASKED2 11
198 #define APIC_ITRACE_SPLZ 12
199 #define APIC_ITRACE_DORETI 13
/* Diagnostics disabled: APIC_ITRACE expands to nothing. */
202 #define APIC_ITRACE(name, irq_num, id)
/*
 * INTR (fragment -- many macro lines are not visible in this chunk):
 * slow (scheduled) interrupt entry for irq_num.  Visible flow:
 *  - reload kernel data/per-cpu segment selectors (KDSEL/KPSEL);
 *  - lazy masking: atomically set this IRQ's bit in iactive; if it
 *    was already set, the IRQ is already being serviced (1:);
 *  - level-triggered sources are masked via MASK_LEVEL_IRQ;
 *  - MP_TRYLOCK attempts the giant ISR lock; on failure (3:) the IRQ
 *    is marked pending and forwarded to the lock holder;
 *  - if masked by the current cpl, or the thread is in a critical
 *    section (2:), mark the IRQ pending and leave iactive set;
 *  - Xresume<irq_num>: tally the interrupt, raise the cpl by
 *    _intr_mask[irq_num], clear the bit in _ipending and call the
 *    handler; afterwards clear iactive and unmask the source.
 */
205 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
208 /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
211 movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
214 movl $KPSEL, %eax ; \
217 maybe_extra_ipending ; \
219 APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
220 lock ; /* MP-safe */ \
221 btsl $(irq_num), iactive ; /* lazy masking */ \
222 jc 1f ; /* already active */ \
224 MASK_LEVEL_IRQ(irq_num) ; \
227 APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
228 MP_TRYLOCK ; /* XXX this is going away... */ \
229 testl %eax, %eax ; /* did we get it? */ \
232 APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
233 movl _curthread,%ebx ; \
234 testl $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ; /* BUGFIX: was %eax, which holds the MP_TRYLOCK result; curthread is in %ebx (cf. TD_PRI test below and the same test at 3:) */ \
235 jne 2f ; /* this INT masked */ \
236 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
237 jge 2f ; /* in critical sec */ \
239 incb _intr_nesting_level ; \
241 /* entry point used by doreti_unpend for HWIs. */ \
242 __CONCAT(Xresume,irq_num): ; \
243 FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
244 lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
245 movl _intr_countp + (irq_num) * 4, %eax ; \
246 lock ; incl (%eax) ; \
248 movl _curthread, %ebx ; \
249 movl TD_MACH+MTD_CPL(%ebx), %eax ; \
250 pushl %eax ; /* cpl restored by doreti */ \
251 orl _intr_mask + (irq_num) * 4, %eax ; \
252 movl %eax, TD_MACH+MTD_CPL(%ebx) ; \
254 andl $~IRQ_BIT(irq_num), _ipending ; \
256 pushl _intr_unit + (irq_num) * 4 ; \
257 APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
259 call *_intr_handler + (irq_num) * 4 ; \
261 APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
263 lock ; andl $~IRQ_BIT(irq_num), iactive ; \
264 UNMASK_IRQ(irq_num) ; \
265 APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
266 sti ; /* doreti repeats cli/sti */ \
272 APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
273 MASK_IRQ(irq_num) ; \
276 orl $IRQ_BIT(irq_num), _ipending ; \
277 movl $TDPRI_CRIT,_reqpri ; \
279 btsl $(irq_num), iactive ; /* still active */ \
280 jnc 0b ; /* retry */ \
282 iret ; /* XXX: iactive bit might be 0 now */ \
284 2: ; /* masked by cpl, leave iactive set */ \
285 APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
287 orl $IRQ_BIT(irq_num), _ipending ; \
288 movl $TDPRI_CRIT,_reqpri ; \
293 3: ; /* other cpu has isr lock */ \
294 APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
296 orl $IRQ_BIT(irq_num), _ipending ; \
297 movl $TDPRI_CRIT,_reqpri ; \
298 testl $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ; \
299 jne 4f ; /* this INT masked */ \
300 call forward_irq ; /* forward irq to lock holder */ \
301 POP_FRAME ; /* and return */ \
305 APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
306 POP_FRAME ; /* and return */ \
310 * Handle "spurious INTerrupts".
312 * This is different than the "spurious INTerrupt" generated by an
313 * 8259 PIC for missing INTs. See the APIC documentation for details.
314 * This routine should NOT do an 'EOI' cycle.
321 /* No EOI cycle used here */
327 * Handle TLB shootdowns.
335 #ifdef COUNT_XINVLTLB_HITS
343 #endif /* COUNT_XINVLTLB_HITS */
/* Reloading %cr3 with its own value flushes all non-global TLB
 * entries; the store back into %cr3 is on a line not visible in this
 * fragment. */
345 movl %cr3, %eax /* invalidate the TLB */
/* The explicit %ss override lets us store without first reloading
 * %ds, which may not hold a kernel selector at this point. */
348 ss /* stack segment, avoid %ds load */
349 movl $0, lapic_eoi /* End Of Interrupt to APIC */
358 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
360 * - Stores current cpu state in checkstate_cpustate[cpuid]
361 * 0 == user, 1 == sys, 2 == intr
362 * - Stores current process in checkstate_curproc[cpuid]
364 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
366 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
371 .globl _Xcpucheckstate
372 .globl _checkstate_cpustate
373 .globl _checkstate_curproc
374 .globl _checkstate_pc
378 pushl %ds /* save current data segment */
382 mov %ax, %ds /* use KERNEL data segment */
386 movl $0, lapic_eoi /* End Of Interrupt to APIC */
/* Classify the interrupted context into %ebx (0 user, 1 sys, 2 intr);
 * PSL_VM in the saved eflags indicates vm86, i.e. user mode.
 * NOTE(review): the initial %ebx value and the cpuid load into %eax
 * happen on lines not visible in this fragment -- confirm there. */
393 testl $PSL_VM, 24(%esp)
395 incl %ebx /* system or interrupt */
398 movl %ebx, _checkstate_cpustate(,%eax,4)
399 movl _curthread, %ebx
400 movl TD_PROC(%ebx),%ebx /* %ebx = curthread->td_proc */
401 movl %ebx, _checkstate_curproc(,%eax,4)
/* NOTE(review): the saved pc is presumably loaded into %ebx on the
 * line not visible between these two stores -- confirm. */
403 movl %ebx, _checkstate_pc(,%eax,4)
405 lock /* checkstate_probed_cpus |= (1<<id) */
406 btsl %eax, _checkstate_probed_cpus
409 popl %ds /* restore previous data segment */
414 #endif /* BETTER_CLOCK */
417 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
419 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
421 * - We need a better method of triggering asts on other cpus.
430 mov %ax, %ds /* use KERNEL data segment */
436 lock /* checkstate_need_ast &= ~(1<<id) */
437 btrl %eax, _checkstate_need_ast
438 movl $0, lapic_eoi /* End Of Interrupt to APIC */
/* Mark an AST delivery in progress for this cpu; presumably the
 * (not-visible) branch after this skips the work when the bit was
 * already set -- see the "already in the process" note below. */
441 btsl %eax, _checkstate_pending_ast
444 FAKE_MCOUNT(13*4(%esp))
447 * Giant locks do not come cheap.
448 * A lot of cycles are going to be wasted here.
452 movl _curthread, %eax
453 pushl TD_MACH+MTD_CPL(%eax) /* cpl restored by doreti */
455 orl $AST_PENDING, _astpending /* XXX */
456 incb _intr_nesting_level
/* Delivery finished: clear the in-progress bit; if this cpu was also
 * asked to reschedule (its bit set in resched_cpus), request
 * AST_RESCHED as well and count it. */
463 btrl %eax, _checkstate_pending_ast
465 btrl %eax, CNAME(resched_cpus)
467 orl $AST_PENDING+AST_RESCHED,_astpending
469 incl CNAME(want_resched_cnt)
472 incl CNAME(cpuast_cnt)
476 /* We are already in the process of delivering an ast for this CPU */
482 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
491 mov %ax, %ds /* use KERNEL data segment */
496 movl $0, lapic_eoi /* End Of Interrupt to APIC */
498 FAKE_MCOUNT(13*4(%esp))
/* %eax holds the result of an ISR-lock acquisition attempt made on a
 * line not visible in this fragment. */
501 testl %eax,%eax /* Did we get the lock ? */
505 incl CNAME(forward_irq_hitcnt)
506 cmpb $4, _intr_nesting_level /* nesting limit; branch target not visible here */
509 movl _curthread, %eax
510 pushl TD_MACH+MTD_CPL(%eax) /* cpl restored by doreti */
512 incb _intr_nesting_level
518 jmp _doreti /* Handle forwarded interrupt */
/* Lock miss: count it and re-forward to the current lock holder. */
521 incl CNAME(forward_irq_misscnt)
522 call forward_irq /* Oops, we've lost the isr lock */
528 incl CNAME(forward_irq_toodeepcnt)
/*
 * forward_irq fragment: sends an XFORWARD_IRQ IPI to the cpu holding
 * the ISR lock (cpu #0 when nobody holds it), spinning on the ICR
 * delivery-status bit before and after writing the command.
 */
543 cmpl $0, CNAME(forward_irq_enabled) /* forwarding may be disabled */
549 movl $0, %eax /* Pick CPU #0 if noone has lock */
552 movl _cpu_num_to_apic_id(,%eax,4),%ecx /* target's APIC id */
/* Program the ICR destination field.  NOTE(review): the or of %ecx
 * into %eax happens on a line not visible in this fragment. */
554 movl lapic_icr_hi, %eax
555 andl $~APIC_ID_MASK, %eax
557 movl %eax, lapic_icr_hi
/* Spin until any previous IPI has been delivered (DELSTAT idle). */
560 movl lapic_icr_lo, %eax
561 andl $APIC_DELSTAT_MASK,%eax
/* Compose and send: fixed delivery mode, destination-field addressing,
 * vector XFORWARD_IRQ_OFFSET; preserve the reserved bits. */
563 movl lapic_icr_lo, %eax
564 andl $APIC_RESV2_MASK, %eax
565 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
566 movl %eax, lapic_icr_lo
/* Spin again until this IPI is delivered. */
568 movl lapic_icr_lo, %eax
569 andl $APIC_DELSTAT_MASK,%eax
575 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
577 * - Signals its receipt.
578 * - Waits for permission to restart.
579 * - Signals its restart.
591 pushl %ds /* save current data segment */
595 mov %ax, %ds /* use KERNEL data segment */
599 movl $0, lapic_eoi /* End Of Interrupt to APIC */
/* Save this cpu's context into stoppcbs[cpuid] (PCB_SIZE per entry);
 * %eax presumably holds the cpu id here (loaded on a line not visible
 * in this fragment). */
602 imull $PCB_SIZE, %eax
603 leal CNAME(stoppcbs)(%eax), %eax
605 call CNAME(savectx) /* Save process context */
/* Handshake with the stopping cpu: announce we stopped, spin until
 * started_cpus lets us go, then clear both bits. */
612 btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
614 btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
618 btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
620 btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
/* If a one-shot restart hook was installed, fetch it and clear it. */
625 movl CNAME(cpustop_restartfunc), %eax
628 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
633 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points for IRQs 0..23. */
643 FAST_INTR(0,fastintr0)
644 FAST_INTR(1,fastintr1)
645 FAST_INTR(2,fastintr2)
646 FAST_INTR(3,fastintr3)
647 FAST_INTR(4,fastintr4)
648 FAST_INTR(5,fastintr5)
649 FAST_INTR(6,fastintr6)
650 FAST_INTR(7,fastintr7)
651 FAST_INTR(8,fastintr8)
652 FAST_INTR(9,fastintr9)
653 FAST_INTR(10,fastintr10)
654 FAST_INTR(11,fastintr11)
655 FAST_INTR(12,fastintr12)
656 FAST_INTR(13,fastintr13)
657 FAST_INTR(14,fastintr14)
658 FAST_INTR(15,fastintr15)
659 FAST_INTR(16,fastintr16)
660 FAST_INTR(17,fastintr17)
661 FAST_INTR(18,fastintr18)
662 FAST_INTR(19,fastintr19)
663 FAST_INTR(20,fastintr20)
664 FAST_INTR(21,fastintr21)
665 FAST_INTR(22,fastintr22)
666 FAST_INTR(23,fastintr23)
/*
 * Passed to INTR(0, ...) as its maybe_extra_ipending hook: records a
 * pending clock interrupt in clkintr_pending under clock_lock (the
 * lock acquire/release lines are not visible in this fragment).
 */
668 #define CLKINTR_PENDING \
669 pushl $clock_lock ; \
671 movl $1,CNAME(clkintr_pending) ; \
675 INTR(0,intr0, CLKINTR_PENDING)
702 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
704 * - Calls the generic rendezvous action function.
712 mov %ax, %ds /* use KERNEL data segment */
717 call _smp_rendezvous_action /* run the agreed-upon action */
719 movl $0, lapic_eoi /* End Of Interrupt to APIC */
726 * Addresses of interrupt handlers.
727 * XresumeNN: Resumption addresses for HWIs.
733 * ipl.s: doreti_unpend
/* Hardware-interrupt resume table, indexed by IRQ (0..23). */
735 .long Xresume0, Xresume1, Xresume2, Xresume3
736 .long Xresume4, Xresume5, Xresume6, Xresume7
737 .long Xresume8, Xresume9, Xresume10, Xresume11
738 .long Xresume12, Xresume13, Xresume14, Xresume15
739 .long Xresume16, Xresume17, Xresume18, Xresume19
740 .long Xresume20, Xresume21, Xresume22, Xresume23
743 * ipl.s: doreti_unpend
744 * apic_ipl.s: splz_unpend
/* Software-interrupt handlers (unused slots point at _swi_null). */
746 .long _swi_null, swi_net, _swi_null, _swi_null
747 .long _swi_vm, _swi_null, _softclock
749 imasks: /* masks for interrupt handlers */
750 .space NHWI*4 /* padding; HWI masks are elsewhere */
/* Software-interrupt priority masks, in SWI order. */
752 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
753 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
755 /* active flag for lazy masking */
759 #ifdef COUNT_XINVLTLB_HITS
763 #endif /* COUNT_XINVLTLB_HITS */
765 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
766 .globl _stopped_cpus, _started_cpus
773 .globl _checkstate_probed_cpus
774 _checkstate_probed_cpus:
776 #endif /* BETTER_CLOCK */
777 .globl _checkstate_need_ast
778 _checkstate_need_ast:
780 _checkstate_pending_ast:
/* Statistics and control cells for IRQ forwarding and cross-cpu ASTs.
 * NOTE(review): the storage directives (.long/.space) for several of
 * these labels fall on lines not visible in this fragment. */
782 .globl CNAME(forward_irq_misscnt)
783 .globl CNAME(forward_irq_toodeepcnt)
784 .globl CNAME(forward_irq_hitcnt)
785 .globl CNAME(resched_cpus)
786 .globl CNAME(want_resched_cnt)
787 .globl CNAME(cpuast_cnt)
788 .globl CNAME(cpustop_restartfunc)
789 CNAME(forward_irq_misscnt):
791 CNAME(forward_irq_hitcnt):
793 CNAME(forward_irq_toodeepcnt):
797 CNAME(want_resched_cnt):
801 CNAME(cpustop_restartfunc):
806 .globl _apic_pin_trigger