2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/isa/Attic/apic_vector.s,v 1.8 2003/07/06 21:23:49 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask (bit irq_num set); used against apic_imen and the per-cpu fpending/ipending words */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC redirection table from the IRQ#: entries are two 32-bit registers each, starting at I/O APIC register 0x10 */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $11*4,%esp ; /* pushal + 3 seg regs (dummy) */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * Per-IRQ lookup into the int_to_apicintpin[] table (16-byte entries):
 * offset 8 yields the I/O APIC register-select address (written with the
 * redirection index, then read/written via IOAPIC_WINDOW), offset 12 the
 * redirection-table index for that pin.
 * NOTE(review): field offsets assumed to match the struct declared in
 * intr_machdep — confirm there if the layout changes.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
71 #define MASK_IRQ(irq_num) \
72 IMASK_LOCK ; /* into critical reg */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
100 movl (%eax), %eax ; \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; \
117 * Test to see if the source is currently masked, clear if so.
119 #define UNMASK_IRQ(irq_num) \
120 IMASK_LOCK ; /* into critical reg */ \
121 testl $IRQ_LBIT(irq_num), apic_imen ; \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
125 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
126 movl %eax,(%ecx) ; /* write the index */ \
127 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
128 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
129 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
140 * - If we can take the interrupt clear its fpending bit,
141 * call the handler, then unmask and doreti.
143 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
146 #define FAST_INTR(irq_num, vec_name) \
151 FAKE_MCOUNT(13*4(%esp)) ; \
152 MASK_LEVEL_IRQ(irq_num) ; \
154 incl PCPU(intr_nesting_level) ; \
155 movl PCPU(curthread),%ebx ; \
156 movl TD_CPL(%ebx),%eax ; \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
160 testl $IRQ_LBIT(irq_num), %eax ; \
163 /* set the pending bit and return, leave interrupt masked */ \
164 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
165 movl $TDPRI_CRIT, PCPU(reqpri) ; \
168 /* clear pending bit, run handler */ \
169 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
170 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
171 pushl intr_unit + (irq_num) * 4 ; \
172 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
174 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
175 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
176 movl intr_countp + (irq_num) * 4, %eax ; \
178 UNMASK_IRQ(irq_num) ; \
184 * Restart fast interrupt held up by critical section or cpl.
186 * - Push a dummy trap frame as required by doreti
187 * - The interrupt source is already masked
188 * - Clear the fpending bit
190 * - Unmask the interrupt
191 * - Pop the dummy frame and do a normal return
193 * YYY can cache gd base pointer instead of using hidden %fs
197 #define FAST_UNPEND(irq_num, vec_name) \
204 pushl intr_unit + (irq_num) * 4 ; \
205 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
207 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
208 movl intr_countp + (irq_num) * 4, %eax ; \
210 UNMASK_IRQ(irq_num) ; \
216 * Slow interrupt call handlers run in the following sequence:
218 * - Push the trap frame required by doreti.
219 * - Mask the interrupt and reenable its source.
220 * - If we cannot take the interrupt set its ipending bit and
221 * doreti. In addition to checking for a critical section
222 * and cpl mask we also check to see if the thread is still
224 * - If we can take the interrupt clear its ipending bit,
225 * set its irunning bit, and schedule the thread. Leave
226 * interrupts masked and doreti.
228 * the interrupt thread will run its handlers and loop if
229 * ipending is found to be set. ipending/irunning interlock
230 * the interrupt thread with the interrupt. The handler calls
231 * UNPEND when it is through.
233 * Note that we do not enable interrupts when calling sched_ithd.
234 * YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
236 * YYY can cache gd base pointer instead of using hidden %fs
240 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
245 maybe_extra_ipending ; \
247 MASK_LEVEL_IRQ(irq_num) ; \
249 incl PCPU(intr_nesting_level) ; \
250 movl PCPU(curthread),%ebx ; \
251 movl TD_CPL(%ebx),%eax ; \
252 pushl %eax ; /* cpl do restore */ \
253 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
255 testl $IRQ_LBIT(irq_num),PCPU(irunning) ; \
257 testl $IRQ_LBIT(irq_num),%eax ; \
260 /* set the pending bit and return, leave the interrupt masked */ \
261 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
262 movl $TDPRI_CRIT, PCPU(reqpri) ; \
265 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
266 /* set running bit, clear pending bit, run handler */ \
267 orl $IRQ_LBIT(irq_num), PCPU(irunning) ; \
268 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
273 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
274 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
275 movl intr_countp + (irq_num) * 4,%eax ; \
282 * Unmask a slow interrupt. This function is used by interrupt threads
283 * after they have descheduled themselves to reenable interrupts and
284 * possibly cause a reschedule to occur. The interrupt's irunning bit
285 * is cleared prior to unmasking.
288 #define INTR_UNMASK(irq_num, vec_name, icu) \
292 pushl %ebp ; /* frame for ddb backtrace */ \
294 andl $~IRQ_LBIT(irq_num), PCPU(irunning) ; \
295 UNMASK_IRQ(irq_num) ; \
300 /* XXX forward_irq to cpu holding the BGL? */
303 3: ; /* other cpu has isr lock */ \
305 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
306 movl $TDPRI_CRIT,_reqpri ; \
307 testl $IRQ_LBIT(irq_num), TD_CPL(%ebx) ; \
308 jne 4f ; /* this INT masked */ \
309 call forward_irq ; /* forward irq to lock holder */ \
310 POP_FRAME ; /* and return */ \
314 POP_FRAME ; /* and return */ \
318 * Handle "spurious INTerrupts".
320 * This is different than the "spurious INTerrupt" generated by an
321 * 8259 PIC for missing INTs. See the APIC documentation for details.
322 * This routine should NOT do an 'EOI' cycle.
332 /* No EOI cycle used here */
338 * Handle TLB shootdowns.
346 #ifdef COUNT_XINVLTLB_HITS
350 movl PCPU(cpuid), %eax
354 #endif /* COUNT_XINVLTLB_HITS */
356 movl %cr3, %eax /* invalidate the TLB */
359 ss /* stack segment, avoid %ds load */
360 movl $0, lapic_eoi /* End Of Interrupt to APIC */
370 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
372 * - Stores current cpu state in checkstate_cpustate[cpuid]
373 * 0 == user, 1 == sys, 2 == intr
374 * - Stores current process in checkstate_curproc[cpuid]
376 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
378 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
383 .globl Xcpucheckstate
384 .globl checkstate_cpustate
385 .globl checkstate_curproc
390 pushl %ds /* save current data segment */
394 mov %ax, %ds /* use KERNEL data segment */
398 movl $0, lapic_eoi /* End Of Interrupt to APIC */
405 testl $PSL_VM, 24(%esp)
407 incl %ebx /* system or interrupt */
409 movl PCPU(cpuid), %eax
410 movl %ebx, checkstate_cpustate(,%eax,4)
411 movl PCPU(curthread), %ebx
412 movl TD_PROC(%ebx),%ebx
413 movl %ebx, checkstate_curproc(,%eax,4)
415 movl %ebx, checkstate_pc(,%eax,4)
417 lock /* checkstate_probed_cpus |= (1<<id) */
418 btsl %eax, checkstate_probed_cpus
421 popl %ds /* restore previous data segment */
426 #endif /* BETTER_CLOCK */
430 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
432 * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
433 * - MP safe in regards to setting AST_PENDING because doreti is in
434 * a cli mode when it checks.
443 movl PCPU(cpuid), %eax
444 lock /* checkstate_need_ast &= ~(1<<id) */
445 btrl %eax, checkstate_need_ast
446 movl $0, lapic_eoi /* End Of Interrupt to APIC */
449 btsl %eax, checkstate_pending_ast
452 FAKE_MCOUNT(13*4(%esp))
454 movl PCPU(curthread), %eax
455 pushl TD_CPL(%eax) /* cpl restored by doreti */
457 orl $AST_PENDING, PCPU(astpending) /* XXX */
458 incb PCPU(intr_nesting_level)
461 movl PCPU(cpuid), %eax
463 btrl %eax, checkstate_pending_ast
465 btrl %eax, CNAME(resched_cpus)
467 orl $AST_PENDING+AST_RESCHED,PCPU(astpending)
472 /* We are already in the process of delivering an ast for this CPU */
478 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
487 movl $0, lapic_eoi /* End Of Interrupt to APIC */
489 FAKE_MCOUNT(13*4(%esp))
492 testl %eax,%eax /* Did we get the lock ? */
495 incl PCPU(cnt)+V_FORWARDED_HITS
497 movl PCPU(curthread), %eax
498 pushl TD_CPL(%eax) /* cpl restored by doreti */
500 incb PCPU(intr_nesting_level)
504 jmp doreti /* Handle forwarded interrupt */
506 incl PCPU(cnt)+V_FORWARDED_MISSES
507 call forward_irq /* Oops, we've lost the isr lock */
525 cmpl $0, CNAME(forward_irq_enabled)
529 cmpl $MP_FREE_LOCK,%eax
531 movl $0, %eax /* Pick CPU #0 if noone has lock */
534 movl cpu_num_to_apic_id(,%eax,4),%ecx
536 movl lapic_icr_hi, %eax
537 andl $~APIC_ID_MASK, %eax
539 movl %eax, lapic_icr_hi
542 movl lapic_icr_lo, %eax
543 andl $APIC_DELSTAT_MASK,%eax
545 movl lapic_icr_lo, %eax
546 andl $APIC_RESV2_MASK, %eax
547 orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
548 movl %eax, lapic_icr_lo
550 movl lapic_icr_lo, %eax
551 andl $APIC_DELSTAT_MASK,%eax
557 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
559 * - Signals its receipt.
560 * - Waits for permission to restart.
561 * - Signals its restart.
573 pushl %ds /* save current data segment */
577 mov %ax, %ds /* use KERNEL data segment */
581 movl $0, lapic_eoi /* End Of Interrupt to APIC */
583 movl PCPU(cpuid), %eax
584 imull $PCB_SIZE, %eax
585 leal CNAME(stoppcbs)(%eax), %eax
587 call CNAME(savectx) /* Save process context */
591 movl PCPU(cpuid), %eax
594 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
596 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
600 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
602 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
607 movl CNAME(cpustop_restartfunc), %eax
610 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
615 popl %ds /* restore previous data segment */
/* Instantiate the fast-interrupt entry points for APIC IRQs 0-23 */
625 FAST_INTR(0,fastintr0)
626 FAST_INTR(1,fastintr1)
627 FAST_INTR(2,fastintr2)
628 FAST_INTR(3,fastintr3)
629 FAST_INTR(4,fastintr4)
630 FAST_INTR(5,fastintr5)
631 FAST_INTR(6,fastintr6)
632 FAST_INTR(7,fastintr7)
633 FAST_INTR(8,fastintr8)
634 FAST_INTR(9,fastintr9)
635 FAST_INTR(10,fastintr10)
636 FAST_INTR(11,fastintr11)
637 FAST_INTR(12,fastintr12)
638 FAST_INTR(13,fastintr13)
639 FAST_INTR(14,fastintr14)
640 FAST_INTR(15,fastintr15)
641 FAST_INTR(16,fastintr16)
642 FAST_INTR(17,fastintr17)
643 FAST_INTR(18,fastintr18)
644 FAST_INTR(19,fastintr19)
645 FAST_INTR(20,fastintr20)
646 FAST_INTR(21,fastintr21)
647 FAST_INTR(22,fastintr22)
648 FAST_INTR(23,fastintr23)
650 /* YYY what is this garbage? */
651 #define CLKINTR_PENDING \
653 movl $1,CNAME(clkintr_pending) ; \
654 call clock_unlock ; \
656 INTR(0,intr0, CLKINTR_PENDING)
/* Instantiate the fast-interrupt restart (unpend) handlers for APIC IRQs 0-23;
   used to re-run a fast interrupt that was held up by a critical section or cpl */
681 FAST_UNPEND(0,fastunpend0)
682 FAST_UNPEND(1,fastunpend1)
683 FAST_UNPEND(2,fastunpend2)
684 FAST_UNPEND(3,fastunpend3)
685 FAST_UNPEND(4,fastunpend4)
686 FAST_UNPEND(5,fastunpend5)
687 FAST_UNPEND(6,fastunpend6)
688 FAST_UNPEND(7,fastunpend7)
689 FAST_UNPEND(8,fastunpend8)
690 FAST_UNPEND(9,fastunpend9)
691 FAST_UNPEND(10,fastunpend10)
692 FAST_UNPEND(11,fastunpend11)
693 FAST_UNPEND(12,fastunpend12)
694 FAST_UNPEND(13,fastunpend13)
695 FAST_UNPEND(14,fastunpend14)
696 FAST_UNPEND(15,fastunpend15)
697 FAST_UNPEND(16,fastunpend16)
698 FAST_UNPEND(17,fastunpend17)
699 FAST_UNPEND(18,fastunpend18)
700 FAST_UNPEND(19,fastunpend19)
701 FAST_UNPEND(20,fastunpend20)
702 FAST_UNPEND(21,fastunpend21)
703 FAST_UNPEND(22,fastunpend22)
704 FAST_UNPEND(23,fastunpend23)
708 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
710 * - Calls the generic rendezvous action function.
718 mov %ax, %ds /* use KERNEL data segment */
723 call smp_rendezvous_action
725 movl $0, lapic_eoi /* End Of Interrupt to APIC */
734 * Addresses of interrupt handlers.
735 * XresumeNN: Resumption addresses for HWIs.
741 * ipl.s: doreti_unpend
743 .long Xresume0, Xresume1, Xresume2, Xresume3
744 .long Xresume4, Xresume5, Xresume6, Xresume7
745 .long Xresume8, Xresume9, Xresume10, Xresume11
746 .long Xresume12, Xresume13, Xresume14, Xresume15
747 .long Xresume16, Xresume17, Xresume18, Xresume19
748 .long Xresume20, Xresume21, Xresume22, Xresume23
751 * ipl.s: doreti_unpend
752 * apic_ipl.s: splz_unpend
754 .long _swi_null, swi_net, _swi_null, _swi_null
755 .long _swi_vm, _swi_null, _softclock
757 imasks: /* masks for interrupt handlers */
758 .space NHWI*4 /* padding; HWI masks are elsewhere */
760 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
761 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
765 #ifdef COUNT_XINVLTLB_HITS
769 #endif /* COUNT_XINVLTLB_HITS */
771 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
772 .globl stopped_cpus, started_cpus
779 .globl checkstate_probed_cpus
780 checkstate_probed_cpus:
782 #endif /* BETTER_CLOCK */
783 .globl checkstate_need_ast
786 checkstate_pending_ast:
788 .globl CNAME(resched_cpus)
789 .globl CNAME(cpustop_restartfunc)
792 CNAME(cpustop_restartfunc):
797 .globl apic_pin_trigger