2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.10 2003/07/11 01:23:23 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask (bit irq_num set; used against apic_imen, fpending, ipending) */
13 #define IRQ_LBIT(irq_num)	(1 << (irq_num))
15 /* make an index into the IO APIC redirection table from the IRQ#: entries are 2 registers wide, starting at register index 0x10 */
16 #define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * Per-IRQ IO APIC lookup.  Each int_to_apicintpin[] entry is 16 bytes;
 * offset 8 presumably holds the servicing IO APIC's base address and
 * offset 12 the redirection-table register index for the pin --
 * TODO confirm against the struct layout in intr_machdep.h.
 */
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
71 #define MASK_IRQ(irq_num) \
72 IMASK_LOCK ; /* into critical reg */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
100 movl (%eax), %eax ; \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; \
117  * Test to see if the source is currently masked, and clear the mask if so.
119 #define UNMASK_IRQ(irq_num) \
120 IMASK_LOCK ; /* into critical reg */ \
121 testl $IRQ_LBIT(irq_num), apic_imen ; \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
125 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
126 movl %eax,(%ecx) ; /* write the index */ \
127 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
128 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
129 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
140 * - If we can take the interrupt clear its fpending bit,
141 * call the handler, then unmask and doreti.
143  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
146 #define FAST_INTR(irq_num, vec_name) \
151 FAKE_MCOUNT(13*4(%esp)) ; \
152 MASK_LEVEL_IRQ(irq_num) ; \
154 incl PCPU(intr_nesting_level) ; \
155 movl PCPU(curthread),%ebx ; \
156 movl TD_CPL(%ebx),%eax ; \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
160 testl $IRQ_LBIT(irq_num), %eax ; \
163 /* set the pending bit and return, leave interrupt masked */ \
164 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
165 movl $TDPRI_CRIT, PCPU(reqpri) ; \
168 /* try to get giant */ \
172 /* clear pending bit, run handler */ \
173 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
174 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
175 pushl intr_unit + (irq_num) * 4 ; \
176 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
178 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
179 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
180 movl intr_countp + (irq_num) * 4, %eax ; \
183 UNMASK_IRQ(irq_num) ; \
189 * Restart fast interrupt held up by critical section or cpl.
191  * - Push a dummy trap frame as required by doreti
192 * - The interrupt source is already masked
193 * - Clear the fpending bit
195 * - Unmask the interrupt
196 * - Pop the dummy frame and do a normal return
198 * The BGL is held on call and left held on return.
200 * YYY can cache gd base pointer instead of using hidden %fs
204 #define FAST_UNPEND(irq_num, vec_name) \
211 pushl intr_unit + (irq_num) * 4 ; \
212 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
214 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
215 movl intr_countp + (irq_num) * 4, %eax ; \
217 UNMASK_IRQ(irq_num) ; \
223 * Slow interrupt call handlers run in the following sequence:
225 * - Push the trap frame required by doreti.
226 * - Mask the interrupt and reenable its source.
227 * - If we cannot take the interrupt set its ipending bit and
228 * doreti. In addition to checking for a critical section
229 * and cpl mask we also check to see if the thread is still
231 * - If we can take the interrupt clear its ipending bit
232 * and schedule the thread. Leave interrupts masked and doreti.
234 * Note that calls to sched_ithd() are made with interrupts enabled
235 * and outside a critical section. YYY sched_ithd may preempt us
236 * synchronously (fix interrupt stacking)
238 * YYY can cache gd base pointer instead of using hidden %fs
242 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
247 maybe_extra_ipending ; \
249 MASK_LEVEL_IRQ(irq_num) ; \
251 incl PCPU(intr_nesting_level) ; \
252 movl PCPU(curthread),%ebx ; \
253 movl TD_CPL(%ebx),%eax ; \
254 pushl %eax ; /* cpl do restore */ \
255 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
257 testl $IRQ_LBIT(irq_num),%eax ; \
260 /* set the pending bit and return, leave the interrupt masked */ \
261 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
262 movl $TDPRI_CRIT, PCPU(reqpri) ; \
265 /* set running bit, clear pending bit, run handler */ \
266 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
271 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
272 movl intr_countp + (irq_num) * 4,%eax ; \
279 * Unmask a slow interrupt. This function is used by interrupt threads
280 * after they have descheduled themselves to reenable interrupts and
281 * possibly cause a reschedule to occur.
284 #define INTR_UNMASK(irq_num, vec_name, icu) \
288 pushl %ebp ; /* frame for ddb backtrace */ \
290 UNMASK_IRQ(irq_num) ; \
295 /* XXX forward_irq to cpu holding the BGL? */
298 3: ; /* other cpu has isr lock */ \
300 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
301 movl $TDPRI_CRIT,_reqpri ; \
302 testl $IRQ_LBIT(irq_num), TD_CPL(%ebx) ; \
303 jne 4f ; /* this INT masked */ \
304 call forward_irq ; /* forward irq to lock holder */ \
305 POP_FRAME ; /* and return */ \
309 POP_FRAME ; /* and return */ \
313 * Handle "spurious INTerrupts".
315 * This is different than the "spurious INTerrupt" generated by an
316 * 8259 PIC for missing INTs. See the APIC documentation for details.
317 * This routine should NOT do an 'EOI' cycle.
327 /* No EOI cycle used here */
333 * Handle TLB shootdowns.
341 #ifdef COUNT_XINVLTLB_HITS
345 movl PCPU(cpuid), %eax
349 #endif /* COUNT_XINVLTLB_HITS */
351 movl %cr3, %eax /* invalidate the TLB */
354 ss /* stack segment, avoid %ds load */
355 movl $0, lapic_eoi /* End Of Interrupt to APIC */
365 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
367 * - Stores current cpu state in checkstate_cpustate[cpuid]
368 * 0 == user, 1 == sys, 2 == intr
369 * - Stores current process in checkstate_curproc[cpuid]
371 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
373 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
378 .globl Xcpucheckstate
379 .globl checkstate_cpustate
380 .globl checkstate_curproc
385 pushl %ds /* save current data segment */
389 mov %ax, %ds /* use KERNEL data segment */
393 movl $0, lapic_eoi /* End Of Interrupt to APIC */
400 testl $PSL_VM, 24(%esp)
402 incl %ebx /* system or interrupt */
404 movl PCPU(cpuid), %eax
405 movl %ebx, checkstate_cpustate(,%eax,4)
406 movl PCPU(curthread), %ebx
407 movl TD_PROC(%ebx),%ebx
408 movl %ebx, checkstate_curproc(,%eax,4)
410 movl %ebx, checkstate_pc(,%eax,4)
412 lock /* checkstate_probed_cpus |= (1<<id) */
413 btsl %eax, checkstate_probed_cpus
416 popl %ds /* restore previous data segment */
421 #endif /* BETTER_CLOCK */
425 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
427 * - Signals its receipt.
428 * - Waits for permission to restart.
429 * - Signals its restart.
441 pushl %ds /* save current data segment */
445 mov %ax, %ds /* use KERNEL data segment */
449 movl $0, lapic_eoi /* End Of Interrupt to APIC */
451 movl PCPU(cpuid), %eax
452 imull $PCB_SIZE, %eax
453 leal CNAME(stoppcbs)(%eax), %eax
455 call CNAME(savectx) /* Save process context */
459 movl PCPU(cpuid), %eax
462 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
464 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
468 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
470 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
475 movl CNAME(cpustop_restartfunc), %eax
478 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
483 popl %ds /* restore previous data segment */
492 * For now just have one ipiq IPI, but what we really want is
493  * to have one for each source cpu so the APICs don't get stalled
494 * backlogging the requests.
501 movl $0, lapic_eoi /* End Of Interrupt to APIC */
502 FAKE_MCOUNT(13*4(%esp))
504 movl PCPU(curthread),%ebx
505 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
507 addl $TDPRI_CRIT,TD_PRI(%ebx)
508 call lwkt_process_ipiq
509 subl $TDPRI_CRIT,TD_PRI(%ebx)
511 incl PCPU(intr_nesting_level)
515 movl $TDPRI_CRIT,PCPU(reqpri)
516 orl $AST_IPIQ,PCPU(astpending)
/*
 * Instantiate the fast-interrupt entry points for APIC IRQs 0-23 via
 * the FAST_INTR() macro defined above (one handler per IRQ).
 */
522	FAST_INTR(0,fastintr0)
523	FAST_INTR(1,fastintr1)
524	FAST_INTR(2,fastintr2)
525	FAST_INTR(3,fastintr3)
526	FAST_INTR(4,fastintr4)
527	FAST_INTR(5,fastintr5)
528	FAST_INTR(6,fastintr6)
529	FAST_INTR(7,fastintr7)
530	FAST_INTR(8,fastintr8)
531	FAST_INTR(9,fastintr9)
532	FAST_INTR(10,fastintr10)
533	FAST_INTR(11,fastintr11)
534	FAST_INTR(12,fastintr12)
535	FAST_INTR(13,fastintr13)
536	FAST_INTR(14,fastintr14)
537	FAST_INTR(15,fastintr15)
538	FAST_INTR(16,fastintr16)
539	FAST_INTR(17,fastintr17)
540	FAST_INTR(18,fastintr18)
541	FAST_INTR(19,fastintr19)
542	FAST_INTR(20,fastintr20)
543	FAST_INTR(21,fastintr21)
544	FAST_INTR(22,fastintr22)
545	FAST_INTR(23,fastintr23)
547 /* YYY what is this garbage? */
548 #define CLKINTR_PENDING \
550 movl $1,CNAME(clkintr_pending) ; \
551 call clock_unlock ; \
553 INTR(0,intr0, CLKINTR_PENDING)
/*
 * Instantiate the FAST_UNPEND() restart handlers for IRQs 0-23, used to
 * run a fast interrupt that was previously left pending (held up by a
 * critical section or the cpl -- see the FAST_UNPEND macro above).
 */
578	FAST_UNPEND(0,fastunpend0)
579	FAST_UNPEND(1,fastunpend1)
580	FAST_UNPEND(2,fastunpend2)
581	FAST_UNPEND(3,fastunpend3)
582	FAST_UNPEND(4,fastunpend4)
583	FAST_UNPEND(5,fastunpend5)
584	FAST_UNPEND(6,fastunpend6)
585	FAST_UNPEND(7,fastunpend7)
586	FAST_UNPEND(8,fastunpend8)
587	FAST_UNPEND(9,fastunpend9)
588	FAST_UNPEND(10,fastunpend10)
589	FAST_UNPEND(11,fastunpend11)
590	FAST_UNPEND(12,fastunpend12)
591	FAST_UNPEND(13,fastunpend13)
592	FAST_UNPEND(14,fastunpend14)
593	FAST_UNPEND(15,fastunpend15)
594	FAST_UNPEND(16,fastunpend16)
595	FAST_UNPEND(17,fastunpend17)
596	FAST_UNPEND(18,fastunpend18)
597	FAST_UNPEND(19,fastunpend19)
598	FAST_UNPEND(20,fastunpend20)
599	FAST_UNPEND(21,fastunpend21)
600	FAST_UNPEND(22,fastunpend22)
601	FAST_UNPEND(23,fastunpend23)
605 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
607 * - Calls the generic rendezvous action function.
615 mov %ax, %ds /* use KERNEL data segment */
620 call smp_rendezvous_action
622 movl $0, lapic_eoi /* End Of Interrupt to APIC */
631 * Addresses of interrupt handlers.
632 * XresumeNN: Resumption addresses for HWIs.
638 * ipl.s: doreti_unpend
640 .long Xresume0, Xresume1, Xresume2, Xresume3
641 .long Xresume4, Xresume5, Xresume6, Xresume7
642 .long Xresume8, Xresume9, Xresume10, Xresume11
643 .long Xresume12, Xresume13, Xresume14, Xresume15
644 .long Xresume16, Xresume17, Xresume18, Xresume19
645 .long Xresume20, Xresume21, Xresume22, Xresume23
648 * ipl.s: doreti_unpend
649 * apic_ipl.s: splz_unpend
651 .long _swi_null, swi_net, _swi_null, _swi_null
652 .long _swi_vm, _swi_null, _softclock
654 imasks: /* masks for interrupt handlers */
655 .space NHWI*4 /* padding; HWI masks are elsewhere */
657 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
658 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
662 #ifdef COUNT_XINVLTLB_HITS
666 #endif /* COUNT_XINVLTLB_HITS */
668 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
669 .globl stopped_cpus, started_cpus
676 .globl checkstate_probed_cpus
677 checkstate_probed_cpus:
679 #endif /* BETTER_CLOCK */
680 .globl CNAME(cpustop_restartfunc)
681 CNAME(cpustop_restartfunc):
684 .globl apic_pin_trigger