 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.13 2003/08/25 19:50:32 dillon Exp $
#include <machine/apic.h>
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"
/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))
/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
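/*
 * Illustrative expansion (hypothetical IRQ 9, not tied to any particular
 * device): IRQ_LBIT(9) is (1 << 9) == 0x200, the bit used below in
 * apic_imen and in the per-cpu fpending/ipending words, while
 * REDTBL_IDX(9) is 0x10 + 9 * 2 == 0x22, the I/O APIC register index of
 * the low dword of that pin's redirection table entry.
 */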
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
	pushl $0 ; /* dummy error code */ \
	pushl $0 ; /* dummy trap type */ \
	pushl %ds ; /* save data and extra segments ... */ \
	pushfl ; /* phys int frame / flags */ \
	pushl %cs ; /* phys int frame / cs */ \
	pushl 12(%esp) ; /* original caller eip */ \
	pushl $0 ; /* dummy error code */ \
	pushl $0 ; /* dummy trap type */ \
	subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
	addl $2*4,%esp ; /* dummy trap & error codes */ \
#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
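/*
 * The arithmetic above implies one 16-byte int_to_apicintpin entry per
 * IRQ, with the I/O APIC base address kept at byte offset 8 of the entry
 * and the redirection-table register index at byte offset 12.  For a
 * hypothetical irq_num of 2, IOAPICADDR(2) refers to the dword at
 * CNAME(int_to_apicintpin) + 40 and REDIRIDX(2) to the dword at
 * CNAME(int_to_apicintpin) + 44.
 */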
 * Interrupts are expected to already be disabled when using these
	SPIN_LOCK(imen_spinlock) ; \
#define IMASK_UNLOCK \
	SPIN_UNLOCK(imen_spinlock) ; \
#define MASK_IRQ(irq_num) \
	IMASK_LOCK ; /* into critical region */ \
	testl $IRQ_LBIT(irq_num), apic_imen ; \
	jne 7f ; /* masked, don't mask */ \
	orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax, (%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
	orl $IOART_INTMASK, %eax ; /* set the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
7: ; /* already masked */ \
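/*
 * MASK_IRQ uses the I/O APIC's indirect register protocol: the register
 * index obtained via REDIRIDX() is written through the chip's base
 * address (the register-select word at offset 0), and the selected
 * redirection entry is then read, OR'd with IOART_INTMASK, and written
 * back through the data window at offset IOAPIC_WINDOW.  For a
 * hypothetical MASK_IRQ(2) the core of the expansion is:
 *
 *	orl	$0x4, apic_imen
 *	movl	CNAME(int_to_apicintpin) + 40, %ecx
 *	movl	CNAME(int_to_apicintpin) + 44, %eax
 *	movl	%eax, (%ecx)
 *	movl	IOAPIC_WINDOW(%ecx), %eax
 *	orl	$IOART_INTMASK, %eax
 *	movl	%eax, IOAPIC_WINDOW(%ecx)
 */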
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
#define MASK_LEVEL_IRQ(irq_num) \
	testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
	jz 9f ; /* edge, don't mask */ \
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl (%eax), %eax ; \
	testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
#define EOI_IRQ(irq_num) \
	testl $IRQ_LBIT(irq_num), lapic_isr1 ; \
	jz 9f ; /* not active */ \
	movl $0, lapic_eoi ; \
 * Test to see if the source is currently masked, clear if so.
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ; /* into critical region */ \
	testl $IRQ_LBIT(irq_num), apic_imen ; \
	je 7f ; /* bit clear, not masked */ \
	andl $~IRQ_LBIT(irq_num), apic_imen ; /* clear mask bit */ \
	movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ; /* get the index */ \
	movl %eax,(%ecx) ; /* write the index */ \
	movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
	andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
	movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
 * Fast interrupt call handlers run in the following sequence:
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt (sketched below), set its fpending bit and
 * - If we can take the interrupt, clear its fpending bit,
 *   call the handler, then unmask and doreti.
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
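/*
 * Sketch of the deferral path for a hypothetical IRQ n held off by a
 * critical section or the cpl: the handler is not called; instead
 *
 *	orl	$(1 << n), PCPU(fpending)
 *	orl	$RQF_INTPEND, PCPU(reqflags)
 *
 * is executed with the source left masked, and the interrupt is replayed
 * later (see FAST_UNPEND below) once it can be taken.
 */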
#define FAST_INTR(irq_num, vec_name) \
	FAKE_MCOUNT(13*4(%esp)) ; \
	MASK_LEVEL_IRQ(irq_num) ; \
	movl PCPU(curthread),%ebx ; \
	movl TD_CPL(%ebx),%eax ; \
	cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	testl $IRQ_LBIT(irq_num), %eax ; \
	/* in critical section, make interrupt pending */ \
	/* set the pending bit and return, leave interrupt masked */ \
	orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
	orl $RQF_INTPEND,PCPU(reqflags) ; \
	/* try to get the MP lock */ \
	/* clear pending bit, run handler */ \
	incl PCPU(intr_nesting_level) ; \
	addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
	pushl intr_unit + (irq_num) * 4 ; \
	call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
	movl intr_countp + (irq_num) * 4, %eax ; \
	decl PCPU(intr_nesting_level) ; \
	UNMASK_IRQ(irq_num) ; \
	/* could not get MP lock, forward the interrupt */ \
	movl mp_lock, %eax ; /* check race */ \
	cmpl $MP_FREE_LOCK,%eax ; \
	incl PCPU(cnt)+V_FORWARDED_INTS ; \
	movl $irq_num,8(%esp) ; \
	movl $forward_fastint_remote,4(%esp) ; \
	call lwkt_send_ipiq ; \
 * Restart fast interrupt held up by critical section or cpl.
 * - Push a dummy trap frame as required by doreti
 * - The interrupt source is already masked
 * - Clear the fpending bit
 * - Unmask the interrupt
 * - Pop the dummy frame and do a normal return
 * The BGL is held on call and left held on return.
 * YYY can cache gd base pointer instead of using hidden %fs
#define FAST_UNPEND(irq_num, vec_name) \
	pushl intr_unit + (irq_num) * 4 ; \
	call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
	movl intr_countp + (irq_num) * 4, %eax ; \
	UNMASK_IRQ(irq_num) ; \
 * Slow interrupt call handlers run in the following sequence:
 * - Push the trap frame required by doreti.
 * - Mask the interrupt and reenable its source.
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.  In addition to checking for a critical section
 *   and cpl mask we also check to see if the thread is still
 * - If we can take the interrupt clear its ipending bit
 *   and schedule the thread.  Leave interrupts masked and doreti.
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 * YYY can cache gd base pointer instead of using hidden %fs
#define INTR(irq_num, vec_name, maybe_extra_ipending) \
	maybe_extra_ipending ; \
	MASK_LEVEL_IRQ(irq_num) ; \
	movl PCPU(curthread),%ebx ; \
	movl TD_CPL(%ebx),%eax ; \
	pushl %eax ; /* cpl to restore */ \
	cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
	testl $IRQ_LBIT(irq_num),%eax ; \
	/* set the pending bit and return, leave the interrupt masked */ \
	orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
	orl $RQF_INTPEND,PCPU(reqflags) ; \
	/* set running bit, clear pending bit, run handler */ \
	andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
	incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
	movl intr_countp + (irq_num) * 4,%eax ; \
 * Handle "spurious INTerrupts".
 * This is different from the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs.  See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.
	/* No EOI cycle used here */
 * Handle TLB shootdowns.
#ifdef COUNT_XINVLTLB_HITS
	movl PCPU(cpuid), %eax
#endif /* COUNT_XINVLTLB_HITS */
	movl %cr3, %eax /* invalidate the TLB */
	ss /* stack segment, avoid %ds load */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 * - Stores current cpu state in checkstate_cpustate[cpuid]
 *   0 == user, 1 == sys, 2 == intr
 * - Stores current process in checkstate_curproc[cpuid]
 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
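/*
 * (The PSL_VM test below reads the saved eflags at 24(%esp) according to
 * this layout when classifying the interrupted context as user vs.
 * system/interrupt.)
 */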
	.globl Xcpucheckstate
	.globl checkstate_cpustate
	.globl checkstate_curproc
	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	testl $PSL_VM, 24(%esp)
	incl %ebx /* system or interrupt */
	movl PCPU(cpuid), %eax
	movl %ebx, checkstate_cpustate(,%eax,4)
	movl PCPU(curthread), %ebx
	movl TD_PROC(%ebx),%ebx
	movl %ebx, checkstate_curproc(,%eax,4)
	movl %ebx, checkstate_pc(,%eax,4)
	lock /* checkstate_probed_cpus |= (1<<id) */
	btsl %eax, checkstate_probed_cpus
	popl %ds /* restore previous data segment */
#endif /* BETTER_CLOCK */
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 * - Signals its receipt.
 * - Waits for permission to restart.
 * - Signals its restart.
	pushl %ds /* save current data segment */
	mov %ax, %ds /* use KERNEL data segment */
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	movl PCPU(cpuid), %eax
	imull $PCB_SIZE, %eax
	leal CNAME(stoppcbs)(%eax), %eax
	call CNAME(savectx) /* Save process context */
	movl PCPU(cpuid), %eax
	btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
	btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
	btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
	btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
	movl CNAME(cpustop_restartfunc), %eax
	movl $0, CNAME(cpustop_restartfunc) /* One-shot */
	popl %ds /* restore previous data segment */
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
	FAKE_MCOUNT(13*4(%esp))
	movl PCPU(curthread),%ebx
	cmpl $TDPRI_CRIT,TD_PRI(%ebx)
	incl PCPU(intr_nesting_level)
	addl $TDPRI_CRIT,TD_PRI(%ebx)
	call lwkt_process_ipiq
	subl $TDPRI_CRIT,TD_PRI(%ebx)
	decl PCPU(intr_nesting_level)
	orl $RQF_IPIQ,PCPU(reqflags)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
/* YYY what is this garbage? */
#define CLKINTR_PENDING \
	movl $1,CNAME(clkintr_pending) ; \
	call clock_unlock ; \
	INTR(0,intr0, CLKINTR_PENDING)
	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 * - Calls the generic rendezvous action function.
	mov %ax, %ds /* use KERNEL data segment */
	call smp_rendezvous_action
	movl $0, lapic_eoi /* End Of Interrupt to APIC */
 * Addresses of interrupt handlers.
 * XresumeNN: Resumption addresses for HWIs.
 * ipl.s: doreti_unpend
	.long Xresume0, Xresume1, Xresume2, Xresume3
	.long Xresume4, Xresume5, Xresume6, Xresume7
	.long Xresume8, Xresume9, Xresume10, Xresume11
	.long Xresume12, Xresume13, Xresume14, Xresume15
	.long Xresume16, Xresume17, Xresume18, Xresume19
	.long Xresume20, Xresume21, Xresume22, Xresume23
 * ipl.s: doreti_unpend
 * apic_ipl.s: splz_unpend
	.long _swi_null, swi_net, _swi_null, _swi_null
	.long _swi_vm, _swi_null, _softclock
imasks: /* masks for interrupt handlers */
	.space NHWI*4 /* padding; HWI masks are elsewhere */
	.long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
#ifdef COUNT_XINVLTLB_HITS
#endif /* COUNT_XINVLTLB_HITS */
/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
	.globl checkstate_probed_cpus
checkstate_probed_cpus:
#endif /* BETTER_CLOCK */
	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.globl apic_pin_trigger