2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.14 2003/09/25 23:49:08 dillon Exp $
8 #include <machine/apic.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
2 /* convert an absolute IRQ# into a bitmask (bit irq_num set) */
3 #define IRQ_LBIT(irq_num) (1 << (irq_num))
5 /* make an index into the IO APIC redirection table from the IRQ#;
   each redirection entry is two 32-bit registers starting at 0x10 */
6 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
/*
 * Per-IRQ IO APIC register address and redirection-table index.
 * NOTE(review): int_to_apicintpin[] entries appear to be 16 bytes, with
 * the ioapic address at offset 8 and the redirection index at offset 12
 * -- confirm against the struct definition in intr_machdep.h.
 */
8 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
9 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
1 #define MASK_IRQ(irq_num) \
2 IMASK_LOCK ; /* take imen_spinlock to protect apic_imen */ \
3 testl $IRQ_LBIT(irq_num), apic_imen ; /* already masked? */ \
4 jne 7f ; /* yes, skip the IO APIC update */ \
5 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
6 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
7 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
8 movl %eax, (%ecx) ; /* write the index (indirect register select) */ \
9 movl IOAPIC_WINDOW(%ecx), %eax ; /* current redirection entry value */ \
0 orl $IOART_INTMASK, %eax ; /* set the mask */ \
1 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
2 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
0 #define MASK_LEVEL_IRQ(irq_num) \
1 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level triggered? */ \
2 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
100 movl (%eax), %eax ; \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; \
7 * Test to see if the source is currently masked, clear if so.
9 #define UNMASK_IRQ(irq_num) \
0 IMASK_LOCK ; /* take imen_spinlock to protect apic_imen */ \
1 testl $IRQ_LBIT(irq_num), apic_imen ; /* currently masked? */ \
2 je 7f ; /* bit clear, not masked */ \
3 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
4 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
5 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
6 movl %eax,(%ecx) ; /* write the index (indirect register select) */ \
7 movl IOAPIC_WINDOW(%ecx),%eax ; /* current redirection entry value */ \
8 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
9 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
139 * doreti. Note that we cannot mess with mp_lock at all
140 * if we entered from a critical section!
141 * - If we can take the interrupt clear its fpending bit,
142 * call the handler, then unmask and doreti.
4 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
147 #define FAST_INTR(irq_num, vec_name) \
152 FAKE_MCOUNT(13*4(%esp)) ; \
153 MASK_LEVEL_IRQ(irq_num) ; \
155 movl PCPU(curthread),%ebx ; \
156 movl TD_CPL(%ebx),%eax ; \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
160 testl $IRQ_LBIT(irq_num), %eax ; \
163 /* in critical section, make interrupt pending */ \
164 /* set the pending bit and return, leave interrupt masked */ \
165 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
166 orl $RQF_INTPEND,PCPU(reqflags) ; \
169 /* try to get the MP lock */ \
173 /* clear pending bit, run handler */ \
174 incl PCPU(intr_nesting_level) ; \
175 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
176 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
177 pushl intr_unit + (irq_num) * 4 ; \
178 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
180 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
181 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
182 movl intr_countp + (irq_num) * 4, %eax ; \
184 decl PCPU(intr_nesting_level) ; \
186 UNMASK_IRQ(irq_num) ; \
191 /* could not get MP lock, forward the interrupt */ \
192 movl mp_lock, %eax ; /* check race */ \
193 cmpl $MP_FREE_LOCK,%eax ; \
195 incl PCPU(cnt)+V_FORWARDED_INTS ; \
197 movl $irq_num,8(%esp) ; \
198 movl $forward_fastint_remote,4(%esp) ; \
200 call lwkt_send_ipiq ; \
205 * Restart fast interrupt held up by critical section or cpl.
7 * - Push a dummy trap frame as required by doreti
208 * - The interrupt source is already masked
209 * - Clear the fpending bit
211 * - Unmask the interrupt
212 * - Pop the dummy frame and do a normal return
214 * The BGL is held on call and left held on return.
216 * YYY can cache gd base pointer instead of using hidden %fs
220 #define FAST_UNPEND(irq_num, vec_name) \
227 pushl intr_unit + (irq_num) * 4 ; \
228 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
230 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
231 movl intr_countp + (irq_num) * 4, %eax ; \
233 UNMASK_IRQ(irq_num) ; \
239 * Slow interrupt call handlers run in the following sequence:
241 * - Push the trap frame required by doreti.
242 * - Mask the interrupt and reenable its source.
243 * - If we cannot take the interrupt set its ipending bit and
244 * doreti. In addition to checking for a critical section
245 * and cpl mask we also check to see if the thread is still
246 * running. Note that we cannot mess with mp_lock at all
247 * if we entered from a critical section!
248 * - If we can take the interrupt clear its ipending bit
249 * and schedule the thread. Leave interrupts masked and doreti.
251 * Note that calls to sched_ithd() are made with interrupts enabled
252 * and outside a critical section. YYY sched_ithd may preempt us
253 * synchronously (fix interrupt stacking).
255 * YYY can cache gd base pointer instead of using hidden %fs
259 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
264 maybe_extra_ipending ; \
266 MASK_LEVEL_IRQ(irq_num) ; \
268 movl PCPU(curthread),%ebx ; \
269 movl TD_CPL(%ebx),%eax ; \
270 pushl %eax ; /* cpl do restore */ \
271 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
273 testl $IRQ_LBIT(irq_num),%eax ; \
276 /* set the pending bit and return, leave the interrupt masked */ \
277 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
278 orl $RQF_INTPEND,PCPU(reqflags) ; \
281 /* set running bit, clear pending bit, run handler */ \
282 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
287 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
288 movl intr_countp + (irq_num) * 4,%eax ; \
296 * Handle "spurious INTerrupts".
298 * This is different than the "spurious INTerrupt" generated by an
299 * 8259 PIC for missing INTs. See the APIC documentation for details.
300 * This routine should NOT do an 'EOI' cycle.
307 /* No EOI cycle used here */
313 * Handle TLB shootdowns.
321 #ifdef COUNT_XINVLTLB_HITS
325 movl PCPU(cpuid), %eax
329 #endif /* COUNT_XINVLTLB_HITS */
331 movl %cr3, %eax /* invalidate the TLB */
334 ss /* stack segment, avoid %ds load */
335 movl $0, lapic_eoi /* End Of Interrupt to APIC */
345 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
347 * - Stores current cpu state in checkstate_cpustate[cpuid]
348 * 0 == user, 1 == sys, 2 == intr
349 * - Stores current process in checkstate_curproc[cpuid]
351 * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
353 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
358 .globl Xcpucheckstate
359 .globl checkstate_cpustate
360 .globl checkstate_curproc
365 pushl %ds /* save current data segment */
369 mov %ax, %ds /* use KERNEL data segment */
373 movl $0, lapic_eoi /* End Of Interrupt to APIC */
380 testl $PSL_VM, 24(%esp)
382 incl %ebx /* system or interrupt */
384 movl PCPU(cpuid), %eax
385 movl %ebx, checkstate_cpustate(,%eax,4)
386 movl PCPU(curthread), %ebx
387 movl TD_PROC(%ebx),%ebx
388 movl %ebx, checkstate_curproc(,%eax,4)
390 movl %ebx, checkstate_pc(,%eax,4)
392 lock /* checkstate_probed_cpus |= (1<<id) */
393 btsl %eax, checkstate_probed_cpus
396 popl %ds /* restore previous data segment */
401 #endif /* BETTER_CLOCK */
405 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
407 * - Signals its receipt.
408 * - Waits for permission to restart.
409 * - Signals its restart.
421 pushl %ds /* save current data segment */
425 mov %ax, %ds /* use KERNEL data segment */
429 movl $0, lapic_eoi /* End Of Interrupt to APIC */
431 movl PCPU(cpuid), %eax
432 imull $PCB_SIZE, %eax
433 leal CNAME(stoppcbs)(%eax), %eax
435 call CNAME(savectx) /* Save process context */
439 movl PCPU(cpuid), %eax
442 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
444 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
448 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
450 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
455 movl CNAME(cpustop_restartfunc), %eax
458 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
463 popl %ds /* restore previous data segment */
472 * For now just have one ipiq IPI, but what we really want is
3 * to have one for each source cpu so the APICs don't get stalled
474 * backlogging the requests.
481 movl $0, lapic_eoi /* End Of Interrupt to APIC */
482 FAKE_MCOUNT(13*4(%esp))
484 movl PCPU(curthread),%ebx
485 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
487 incl PCPU(intr_nesting_level)
488 addl $TDPRI_CRIT,TD_PRI(%ebx)
489 call lwkt_process_ipiq
490 subl $TDPRI_CRIT,TD_PRI(%ebx)
491 decl PCPU(intr_nesting_level)
496 orl $RQF_IPIQ,PCPU(reqflags)
/* instantiate the fast-interrupt entry points for IO APIC pins 0-23 */
2 FAST_INTR(0,fastintr0)
3 FAST_INTR(1,fastintr1)
4 FAST_INTR(2,fastintr2)
5 FAST_INTR(3,fastintr3)
6 FAST_INTR(4,fastintr4)
7 FAST_INTR(5,fastintr5)
8 FAST_INTR(6,fastintr6)
9 FAST_INTR(7,fastintr7)
0 FAST_INTR(8,fastintr8)
1 FAST_INTR(9,fastintr9)
2 FAST_INTR(10,fastintr10)
3 FAST_INTR(11,fastintr11)
4 FAST_INTR(12,fastintr12)
5 FAST_INTR(13,fastintr13)
6 FAST_INTR(14,fastintr14)
7 FAST_INTR(15,fastintr15)
8 FAST_INTR(16,fastintr16)
9 FAST_INTR(17,fastintr17)
0 FAST_INTR(18,fastintr18)
1 FAST_INTR(19,fastintr19)
2 FAST_INTR(20,fastintr20)
3 FAST_INTR(21,fastintr21)
4 FAST_INTR(22,fastintr22)
5 FAST_INTR(23,fastintr23)
527 /* YYY what is this garbage? */
528 #define CLKINTR_PENDING \
530 movl $1,CNAME(clkintr_pending) ; \
531 call clock_unlock ; \
533 INTR(0,intr0, CLKINTR_PENDING)
/* instantiate the fast-interrupt restart (unpend) entry points, IRQs 0-23 */
8 FAST_UNPEND(0,fastunpend0)
9 FAST_UNPEND(1,fastunpend1)
0 FAST_UNPEND(2,fastunpend2)
1 FAST_UNPEND(3,fastunpend3)
2 FAST_UNPEND(4,fastunpend4)
3 FAST_UNPEND(5,fastunpend5)
4 FAST_UNPEND(6,fastunpend6)
5 FAST_UNPEND(7,fastunpend7)
6 FAST_UNPEND(8,fastunpend8)
7 FAST_UNPEND(9,fastunpend9)
8 FAST_UNPEND(10,fastunpend10)
9 FAST_UNPEND(11,fastunpend11)
0 FAST_UNPEND(12,fastunpend12)
1 FAST_UNPEND(13,fastunpend13)
2 FAST_UNPEND(14,fastunpend14)
3 FAST_UNPEND(15,fastunpend15)
4 FAST_UNPEND(16,fastunpend16)
5 FAST_UNPEND(17,fastunpend17)
6 FAST_UNPEND(18,fastunpend18)
7 FAST_UNPEND(19,fastunpend19)
8 FAST_UNPEND(20,fastunpend20)
9 FAST_UNPEND(21,fastunpend21)
0 FAST_UNPEND(22,fastunpend22)
1 FAST_UNPEND(23,fastunpend23)
585 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
587 * - Calls the generic rendezvous action function.
595 mov %ax, %ds /* use KERNEL data segment */
600 call smp_rendezvous_action
602 movl $0, lapic_eoi /* End Of Interrupt to APIC */
611 * Addresses of interrupt handlers.
612 * XresumeNN: Resumption addresses for HWIs.
618 * ipl.s: doreti_unpend
620 .long Xresume0, Xresume1, Xresume2, Xresume3
621 .long Xresume4, Xresume5, Xresume6, Xresume7
622 .long Xresume8, Xresume9, Xresume10, Xresume11
623 .long Xresume12, Xresume13, Xresume14, Xresume15
624 .long Xresume16, Xresume17, Xresume18, Xresume19
625 .long Xresume20, Xresume21, Xresume22, Xresume23
628 * ipl.s: doreti_unpend
629 * apic_ipl.s: splz_unpend
631 .long _swi_null, swi_net, _swi_null, _swi_null
632 .long _swi_vm, _swi_null, _softclock
634 imasks: /* masks for interrupt handlers */
635 .space NHWI*4 /* padding; HWI masks are elsewhere */
637 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
638 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
642 #ifdef COUNT_XINVLTLB_HITS
646 #endif /* COUNT_XINVLTLB_HITS */
648 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
649 .globl stopped_cpus, started_cpus
656 .globl checkstate_probed_cpus
657 checkstate_probed_cpus:
659 #endif /* BETTER_CLOCK */
660 .globl CNAME(cpustop_restartfunc)
661 CNAME(cpustop_restartfunc):
664 .globl apic_pin_trigger