2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.18 2004/02/21 06:37:08 dillon Exp $
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC from the IRQ# */
/* (each redirection-table entry spans two 32-bit select indices, base 0x10) */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
/* int_to_apicintpin[] entries are 16 bytes apart: offset 8 is used below as */
/* the IO APIC register address, offset 12 as the redirection-table index */
/* (see their use in MASK_IRQ / UNMASK_IRQ). */
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
71 #define MASK_IRQ(irq_num) \
/* Mask irq_num at its IO APIC pin unless apic_imen already shows it masked. */ \
/* Serialized by imen_spinlock (IMASK_LOCK); interrupts are expected to be */ \
/* disabled by the caller (see comment above IMASK_LOCK). */ \
72 	IMASK_LOCK ;				/* into critical reg */ \
73 	testl	$IRQ_LBIT(irq_num), apic_imen ;	\
74 	jne	7f ;			/* masked, don't mask */ \
75 	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */ \
76 	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
/* Indirect IO APIC access: write the redirection-entry index into the */ \
/* select register, then OR the mask bit into the data window. */ \
77 	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
78 	movl	%eax, (%ecx) ;			/* write the index */ \
79 	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
80 	orl	$IOART_INTMASK, %eax ;		/* set the mask */ \
81 	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
82 7: ;					/* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; \
100 movl (%eax), %eax ; \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; \
117  * Test to see if the source is currently masked, clear if so.
119 #define UNMASK_IRQ(irq_num) \
/* Inverse of MASK_IRQ: if apic_imen shows irq_num masked, clear the */ \
/* software mask bit and clear IOART_INTMASK in the IO APIC redirection */ \
/* entry.  Same locking rules as MASK_IRQ (imen_spinlock, ints disabled). */ \
120 	IMASK_LOCK ;				/* into critical reg */ \
121 	testl	$IRQ_LBIT(irq_num), apic_imen ; \
122 	je	7f ;			/* bit clear, not masked */ \
123 	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */ \
/* Select the redirection entry, then AND the mask bit out of the window. */ \
125 	movl	REDIRIDX(irq_num), %eax ;	/* get the index */ \
126 	movl	%eax,(%ecx) ;			/* write the index */ \
127 	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */ \
128 	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */ \
129 	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
139 * doreti. Note that we cannot mess with mp_lock at all
140 * if we entered from a critical section!
141 * - If we can take the interrupt clear its fpending bit,
142 * call the handler, then unmask and doreti.
144  * YYY can cache gd base pointer instead of using hidden %fs prefixes.
147 #define FAST_INTR(irq_num, vec_name) \
152 FAKE_MCOUNT(13*4(%esp)) ; \
153 MASK_LEVEL_IRQ(irq_num) ; \
155 movl PCPU(curthread),%ebx ; \
156 movl TD_CPL(%ebx),%eax ; \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
160 testl $IRQ_LBIT(irq_num), %eax ; \
163 /* in critical section, make interrupt pending */ \
164 /* set the pending bit and return, leave interrupt masked */ \
165 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
166 orl $RQF_INTPEND,PCPU(reqflags) ; \
169 /* try to get the MP lock */ \
173 /* clear pending bit, run handler */ \
174 incl PCPU(intr_nesting_level) ; \
175 addl $TDPRI_CRIT,TD_PRI(%ebx) ; \
176 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
177 pushl intr_unit + (irq_num) * 4 ; \
178 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
180 subl $TDPRI_CRIT,TD_PRI(%ebx) ; \
181 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
182 movl intr_countp + (irq_num) * 4, %eax ; \
184 decl PCPU(intr_nesting_level) ; \
186 UNMASK_IRQ(irq_num) ; \
191 /* could not get the MP lock, forward the interrupt */ \
192 movl mp_lock, %eax ; /* check race */ \
193 cmpl $MP_FREE_LOCK,%eax ; \
195 incl PCPU(cnt)+V_FORWARDED_INTS ; \
197 movl $irq_num,8(%esp) ; \
198 movl $forward_fastint_remote,4(%esp) ; \
200 call lwkt_send_ipiq_bycpu ; \
205 * Restart fast interrupt held up by critical section or cpl.
207  *	- Push a dummy trap frame as required by doreti
208 * - The interrupt source is already masked
209 * - Clear the fpending bit
211 * - Unmask the interrupt
212 * - Pop the dummy frame and do a normal return
214 * The BGL is held on call and left held on return.
216 * YYY can cache gd base pointer instead of using hidden %fs
220 #define FAST_UNPEND(irq_num, vec_name) \
227 pushl intr_unit + (irq_num) * 4 ; \
228 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
230 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
231 movl intr_countp + (irq_num) * 4, %eax ; \
233 UNMASK_IRQ(irq_num) ; \
239 * Slow interrupt call handlers run in the following sequence:
241 * - Push the trap frame required by doreti.
242 * - Mask the interrupt and reenable its source.
243 * - If we cannot take the interrupt set its ipending bit and
244 * doreti. In addition to checking for a critical section
245 * and cpl mask we also check to see if the thread is still
246 * running. Note that we cannot mess with mp_lock at all
247 * if we entered from a critical section!
248 * - If we can take the interrupt clear its ipending bit
249 * and schedule the thread. Leave interrupts masked and doreti.
251 * Note that calls to sched_ithd() are made with interrupts enabled
252 * and outside a critical section. YYY sched_ithd may preempt us
253 * synchronously (fix interrupt stacking).
255 * YYY can cache gd base pointer instead of using hidden %fs
259 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
264 maybe_extra_ipending ; \
266 MASK_LEVEL_IRQ(irq_num) ; \
268 movl PCPU(curthread),%ebx ; \
269 movl TD_CPL(%ebx),%eax ; \
270 pushl %eax ; /* cpl do restore */ \
271 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
273 testl $IRQ_LBIT(irq_num),%eax ; \
276 /* set the pending bit and return, leave the interrupt masked */ \
277 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
278 orl $RQF_INTPEND,PCPU(reqflags) ; \
281 /* set running bit, clear pending bit, run handler */ \
282 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
287 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
288 movl intr_countp + (irq_num) * 4,%eax ; \
296 * Handle "spurious INTerrupts".
298 * This is different than the "spurious INTerrupt" generated by an
299 * 8259 PIC for missing INTs. See the APIC documentation for details.
300 * This routine should NOT do an 'EOI' cycle.
307 /* No EOI cycle used here */
313 * Handle TLB shootdowns.
321 #ifdef COUNT_XINVLTLB_HITS
325 movl PCPU(cpuid), %eax
329 #endif /* COUNT_XINVLTLB_HITS */
331 movl %cr3, %eax /* invalidate the TLB */
334 ss /* stack segment, avoid %ds load */
335 movl $0, lapic_eoi /* End Of Interrupt to APIC */
342 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
344 * - Signals its receipt.
345 * - Waits for permission to restart.
346 * - Signals its restart.
358 pushl %ds /* save current data segment */
362 mov %ax, %ds /* use KERNEL data segment */
366 movl $0, lapic_eoi /* End Of Interrupt to APIC */
368 movl PCPU(cpuid), %eax
369 imull $PCB_SIZE, %eax
370 leal CNAME(stoppcbs)(%eax), %eax
372 call CNAME(savectx) /* Save process context */
376 movl PCPU(cpuid), %eax
379 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
381 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
385 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
387 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
392 movl CNAME(cpustop_restartfunc), %eax
395 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
400 popl %ds /* restore previous data segment */
409 * For now just have one ipiq IPI, but what we really want is
410  * to have one for each source cpu so the APICs don't get stalled
411 * backlogging the requests.
418 movl $0, lapic_eoi /* End Of Interrupt to APIC */
419 FAKE_MCOUNT(13*4(%esp))
421 movl PCPU(curthread),%ebx
422 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
424 subl $8,%esp /* make same as interrupt frame */
425 incl PCPU(intr_nesting_level)
426 addl $TDPRI_CRIT,TD_PRI(%ebx)
427 call lwkt_process_ipiq_frame
428 subl $TDPRI_CRIT,TD_PRI(%ebx)
429 decl PCPU(intr_nesting_level)
435 orl $RQF_IPIQ,PCPU(reqflags)
/*
 * Instantiate one fast-interrupt entry point per APIC interrupt source
 * (0-23) via the FAST_INTR() macro defined above.
 */
441 	FAST_INTR(0,fastintr0)
442 	FAST_INTR(1,fastintr1)
443 	FAST_INTR(2,fastintr2)
444 	FAST_INTR(3,fastintr3)
445 	FAST_INTR(4,fastintr4)
446 	FAST_INTR(5,fastintr5)
447 	FAST_INTR(6,fastintr6)
448 	FAST_INTR(7,fastintr7)
449 	FAST_INTR(8,fastintr8)
450 	FAST_INTR(9,fastintr9)
451 	FAST_INTR(10,fastintr10)
452 	FAST_INTR(11,fastintr11)
453 	FAST_INTR(12,fastintr12)
454 	FAST_INTR(13,fastintr13)
455 	FAST_INTR(14,fastintr14)
456 	FAST_INTR(15,fastintr15)
457 	FAST_INTR(16,fastintr16)
458 	FAST_INTR(17,fastintr17)
459 	FAST_INTR(18,fastintr18)
460 	FAST_INTR(19,fastintr19)
461 	FAST_INTR(20,fastintr20)
462 	FAST_INTR(21,fastintr21)
463 	FAST_INTR(22,fastintr22)
464 	FAST_INTR(23,fastintr23)
466 /* YYY what is this garbage? */
/*
 * Instantiate one FAST_UNPEND entry point per interrupt source (0-23);
 * these restart a fast interrupt that was previously held up by a
 * critical section or cpl (see the FAST_UNPEND comment block above).
 */
493 	FAST_UNPEND(0,fastunpend0)
494 	FAST_UNPEND(1,fastunpend1)
495 	FAST_UNPEND(2,fastunpend2)
496 	FAST_UNPEND(3,fastunpend3)
497 	FAST_UNPEND(4,fastunpend4)
498 	FAST_UNPEND(5,fastunpend5)
499 	FAST_UNPEND(6,fastunpend6)
500 	FAST_UNPEND(7,fastunpend7)
501 	FAST_UNPEND(8,fastunpend8)
502 	FAST_UNPEND(9,fastunpend9)
503 	FAST_UNPEND(10,fastunpend10)
504 	FAST_UNPEND(11,fastunpend11)
505 	FAST_UNPEND(12,fastunpend12)
506 	FAST_UNPEND(13,fastunpend13)
507 	FAST_UNPEND(14,fastunpend14)
508 	FAST_UNPEND(15,fastunpend15)
509 	FAST_UNPEND(16,fastunpend16)
510 	FAST_UNPEND(17,fastunpend17)
511 	FAST_UNPEND(18,fastunpend18)
512 	FAST_UNPEND(19,fastunpend19)
513 	FAST_UNPEND(20,fastunpend20)
514 	FAST_UNPEND(21,fastunpend21)
515 	FAST_UNPEND(22,fastunpend22)
516 	FAST_UNPEND(23,fastunpend23)
520 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
522 * - Calls the generic rendezvous action function.
530 mov %ax, %ds /* use KERNEL data segment */
535 call smp_rendezvous_action
537 movl $0, lapic_eoi /* End Of Interrupt to APIC */
546 * Addresses of interrupt handlers.
547 * XresumeNN: Resumption addresses for HWIs.
553 * ipl.s: doreti_unpend
555 .long Xresume0, Xresume1, Xresume2, Xresume3
556 .long Xresume4, Xresume5, Xresume6, Xresume7
557 .long Xresume8, Xresume9, Xresume10, Xresume11
558 .long Xresume12, Xresume13, Xresume14, Xresume15
559 .long Xresume16, Xresume17, Xresume18, Xresume19
560 .long Xresume20, Xresume21, Xresume22, Xresume23
563 * ipl.s: doreti_unpend
564 * apic_ipl.s: splz_unpend
566 .long _swi_null, swi_net, _swi_null, _swi_null
567 .long _swi_vm, _swi_null, _softclock
569 imasks: /* masks for interrupt handlers */
570 .space NHWI*4 /* padding; HWI masks are elsewhere */
572 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
573 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
577 #ifdef COUNT_XINVLTLB_HITS
581 #endif /* COUNT_XINVLTLB_HITS */
583 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
584 .globl stopped_cpus, started_cpus
590 .globl CNAME(cpustop_restartfunc)
591 CNAME(cpustop_restartfunc):
594 .globl apic_pin_trigger