2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.22 2005/09/10 06:48:08 dillon Exp $
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask with only bit irq_num set */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC from the IRQ#: redirection table
 * registers start at index 0x10 and each entry spans two 32-bit registers */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/* NOTE(review): int_to_apicintpin entries appear to be 16 bytes, with the
 * ioapic register address at offset 8 and the redirection-table index at
 * offset 12 -- confirm against the struct definition in intr_machdep.h */
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; \
71 #define MASK_IRQ(irq_num) \
	/* mask irq_num at its IO APIC redirection entry; no-op if already masked */ \
72 IMASK_LOCK ; /* take imen spinlock */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; /* already masked? */ \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level triggered? */ \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; /* addr of ISR word */ \
100 movl (%eax), %eax ; /* load the ISR word */ \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* this irq's ISR bit set? */ \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; /* send EOI to local apic */ \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; /* interrupt in service? */ \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; /* send EOI to local apic */ \
117 * Test to see if the source is currently masked, and clear the mask if so.
119 #define UNMASK_IRQ(irq_num) \
	/* unmask irq_num at its IO APIC redirection entry; no-op if not masked */ \
120 IMASK_LOCK ; /* take imen spinlock */ \
121 testl $IRQ_LBIT(irq_num), apic_imen ; /* currently masked? */ \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
125 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
126 movl %eax,(%ecx) ; /* write the index */ \
127 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
128 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
129 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
139 * doreti. Note that we cannot mess with mp_lock at all
140 * if we entered from a critical section!
141 * - If we can take the interrupt clear its fpending bit,
142 * call the handler, then unmask and doreti.
144 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
147 #define FAST_INTR(irq_num, vec_name) \
152 FAKE_MCOUNT(13*4(%esp)) ; \
153 MASK_LEVEL_IRQ(irq_num) ; /* mask source (level-triggered only) */ \
155 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
156 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
161 /* in critical section, make interrupt pending */ \
162 /* set the pending bit and return, leave interrupt masked */ \
163 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
164 orl $RQF_INTPEND,PCPU(reqflags) ; \
167 /* try to get the MP lock */ \
171 /* clear pending bit, run handler */ \
172 incl PCPU(intr_nesting_level) ; \
173 addl $TDPRI_CRIT,TD_PRI(%ebx) ; /* enter critical section */ \
174 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
175 pushl intr_unit + (irq_num) * 4 ; /* handler argument */ \
176 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
178 subl $TDPRI_CRIT,TD_PRI(%ebx) ; /* leave critical section */ \
179 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
180 movl intr_countp + (irq_num) * 4, %eax ; /* fetch interrupt counter ptr */ \
182 decl PCPU(intr_nesting_level) ; \
184 UNMASK_IRQ(irq_num) ; \
189 /* could not get the MP lock, forward the interrupt */ \
190 movl mp_lock, %eax ; /* check race */ \
191 cmpl $MP_FREE_LOCK,%eax ; \
193 incl PCPU(cnt)+V_FORWARDED_INTS ; \
195 movl $irq_num,8(%esp) ; /* ipi argument: irq number */ \
196 movl $forward_fastint_remote,4(%esp) ; /* ipi function */ \
198 call lwkt_send_ipiq_bycpu ; \
203 * Restart fast interrupt held up by critical section or cpl.
205 * - Push a dummy trap frame as required by doreti
206 * - The interrupt source is already masked
207 * - Clear the fpending bit
209 * - Unmask the interrupt
210 * - Pop the dummy frame and do a normal return
212 * The BGL is held on call and left held on return.
214 * YYY can cache gd base pointer instead of using hidden %fs
218 #define FAST_UNPEND(irq_num, vec_name) \
225 pushl intr_unit + (irq_num) * 4 ; /* handler argument */ \
226 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
228 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
229 movl intr_countp + (irq_num) * 4, %eax ; /* fetch interrupt counter ptr */ \
231 UNMASK_IRQ(irq_num) ; /* unmask the interrupt source */ \
237 * Slow interrupt call handlers run in the following sequence:
239 * - Push the trap frame required by doreti.
240 * - Mask the interrupt and reenable its source.
241 * - If we cannot take the interrupt set its ipending bit and
242 * doreti. In addition to checking for a critical section
243 * and cpl mask we also check to see if the thread is still
244 * running. Note that we cannot mess with mp_lock at all
245 * if we entered from a critical section!
246 * - If we can take the interrupt clear its ipending bit
247 * and schedule the thread. Leave interrupts masked and doreti.
249 * Note that calls to sched_ithd() are made with interrupts enabled
250 * and outside a critical section. YYY sched_ithd may preempt us
251 * synchronously (fix interrupt stacking).
253 * YYY can cache gd base pointer instead of using hidden %fs
257 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
262 maybe_extra_ipending ; \
264 MASK_LEVEL_IRQ(irq_num) ; /* mask source (level-triggered only) */ \
266 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
267 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
268 pushl %eax ; /* cpl do restore */ \
269 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
272 /* set the pending bit and return, leave the interrupt masked */ \
273 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
274 orl $RQF_INTPEND,PCPU(reqflags) ; \
277 /* set running bit, clear pending bit, run handler */ \
278 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
283 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
284 movl intr_countp + (irq_num) * 4,%eax ; /* fetch interrupt counter ptr */ \
291 * Wrong interrupt call handlers. We program these into APIC vectors
292 * that should otherwise never occur. For example, we program the SLOW
293 * vector for irq N with this when we program the FAST vector with the
296 * XXX for now all we can do is EOI it. We can't call do_wrongintr
297 * (yet) because we could be in a critical section.
299 #define WRONGINTR(irq_num,vec_name) \
	/* unexpected vector: just EOI it; can't call do_wrongintr because we
	 * could be inside a critical section */ \
304 movl $0, lapic_eoi ; /* End Of Interrupt to APIC */ \
305 /*pushl $irq_num ;*/ \
306 /*call do_wrongintr ;*/ \
312 * Handle "spurious INTerrupts".
314 * This is different than the "spurious INTerrupt" generated by an
315 * 8259 PIC for missing INTs. See the APIC documentation for details.
316 * This routine should NOT do an 'EOI' cycle.
323 /* No EOI cycle used here */
329 * Handle TLB shootdowns.
337 #ifdef COUNT_XINVLTLB_HITS
341 movl PCPU(cpuid), %eax
345 #endif /* COUNT_XINVLTLB_HITS */
347 movl %cr3, %eax /* invalidate the TLB */
350 ss /* stack segment, avoid %ds load */
351 movl $0, lapic_eoi /* End Of Interrupt to APIC */
358 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
360 * - Signals its receipt.
361 * - Waits for permission to restart.
362 * - Processing pending IPIQ events while waiting.
363 * - Signals its restart.
375 pushl %ds /* save current data segment */
379 mov %ax, %ds /* use KERNEL data segment */
383 movl $0, lapic_eoi /* End Of Interrupt to APIC */
385 movl PCPU(cpuid), %eax
386 imull $PCB_SIZE, %eax
387 leal CNAME(stoppcbs)(%eax), %eax
389 call CNAME(savectx) /* Save process context */
393 movl PCPU(cpuid), %eax
396 * Indicate that we have stopped and loop waiting for permission
397 * to start again. We must still process IPI events while in a
401 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
403 andl $~RQF_IPIQ,PCPU(reqflags)
405 call lwkt_smp_stopped
407 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
411 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
413 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
418 movl CNAME(cpustop_restartfunc), %eax
421 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
426 popl %ds /* restore previous data segment */
435 * For now just have one ipiq IPI, but what we really want is
436 * to have one for each source cpu so the APICs don't get stalled
437 * backlogging the requests.
444 movl $0, lapic_eoi /* End Of Interrupt to APIC */
445 FAKE_MCOUNT(13*4(%esp))
447 movl PCPU(curthread),%ebx
448 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
450 subl $8,%esp /* make same as interrupt frame */
451 incl PCPU(intr_nesting_level)
452 addl $TDPRI_CRIT,TD_PRI(%ebx)
453 call lwkt_process_ipiq_frame
454 subl $TDPRI_CRIT,TD_PRI(%ebx)
455 decl PCPU(intr_nesting_level)
457 pushl $0 /* CPL for frame (REMOVED) */
461 orl $RQF_IPIQ,PCPU(reqflags)
/* instantiate the fast interrupt vector entry points, one per IRQ 0-23 */
467 FAST_INTR(0,fastintr0)
468 FAST_INTR(1,fastintr1)
469 FAST_INTR(2,fastintr2)
470 FAST_INTR(3,fastintr3)
471 FAST_INTR(4,fastintr4)
472 FAST_INTR(5,fastintr5)
473 FAST_INTR(6,fastintr6)
474 FAST_INTR(7,fastintr7)
475 FAST_INTR(8,fastintr8)
476 FAST_INTR(9,fastintr9)
477 FAST_INTR(10,fastintr10)
478 FAST_INTR(11,fastintr11)
479 FAST_INTR(12,fastintr12)
480 FAST_INTR(13,fastintr13)
481 FAST_INTR(14,fastintr14)
482 FAST_INTR(15,fastintr15)
483 FAST_INTR(16,fastintr16)
484 FAST_INTR(17,fastintr17)
485 FAST_INTR(18,fastintr18)
486 FAST_INTR(19,fastintr19)
487 FAST_INTR(20,fastintr20)
488 FAST_INTR(21,fastintr21)
489 FAST_INTR(22,fastintr22)
490 FAST_INTR(23,fastintr23)
492 /* YYY what is this garbage? */
/* instantiate the fast-interrupt restart (unpend) entry points, IRQ 0-23 */
519 FAST_UNPEND(0,fastunpend0)
520 FAST_UNPEND(1,fastunpend1)
521 FAST_UNPEND(2,fastunpend2)
522 FAST_UNPEND(3,fastunpend3)
523 FAST_UNPEND(4,fastunpend4)
524 FAST_UNPEND(5,fastunpend5)
525 FAST_UNPEND(6,fastunpend6)
526 FAST_UNPEND(7,fastunpend7)
527 FAST_UNPEND(8,fastunpend8)
528 FAST_UNPEND(9,fastunpend9)
529 FAST_UNPEND(10,fastunpend10)
530 FAST_UNPEND(11,fastunpend11)
531 FAST_UNPEND(12,fastunpend12)
532 FAST_UNPEND(13,fastunpend13)
533 FAST_UNPEND(14,fastunpend14)
534 FAST_UNPEND(15,fastunpend15)
535 FAST_UNPEND(16,fastunpend16)
536 FAST_UNPEND(17,fastunpend17)
537 FAST_UNPEND(18,fastunpend18)
538 FAST_UNPEND(19,fastunpend19)
539 FAST_UNPEND(20,fastunpend20)
540 FAST_UNPEND(21,fastunpend21)
541 FAST_UNPEND(22,fastunpend22)
542 FAST_UNPEND(23,fastunpend23)
/* instantiate the "should never fire" vectors, one per IRQ 0-23 */
544 WRONGINTR(0,wrongintr0)
545 WRONGINTR(1,wrongintr1)
546 WRONGINTR(2,wrongintr2)
547 WRONGINTR(3,wrongintr3)
548 WRONGINTR(4,wrongintr4)
549 WRONGINTR(5,wrongintr5)
550 WRONGINTR(6,wrongintr6)
551 WRONGINTR(7,wrongintr7)
552 WRONGINTR(8,wrongintr8)
553 WRONGINTR(9,wrongintr9)
554 WRONGINTR(10,wrongintr10)
555 WRONGINTR(11,wrongintr11)
556 WRONGINTR(12,wrongintr12)
557 WRONGINTR(13,wrongintr13)
558 WRONGINTR(14,wrongintr14)
559 WRONGINTR(15,wrongintr15)
560 WRONGINTR(16,wrongintr16)
561 WRONGINTR(17,wrongintr17)
562 WRONGINTR(18,wrongintr18)
563 WRONGINTR(19,wrongintr19)
564 WRONGINTR(20,wrongintr20)
565 WRONGINTR(21,wrongintr21)
566 WRONGINTR(22,wrongintr22)
567 WRONGINTR(23,wrongintr23)
572 #ifdef COUNT_XINVLTLB_HITS
576 #endif /* COUNT_XINVLTLB_HITS */
578 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
579 .globl stopped_cpus, started_cpus
585 .globl CNAME(cpustop_restartfunc)
586 CNAME(cpustop_restartfunc):
589 .globl apic_pin_trigger