2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.23 2005/10/13 00:02:47 dillon Exp $
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask (bit irq_num set) */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC redirection table from the IRQ#; each pin uses two 32-bit register indices starting at 0x10 */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* pushal + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* dummy trap & error codes */ \
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8 /* ioapic register base for this irq */
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12 /* redirection entry index for this irq */
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; /* release the imen spinlock */ \
71 #define MASK_IRQ(irq_num) \
72 IMASK_LOCK ; /* take the imen spinlock */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; /* already masked? */ \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level triggered? */ \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; /* address of the ISR word for this irq */ \
100 movl (%eax), %eax ; /* load the ISR word */ \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* in-service bit for this irq set? */ \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; /* do the EOI */ \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; /* in service? */ \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; /* do the EOI */ \
117 * Test to see if the source is currently masked, clear if so.
119 #define UNMASK_IRQ(irq_num) \
122 IMASK_LOCK ; /* take the imen spinlock */ \
123 testl $IRQ_LBIT(irq_num), apic_imen ; /* currently masked? */ \
124 je 7f ; /* bit clear, not masked */ \
125 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
126 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
127 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
128 movl %eax,(%ecx) ; /* write the index */ \
129 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
130 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
131 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
137 * Fast interrupt call handlers run in the following sequence:
139 * - Push the trap frame required by doreti
140 * - Mask the interrupt and reenable its source
141 * - If we cannot take the interrupt set its fpending bit and
142 * doreti. Note that we cannot mess with mp_lock at all
143 * if we entered from a critical section!
144 * - If we can take the interrupt clear its fpending bit,
145 * call the handler, then unmask and doreti.
147 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
150 #define FAST_INTR(irq_num, vec_name) \
155 FAKE_MCOUNT(13*4(%esp)) ; \
156 MASK_LEVEL_IRQ(irq_num) ; /* mask if level triggered */ \
158 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
159 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
161 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* in a critical section? */ \
164 /* in critical section, make interrupt pending */ \
165 /* set the pending bit and return, leave interrupt masked */ \
166 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
167 orl $RQF_INTPEND,PCPU(reqflags) ; /* ask doreti to process it */ \
170 /* clear pending bit, run handler */ \
171 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
173 call ithread_fast_handler ; /* returns 0 to unmask */ \
175 UNMASK_IRQ(irq_num) ; /* reenable the source */ \
181 * Restart fast interrupt held up by critical section or cpl.
183 * - Push a dummy trap frame as required by doreti
184 * - The interrupt source is already masked
185 * - Clear the fpending bit
187 * - Unmask the interrupt
188 * - Pop the dummy frame and do a normal return
190 * The BGL is held on call and left held on return.
192 * YYY can cache gd base pointer instead of using hidden %fs
196 #define FAST_UNPEND(irq_num, vec_name) \
204 call ithread_fast_handler ; /* returns 0 to unmask */ \
206 UNMASK_IRQ(irq_num) ; /* reenable the source */ \
212 * Slow interrupt call handlers run in the following sequence:
214 * - Push the trap frame required by doreti.
215 * - Mask the interrupt and reenable its source.
216 * - If we cannot take the interrupt set its ipending bit and
217 * doreti. In addition to checking for a critical section
218 * and cpl mask we also check to see if the thread is still
219 * running. Note that we cannot mess with mp_lock at all
220 * if we entered from a critical section!
221 * - If we can take the interrupt clear its ipending bit
222 * and schedule the thread. Leave interrupts masked and doreti.
224 * Note that calls to sched_ithd() are made with interrupts enabled
225 * and outside a critical section. YYY sched_ithd may preempt us
226 * synchronously (fix interrupt stacking).
228 * YYY can cache gd base pointer instead of using hidden %fs
232 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
237 maybe_extra_ipending ; \
239 MASK_LEVEL_IRQ(irq_num) ; /* mask if level triggered */ \
241 movl PCPU(curthread),%ebx ; /* ebx = current thread */ \
242 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
243 pushl %eax ; /* cpl to restore */ \
244 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* in a critical section? */ \
247 /* set the pending bit and return, leave the interrupt masked */ \
248 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
249 orl $RQF_INTPEND,PCPU(reqflags) ; /* ask doreti to process it */ \
252 /* set running bit, clear pending bit, run handler */ \
253 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
263 * Wrong interrupt call handlers. We program these into APIC vectors
264 * that should otherwise never occur. For example, we program the SLOW
265 * vector for irq N with this when we program the FAST vector with the
268 * XXX for now all we can do is EOI it. We can't call do_wrongintr
269 * (yet) because we could be in a critical section.
271 #define WRONGINTR(irq_num,vec_name) /* unexpected vector: just EOI it */ \
276 movl $0, lapic_eoi ; /* End Of Interrupt to APIC */ \
277 /*pushl $irq_num ;*/ \
278 /*call do_wrongintr ;*/ \
284 * Handle "spurious INTerrupts".
286 * This is different than the "spurious INTerrupt" generated by an
287 * 8259 PIC for missing INTs. See the APIC documentation for details.
288 * This routine should NOT do an 'EOI' cycle.
295 /* No EOI cycle used here */
301 * Handle TLB shootdowns.
309 #ifdef COUNT_XINVLTLB_HITS
313 movl PCPU(cpuid), %eax
317 #endif /* COUNT_XINVLTLB_HITS */
319 movl %cr3, %eax /* invalidate the TLB */
322 ss /* stack segment, avoid %ds load */
323 movl $0, lapic_eoi /* End Of Interrupt to APIC */
330 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
332 * - Signals its receipt.
333 * - Waits for permission to restart.
334 * - Processing pending IPIQ events while waiting.
335 * - Signals its restart.
347 pushl %ds /* save current data segment */
351 mov %ax, %ds /* use KERNEL data segment */
355 movl $0, lapic_eoi /* End Of Interrupt to APIC */
357 movl PCPU(cpuid), %eax
358 imull $PCB_SIZE, %eax
359 leal CNAME(stoppcbs)(%eax), %eax
361 call CNAME(savectx) /* Save process context */
365 movl PCPU(cpuid), %eax
368 * Indicate that we have stopped and loop waiting for permission
369 * to start again. We must still process IPI events while in a
373 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
375 andl $~RQF_IPIQ,PCPU(reqflags)
377 call lwkt_smp_stopped
379 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
383 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
385 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
390 movl CNAME(cpustop_restartfunc), %eax
393 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
398 popl %ds /* restore previous data segment */
407 * For now just have one ipiq IPI, but what we really want is
408 * to have one for each source cpu so the APICs don't get stalled
409 * backlogging the requests.
416 movl $0, lapic_eoi /* End Of Interrupt to APIC */
417 FAKE_MCOUNT(13*4(%esp))
419 movl PCPU(curthread),%ebx
420 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
422 subl $8,%esp /* make same as interrupt frame */
423 incl PCPU(intr_nesting_level)
424 addl $TDPRI_CRIT,TD_PRI(%ebx)
425 call lwkt_process_ipiq_frame
426 subl $TDPRI_CRIT,TD_PRI(%ebx)
427 decl PCPU(intr_nesting_level)
429 pushl $0 /* CPL for frame (REMOVED) */
433 orl $RQF_IPIQ,PCPU(reqflags)
/* Instantiate the fast interrupt vector entry points for IRQs 0-23. */
439 FAST_INTR(0,fastintr0)
440 FAST_INTR(1,fastintr1)
441 FAST_INTR(2,fastintr2)
442 FAST_INTR(3,fastintr3)
443 FAST_INTR(4,fastintr4)
444 FAST_INTR(5,fastintr5)
445 FAST_INTR(6,fastintr6)
446 FAST_INTR(7,fastintr7)
447 FAST_INTR(8,fastintr8)
448 FAST_INTR(9,fastintr9)
449 FAST_INTR(10,fastintr10)
450 FAST_INTR(11,fastintr11)
451 FAST_INTR(12,fastintr12)
452 FAST_INTR(13,fastintr13)
453 FAST_INTR(14,fastintr14)
454 FAST_INTR(15,fastintr15)
455 FAST_INTR(16,fastintr16)
456 FAST_INTR(17,fastintr17)
457 FAST_INTR(18,fastintr18)
458 FAST_INTR(19,fastintr19)
459 FAST_INTR(20,fastintr20)
460 FAST_INTR(21,fastintr21)
461 FAST_INTR(22,fastintr22)
462 FAST_INTR(23,fastintr23)
464 /* YYY what is this garbage? */
/* Instantiate the unpend entry points used to run a previously held-off fast interrupt. */
491 FAST_UNPEND(0,fastunpend0)
492 FAST_UNPEND(1,fastunpend1)
493 FAST_UNPEND(2,fastunpend2)
494 FAST_UNPEND(3,fastunpend3)
495 FAST_UNPEND(4,fastunpend4)
496 FAST_UNPEND(5,fastunpend5)
497 FAST_UNPEND(6,fastunpend6)
498 FAST_UNPEND(7,fastunpend7)
499 FAST_UNPEND(8,fastunpend8)
500 FAST_UNPEND(9,fastunpend9)
501 FAST_UNPEND(10,fastunpend10)
502 FAST_UNPEND(11,fastunpend11)
503 FAST_UNPEND(12,fastunpend12)
504 FAST_UNPEND(13,fastunpend13)
505 FAST_UNPEND(14,fastunpend14)
506 FAST_UNPEND(15,fastunpend15)
507 FAST_UNPEND(16,fastunpend16)
508 FAST_UNPEND(17,fastunpend17)
509 FAST_UNPEND(18,fastunpend18)
510 FAST_UNPEND(19,fastunpend19)
511 FAST_UNPEND(20,fastunpend20)
512 FAST_UNPEND(21,fastunpend21)
513 FAST_UNPEND(22,fastunpend22)
514 FAST_UNPEND(23,fastunpend23)
/* Instantiate the wrong-interrupt vectors for IRQs 0-23 (programmed into vectors that should never fire). */
516 WRONGINTR(0,wrongintr0)
517 WRONGINTR(1,wrongintr1)
518 WRONGINTR(2,wrongintr2)
519 WRONGINTR(3,wrongintr3)
520 WRONGINTR(4,wrongintr4)
521 WRONGINTR(5,wrongintr5)
522 WRONGINTR(6,wrongintr6)
523 WRONGINTR(7,wrongintr7)
524 WRONGINTR(8,wrongintr8)
525 WRONGINTR(9,wrongintr9)
526 WRONGINTR(10,wrongintr10)
527 WRONGINTR(11,wrongintr11)
528 WRONGINTR(12,wrongintr12)
529 WRONGINTR(13,wrongintr13)
530 WRONGINTR(14,wrongintr14)
531 WRONGINTR(15,wrongintr15)
532 WRONGINTR(16,wrongintr16)
533 WRONGINTR(17,wrongintr17)
534 WRONGINTR(18,wrongintr18)
535 WRONGINTR(19,wrongintr19)
536 WRONGINTR(20,wrongintr20)
537 WRONGINTR(21,wrongintr21)
538 WRONGINTR(22,wrongintr22)
539 WRONGINTR(23,wrongintr23)
544 #ifdef COUNT_XINVLTLB_HITS
548 #endif /* COUNT_XINVLTLB_HITS */
550 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
551 .globl stopped_cpus, started_cpus
/* optional restart hook run by a stopped cpu; one-shot (cleared after use) */
557 .globl CNAME(cpustop_restartfunc)
558 CNAME(cpustop_restartfunc):
/* bitmask of level-triggered pins (nonzero bit => level; see MASK_LEVEL_IRQ) */
561 .globl apic_pin_trigger