/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#include "opt_auto_eoi.h"

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
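/*
 * Illustrative note (not part of the original source): the two macros
 * above are plain arithmetic.  Each IO APIC redirection table entry is
 * 64 bits wide (two 32-bit registers) and the table starts at register
 * index 0x10, hence the "* 2" and the 0x10 offset.  A rough C sketch:
 *
 *	#define IRQ_LBIT(irq)	(1 << (irq))		// IRQ 10 -> 0x0400
 *	#define REDTBL_IDX(irq)	(0x10 + (irq) * 2)	// IRQ 10 -> 0x24
 */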
#define MPLOCKED	lock ;

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
        pushl $0 ;	/* dummy error code */ \
        pushl $0 ;	/* dummy trap type */ \
        pushl $0 ;	/* dummy xflags type */ \
        pushl %ds ;	/* save data and extra segments ... */ \

        pushfl ;	/* phys int frame / flags */ \
        pushl %cs ;	/* phys int frame / cs */ \
        pushl 12(%esp) ;	/* original caller eip */ \
        pushl $0 ;	/* dummy error code */ \
        pushl $0 ;	/* dummy trap type */ \
        pushl $0 ;	/* dummy xflags type */ \
        subl $13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */ \
/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
        addl $3*4,%esp ;	/* dummy xflags, trap & error codes */ \
#define IOAPICADDR(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num)	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num) \
        APIC_IMASK_LOCK ;	/* into critical reg */ \
        testl $IRQ_LBIT(irq_num), apic_imen ; \
        jne 7f ;	/* masked, don't mask */ \
        orl $IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */ \
        movl IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
        movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
        movl %eax, (%ecx) ;	/* write the index */ \
        orl $IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* set the mask */ \
7: ;	/* already masked */ \

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
        testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
        jz 9f ;	/* edge, don't mask */ \
        MASK_IRQ(irq_num) ; \
/*
 * Test to see if the source is currently masked, and clear the mask if so.
 */
#define UNMASK_IRQ(irq_num) \
        APIC_IMASK_LOCK ;	/* into critical reg */ \
        testl $IRQ_LBIT(irq_num), apic_imen ; \
        je 7f ;	/* bit clear, not masked */ \
        andl $~IRQ_LBIT(irq_num), apic_imen ;	/* clear mask bit */ \
        movl IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */ \
        movl REDIRIDX(irq_num), %eax ;	/* get the index */ \
        movl %eax,(%ecx) ;	/* write the index */ \
        andl $~IOART_INTMASK,IOAPIC_WINDOW(%ecx) ;/* clear the mask */ \
        APIC_IMASK_UNLOCK ; \
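/*
 * Illustrative sketch (not part of the original source): MASK_IRQ and
 * UNMASK_IRQ above use the standard IO APIC access pattern of writing a
 * register index to the select register and then read-modify-writing the
 * data window at offset 0x10.  A hedged C equivalent; ioapic_set_mask()
 * and the volatile-pointer layout are assumptions for illustration only.
 *
 *	static void
 *	ioapic_set_mask(volatile uint32_t *ioapic, uint32_t redtbl_idx, int mask)
 *	{
 *		ioapic[0] = redtbl_idx;			// IOREGSEL: select entry
 *		if (mask)
 *			ioapic[4] |= IOART_INTMASK;	// IOWIN (base + 0x10)
 *		else
 *			ioapic[4] &= ~IOART_INTMASK;
 *	}
 */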
/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
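/*
 * Illustrative sketch (not part of the original source): the decision
 * flow that FAST_INTR implements below, written as hedged C pseudocode.
 * fast_intr_flow() and in_critical_section() are hypothetical names; the
 * real tests are on TD_NEST_COUNT and TD_PRI vs TDPRI_CRIT.
 *
 *	void
 *	fast_intr_flow(int irq, struct thread *td, struct globaldata *gd,
 *		       struct intrframe *frame)
 *	{
 *		mask_level_irq(irq);			// level INTs stay masked
 *		lapic_eoi();
 *		if (in_critical_section(td)) {
 *			// cannot run the handler now; mark it pending and
 *			// let doreti pick it up, leaving the source masked
 *			gd->gd_fpending |= 1 << irq;
 *			gd->gd_reqflags |= RQF_INTPEND;
 *		} else {
 *			gd->gd_fpending &= ~(1 << irq);
 *			if (ithread_fast_handler(frame) == 0)
 *				unmask_irq(irq);	// handler says unmask
 *		}
 *	}
 */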
#define FAST_INTR(irq_num, vec_name) \
        FAKE_MCOUNT(15*4(%esp)) ; \
        MASK_LEVEL_IRQ(irq_num) ; \
        movl $0, lapic_eoi ; \
        movl PCPU(curthread),%ebx ; \
        movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
        testl $-1,TD_NEST_COUNT(%ebx) ; \
        cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
        /* in critical section, make interrupt pending */ \
        /* set the pending bit and return, leave interrupt masked */ \
        orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
        orl $RQF_INTPEND,PCPU(reqflags) ; \
        /* clear pending bit, run handler */ \
        andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
        pushl %esp ;	/* pass frame by reference */ \
        call ithread_fast_handler ;	/* returns 0 to unmask */ \
        UNMASK_IRQ(irq_num) ; \
/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
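/*
 * Illustrative sketch (not part of the original source): the slow path
 * never runs the handler directly; it either marks the interrupt pending
 * or schedules the interrupt thread via sched_ithd(), and in both cases
 * the source stays masked.  Hedged C pseudocode with hypothetical names.
 *
 *	void
 *	slow_intr_flow(int irq, struct thread *td, struct globaldata *gd)
 *	{
 *		mask_level_irq(irq);
 *		lapic_eoi();
 *		if (in_critical_section(td)) {
 *			gd->gd_ipending |= 1 << irq;
 *			gd->gd_reqflags |= RQF_INTPEND;
 *		} else {
 *			gd->gd_ipending &= ~(1 << irq);
 *			td->td_nest_count++;	// sched_ithd() may preempt us
 *			sched_ithd(irq);	// runs with interrupts enabled
 *			td->td_nest_count--;
 *		}
 *	}
 */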
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending) \
        maybe_extra_ipending ; \
        MASK_LEVEL_IRQ(irq_num) ; \
        incl PCPU(cnt) + V_INTR ; \
        movl $0, lapic_eoi ; \
        movl PCPU(curthread),%ebx ; \
        movl $0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */ \
        pushl %eax ;	/* cpl to restore */ \
        testl $-1,TD_NEST_COUNT(%ebx) ; \
        cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
        /* set the pending bit and return, leave the interrupt masked */ \
        orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
        orl $RQF_INTPEND,PCPU(reqflags) ; \
        /* set running bit, clear pending bit, run handler */ \
        andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
        incl TD_NEST_COUNT(%ebx) ; \
        decl TD_NEST_COUNT(%ebx) ; \
/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real vector.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name) \
        movl $0, lapic_eoi ;	/* End Of Interrupt to APIC */ \
        /*pushl $irq_num ;*/ \
        /*call do_wrongintr ;*/ \
/*
 * Handle "spurious INTerrupts".
 *
 * This is different than the "spurious INTerrupt" generated by an
 * 8259 PIC for missing INTs.  See the APIC documentation for details.
 * This routine should NOT do an 'EOI' cycle.
 */

        /* No EOI cycle used here */

/*
 * Handle TLB shootdowns.
 */
        movl %cr3, %eax		/* invalidate the TLB */
        ss			/* stack segment, avoid %ds load */
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */
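/*
 * Illustrative note (not part of the original source): the shootdown
 * handler invalidates the TLB by reloading %cr3 with its current value,
 * which flushes all non-global entries.  A hedged C sketch using a
 * hypothetical helper name:
 *
 *	static __inline void
 *	tlb_flush_all(void)
 *	{
 *		uint32_t cr3;
 *
 *		__asm __volatile("movl %%cr3,%0; movl %0,%%cr3"
 *				 : "=r" (cr3) : : "memory");
 *	}
 */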
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
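/*
 * Illustrative sketch (not part of the original source): the stop/restart
 * handshake below, expressed as hedged C pseudocode over the stopped_cpus
 * and started_cpus bitmasks.  cpu_stop_handshake(), atomic_set_bit() and
 * atomic_clear_bit() are hypothetical names used only for illustration.
 *
 *	void
 *	cpu_stop_handshake(int cpuid)
 *	{
 *		savectx(&stoppcbs[cpuid]);		// save our context
 *		atomic_set_bit(&stopped_cpus, cpuid);	// signal receipt
 *		while ((started_cpus & (1 << cpuid)) == 0)
 *			lwkt_smp_stopped();		// keep servicing IPIQs
 *		atomic_clear_bit(&started_cpus, cpuid);
 *		atomic_clear_bit(&stopped_cpus, cpuid);	// signal restart
 *		if (cpustop_restartfunc != NULL) {
 *			void (*func)(void) = cpustop_restartfunc;
 *			cpustop_restartfunc = NULL;	// one-shot
 *			func();
 *		}
 *	}
 */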
        pushl %ds		/* save current data segment */
        mov %ax, %ds		/* use KERNEL data segment */
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */

        movl PCPU(cpuid), %eax
        imull $PCB_SIZE, %eax
        leal CNAME(stoppcbs)(%eax), %eax
        call CNAME(savectx)	/* Save process context */

        movl PCPU(cpuid), %eax

        /*
         * Indicate that we have stopped and loop waiting for permission
         * to start again.  We must still process IPI events while in a
         * stopped state.
         */
        btsl %eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
        andl $~RQF_IPIQ,PCPU(reqflags)
        call lwkt_smp_stopped
        btl %eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
        btrl %eax, started_cpus	/* started_cpus &= ~(1<<id) */
        btrl %eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

        movl CNAME(cpustop_restartfunc), %eax
        movl $0, CNAME(cpustop_restartfunc)	/* One-shot */

        popl %ds		/* restore previous data segment */
/*
 * For now just have one ipiq IPI, but what we really want is
 * to have one for each source cpu so the APICs don't get stalled
 * backlogging the requests.
 */
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl PCPU(cnt) + V_IPI
        movl PCPU(curthread),%ebx
        cmpl $TDPRI_CRIT,TD_PRI(%ebx)

        subl $8,%esp		/* make same as interrupt frame */
        pushl %esp		/* pass frame by reference */
        incl PCPU(intr_nesting_level)
        addl $TDPRI_CRIT,TD_PRI(%ebx)
        call lwkt_process_ipiq_frame
        subl $TDPRI_CRIT,TD_PRI(%ebx)
        decl PCPU(intr_nesting_level)

        pushl $0		/* CPL for frame (REMOVED) */

        orl $RQF_IPIQ,PCPU(reqflags)
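/*
 * Illustrative sketch (not part of the original source): like the device
 * interrupt paths, the IPIQ vector above defers its work when it arrives
 * inside a critical section, setting RQF_IPIQ so doreti processes the
 * queue later.  Hedged C pseudocode; ipiq_intr_flow() is hypothetical.
 *
 *	void
 *	ipiq_intr_flow(struct thread *td, struct globaldata *gd,
 *		       struct intrframe *frame)
 *	{
 *		lapic_eoi();
 *		gd->gd_cnt.v_ipi++;
 *		if (td->td_pri >= TDPRI_CRIT) {
 *			gd->gd_reqflags |= RQF_IPIQ;	// defer to doreti
 *		} else {
 *			gd->gd_intr_nesting_level++;
 *			td->td_pri += TDPRI_CRIT;	// enter critical section
 *			lwkt_process_ipiq_frame(frame);
 *			td->td_pri -= TDPRI_CRIT;
 *			gd->gd_intr_nesting_level--;
 *		}
 *	}
 */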
        movl $0, lapic_eoi	/* End Of Interrupt to APIC */
        FAKE_MCOUNT(15*4(%esp))

        incl PCPU(cnt) + V_TIMER
        movl PCPU(curthread),%ebx
        cmpl $TDPRI_CRIT,TD_PRI(%ebx)
        testl $-1,TD_NEST_COUNT(%ebx)

        subl $8,%esp		/* make same as interrupt frame */
        pushl %esp		/* pass frame by reference */
        incl PCPU(intr_nesting_level)
        addl $TDPRI_CRIT,TD_PRI(%ebx)
        call lapic_timer_process_frame
        subl $TDPRI_CRIT,TD_PRI(%ebx)
        decl PCPU(intr_nesting_level)

        pushl $0		/* CPL for frame (REMOVED) */

        orl $RQF_TIMER,PCPU(reqflags)
        FAST_INTR(0,apic_fastintr0)
        FAST_INTR(1,apic_fastintr1)
        FAST_INTR(2,apic_fastintr2)
        FAST_INTR(3,apic_fastintr3)
        FAST_INTR(4,apic_fastintr4)
        FAST_INTR(5,apic_fastintr5)
        FAST_INTR(6,apic_fastintr6)
        FAST_INTR(7,apic_fastintr7)
        FAST_INTR(8,apic_fastintr8)
        FAST_INTR(9,apic_fastintr9)
        FAST_INTR(10,apic_fastintr10)
        FAST_INTR(11,apic_fastintr11)
        FAST_INTR(12,apic_fastintr12)
        FAST_INTR(13,apic_fastintr13)
        FAST_INTR(14,apic_fastintr14)
        FAST_INTR(15,apic_fastintr15)
        FAST_INTR(16,apic_fastintr16)
        FAST_INTR(17,apic_fastintr17)
        FAST_INTR(18,apic_fastintr18)
        FAST_INTR(19,apic_fastintr19)
        FAST_INTR(20,apic_fastintr20)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
/* YYY what is this garbage? */

        SLOW_INTR(0,apic_slowintr0,)
        SLOW_INTR(1,apic_slowintr1,)
        SLOW_INTR(2,apic_slowintr2,)
        SLOW_INTR(3,apic_slowintr3,)
        SLOW_INTR(4,apic_slowintr4,)
        SLOW_INTR(5,apic_slowintr5,)
        SLOW_INTR(6,apic_slowintr6,)
        SLOW_INTR(7,apic_slowintr7,)
        SLOW_INTR(8,apic_slowintr8,)
        SLOW_INTR(9,apic_slowintr9,)
        SLOW_INTR(10,apic_slowintr10,)
        SLOW_INTR(11,apic_slowintr11,)
        SLOW_INTR(12,apic_slowintr12,)
        SLOW_INTR(13,apic_slowintr13,)
        SLOW_INTR(14,apic_slowintr14,)
        SLOW_INTR(15,apic_slowintr15,)
        SLOW_INTR(16,apic_slowintr16,)
        SLOW_INTR(17,apic_slowintr17,)
        SLOW_INTR(18,apic_slowintr18,)
        SLOW_INTR(19,apic_slowintr19,)
        SLOW_INTR(20,apic_slowintr20,)
        SLOW_INTR(21,apic_slowintr21,)
        SLOW_INTR(22,apic_slowintr22,)
        SLOW_INTR(23,apic_slowintr23,)
        WRONGINTR(0,apic_wrongintr0)
        WRONGINTR(1,apic_wrongintr1)
        WRONGINTR(2,apic_wrongintr2)
        WRONGINTR(3,apic_wrongintr3)
        WRONGINTR(4,apic_wrongintr4)
        WRONGINTR(5,apic_wrongintr5)
        WRONGINTR(6,apic_wrongintr6)
        WRONGINTR(7,apic_wrongintr7)
        WRONGINTR(8,apic_wrongintr8)
        WRONGINTR(9,apic_wrongintr9)
        WRONGINTR(10,apic_wrongintr10)
        WRONGINTR(11,apic_wrongintr11)
        WRONGINTR(12,apic_wrongintr12)
        WRONGINTR(13,apic_wrongintr13)
        WRONGINTR(14,apic_wrongintr14)
        WRONGINTR(15,apic_wrongintr15)
        WRONGINTR(16,apic_wrongintr16)
        WRONGINTR(17,apic_wrongintr17)
        WRONGINTR(18,apic_wrongintr18)
        WRONGINTR(19,apic_wrongintr19)
        WRONGINTR(20,apic_wrongintr20)
        WRONGINTR(21,apic_wrongintr21)
        WRONGINTR(22,apic_wrongintr22)
        WRONGINTR(23,apic_wrongintr23)
        /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
        .globl stopped_cpus, started_cpus

        .globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):

        .globl apic_pin_trigger