2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
4 * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.19 2005/06/16 21:12:47 dillon Exp $
8 #include <machine/apicreg.h>
9 #include <machine/smp.h>
10 #include "i386/isa/intr_machdep.h"
12 /* convert an absolute IRQ# into a bitmask */
/* (bit N of the mask corresponds to IRQ N; meaningful for irq_num 0..31 only) */
13 #define IRQ_LBIT(irq_num) (1 << (irq_num))
15 /* make an index into the IO APIC from the IRQ# */
/* (each redirection entry occupies two 32-bit index registers, starting at 0x10) */
16 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
19 * Push an interrupt frame in a format acceptable to doreti, reload
20 * the segment registers for the kernel.
23 pushl $0 ; /* dummy error code */ \
24 pushl $0 ; /* dummy trap type */ \
26 pushl %ds ; /* save data and extra segments ... */ \
36 pushfl ; /* phys int frame / flags */ \
37 pushl %cs ; /* phys int frame / cs */ \
38 pushl 12(%esp) ; /* original caller eip */ \
39 pushl $0 ; /* dummy error code */ \
40 pushl $0 ; /* dummy trap type */ \
41 subl $12*4,%esp ; /* reserve 12 dwords: pushal regs + 3 seg regs (dummy) + CPL */ \
44 * Warning: POP_FRAME can only be used if there is no chance of a
45 * segment register being changed (e.g. by procfs), which is why syscalls
53 addl $2*4,%esp ; /* discard the 2 dummy trap-type/error-code dwords */ \
58 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8 /* ioapic register-select addr (16-byte entries, offset 8) */
59 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12 /* redirection-table index for this IRQ (offset 12) */
62 * Interrupts are expected to already be disabled when using these
66 SPIN_LOCK(imen_spinlock) ; /* protects updates to apic_imen */ \
68 #define IMASK_UNLOCK \
69 SPIN_UNLOCK(imen_spinlock) ; /* release the apic_imen lock */ \
71 #define MASK_IRQ(irq_num) \
72 IMASK_LOCK ; /* into critical region */ \
73 testl $IRQ_LBIT(irq_num), apic_imen ; /* already masked? */ \
74 jne 7f ; /* masked, don't mask */ \
75 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
76 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
77 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
78 movl %eax, (%ecx) ; /* write the index (select redirection entry) */ \
79 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
80 orl $IOART_INTMASK, %eax ; /* set the mask */ \
81 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
82 7: ; /* already masked */ \
86 * Test to see whether we are handling an edge or level triggered INT.
87 * Level-triggered INTs must still be masked as we don't clear the source,
88 * and the EOI cycle would cause redundant INTs to occur.
90 #define MASK_LEVEL_IRQ(irq_num) \
91 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; /* level-triggered? */ \
92 jz 9f ; /* edge, don't mask */ \
97 #ifdef APIC_INTR_REORDER
98 #define EOI_IRQ(irq_num) \
99 movl apic_isrbit_location + 8 * (irq_num), %eax ; /* ptr to this irq's ISR word (8-byte entries) */ \
100 movl (%eax), %eax ; /* fetch the ISR word */ \
101 testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; /* irq's ISR bit set? */ \
102 jz 9f ; /* not active */ \
103 movl $0, lapic_eoi ; /* issue EOI */ \
108 #define EOI_IRQ(irq_num) \
109 testl $IRQ_LBIT(irq_num), lapic_isr1; /* irq in service? */ \
110 jz 9f ; /* not active */ \
111 movl $0, lapic_eoi; /* issue EOI */ \
117 * Test to see if the source is currently masked, clear if so.
119 #define UNMASK_IRQ(irq_num) \
120 IMASK_LOCK ; /* into critical region */ \
121 testl $IRQ_LBIT(irq_num), apic_imen ; /* currently masked? */ \
122 je 7f ; /* bit clear, not masked */ \
123 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
124 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
125 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
126 movl %eax,(%ecx) ; /* write the index (select redirection entry) */ \
127 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
128 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
129 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
134 * Fast interrupt call handlers run in the following sequence:
136 * - Push the trap frame required by doreti
137 * - Mask the interrupt and reenable its source
138 * - If we cannot take the interrupt set its fpending bit and
139 * doreti. Note that we cannot mess with mp_lock at all
140 * if we entered from a critical section!
141 * - If we can take the interrupt clear its fpending bit,
142 * call the handler, then unmask and doreti.
144 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
147 #define FAST_INTR(irq_num, vec_name) \
152 FAKE_MCOUNT(13*4(%esp)) ; \
153 MASK_LEVEL_IRQ(irq_num) ; /* mask the source if level-triggered */ \
155 movl PCPU(curthread),%ebx ; /* %ebx = current thread */ \
156 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
158 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
161 /* in critical section, make interrupt pending */ \
162 /* set the pending bit and return, leave interrupt masked */ \
163 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
164 orl $RQF_INTPEND,PCPU(reqflags) ; \
167 /* try to get the MP lock */ \
171 /* clear pending bit, run handler */ \
172 incl PCPU(intr_nesting_level) ; \
173 addl $TDPRI_CRIT,TD_PRI(%ebx) ; /* enter critical section */ \
174 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
175 pushl intr_unit + (irq_num) * 4 ; /* handler argument */ \
176 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
178 subl $TDPRI_CRIT,TD_PRI(%ebx) ; /* leave critical section */ \
179 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
180 movl intr_countp + (irq_num) * 4, %eax ; \
182 decl PCPU(intr_nesting_level) ; \
184 UNMASK_IRQ(irq_num) ; \
189 /* could not get the MP lock, forward the interrupt */ \
190 movl mp_lock, %eax ; /* check race */ \
191 cmpl $MP_FREE_LOCK,%eax ; \
193 incl PCPU(cnt)+V_FORWARDED_INTS ; \
195 movl $irq_num,8(%esp) ; /* ipi arg: irq number */ \
196 movl $forward_fastint_remote,4(%esp) ; /* ipi arg: function to run remotely */ \
198 call lwkt_send_ipiq_bycpu ; \
203 * Restart fast interrupt held up by critical section or cpl.
205 * - Push a dummy trap frame as required by doreti
206 * - The interrupt source is already masked
207 * - Clear the fpending bit
209 * - Unmask the interrupt
210 * - Pop the dummy frame and do a normal return
212 * The BGL is held on call and left held on return.
214 * YYY can cache gd base pointer instead of using hidden %fs
218 #define FAST_UNPEND(irq_num, vec_name) \
225 pushl intr_unit + (irq_num) * 4 ; /* handler argument */ \
226 call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
228 incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \
229 movl intr_countp + (irq_num) * 4, %eax ; \
231 UNMASK_IRQ(irq_num) ; \
237 * Slow interrupt call handlers run in the following sequence:
239 * - Push the trap frame required by doreti.
240 * - Mask the interrupt and reenable its source.
241 * - If we cannot take the interrupt set its ipending bit and
242 * doreti. In addition to checking for a critical section
243 * and cpl mask we also check to see if the thread is still
244 * running. Note that we cannot mess with mp_lock at all
245 * if we entered from a critical section!
246 * - If we can take the interrupt clear its ipending bit
247 * and schedule the thread. Leave interrupts masked and doreti.
249 * Note that calls to sched_ithd() are made with interrupts enabled
250 * and outside a critical section. YYY sched_ithd may preempt us
251 * synchronously (fix interrupt stacking).
253 * YYY can cache gd base pointer instead of using hidden %fs
257 #define INTR(irq_num, vec_name, maybe_extra_ipending) \
262 maybe_extra_ipending ; \
264 MASK_LEVEL_IRQ(irq_num) ; /* mask the source if level-triggered */ \
266 movl PCPU(curthread),%ebx ; /* %ebx = current thread */ \
267 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
268 pushl %eax ; /* cpl to restore */ \
269 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; /* already in a critical section? */ \
272 /* set the pending bit and return, leave the interrupt masked */ \
273 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
274 orl $RQF_INTPEND,PCPU(reqflags) ; \
277 /* set running bit, clear pending bit, run handler */ \
278 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
283 incl PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */ \
284 movl intr_countp + (irq_num) * 4,%eax ; \
292 * Handle "spurious INTerrupts".
294 * This is different than the "spurious INTerrupt" generated by an
295 * 8259 PIC for missing INTs. See the APIC documentation for details.
296 * This routine should NOT do an 'EOI' cycle.
303 /* No EOI cycle used here */
309 * Handle TLB shootdowns.
317 #ifdef COUNT_XINVLTLB_HITS
321 movl PCPU(cpuid), %eax /* per-cpu hit accounting (COUNT_XINVLTLB_HITS) */
325 #endif /* COUNT_XINVLTLB_HITS */
327 movl %cr3, %eax /* invalidate the TLB */
330 ss /* stack segment, avoid %ds load */
331 movl $0, lapic_eoi /* End Of Interrupt to APIC */
338 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
340 * - Signals its receipt.
341 * - Waits for permission to restart.
342 * - Signals its restart.
354 pushl %ds /* save current data segment */
358 mov %ax, %ds /* use KERNEL data segment */
362 movl $0, lapic_eoi /* End Of Interrupt to APIC */
364 movl PCPU(cpuid), %eax
365 imull $PCB_SIZE, %eax /* byte offset into stoppcbs[] */
366 leal CNAME(stoppcbs)(%eax), %eax /* %eax = &stoppcbs[cpuid] */
368 call CNAME(savectx) /* Save process context */
372 movl PCPU(cpuid), %eax
375 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
377 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
381 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
383 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
388 movl CNAME(cpustop_restartfunc), %eax /* optional restart hook */
391 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
396 popl %ds /* restore previous data segment */
405 * For now just have one ipiq IPI, but what we really want is
406 * to have one for each source cpu so the APICs don't get stalled
407 * backlogging the requests.
414 movl $0, lapic_eoi /* End Of Interrupt to APIC */
415 FAKE_MCOUNT(13*4(%esp))
417 movl PCPU(curthread),%ebx /* %ebx = current thread */
418 cmpl $TDPRI_CRIT,TD_PRI(%ebx) /* already in a critical section? */
420 subl $8,%esp /* make same as interrupt frame */
421 incl PCPU(intr_nesting_level)
422 addl $TDPRI_CRIT,TD_PRI(%ebx) /* enter critical section */
423 call lwkt_process_ipiq_frame
424 subl $TDPRI_CRIT,TD_PRI(%ebx) /* leave critical section */
425 decl PCPU(intr_nesting_level)
427 pushl $0 /* CPL for frame (REMOVED) */
431 orl $RQF_IPIQ,PCPU(reqflags) /* mark ipiq pending for later */
/* instantiate the fast interrupt vectors, one per IRQ 0-23 */
437 FAST_INTR(0,fastintr0)
438 FAST_INTR(1,fastintr1)
439 FAST_INTR(2,fastintr2)
440 FAST_INTR(3,fastintr3)
441 FAST_INTR(4,fastintr4)
442 FAST_INTR(5,fastintr5)
443 FAST_INTR(6,fastintr6)
444 FAST_INTR(7,fastintr7)
445 FAST_INTR(8,fastintr8)
446 FAST_INTR(9,fastintr9)
447 FAST_INTR(10,fastintr10)
448 FAST_INTR(11,fastintr11)
449 FAST_INTR(12,fastintr12)
450 FAST_INTR(13,fastintr13)
451 FAST_INTR(14,fastintr14)
452 FAST_INTR(15,fastintr15)
453 FAST_INTR(16,fastintr16)
454 FAST_INTR(17,fastintr17)
455 FAST_INTR(18,fastintr18)
456 FAST_INTR(19,fastintr19)
457 FAST_INTR(20,fastintr20)
458 FAST_INTR(21,fastintr21)
459 FAST_INTR(22,fastintr22)
460 FAST_INTR(23,fastintr23)
462 /* YYY what is this garbage? */
/* instantiate the restart handlers for fast interrupts left pending, IRQs 0-23 */
489 FAST_UNPEND(0,fastunpend0)
490 FAST_UNPEND(1,fastunpend1)
491 FAST_UNPEND(2,fastunpend2)
492 FAST_UNPEND(3,fastunpend3)
493 FAST_UNPEND(4,fastunpend4)
494 FAST_UNPEND(5,fastunpend5)
495 FAST_UNPEND(6,fastunpend6)
496 FAST_UNPEND(7,fastunpend7)
497 FAST_UNPEND(8,fastunpend8)
498 FAST_UNPEND(9,fastunpend9)
499 FAST_UNPEND(10,fastunpend10)
500 FAST_UNPEND(11,fastunpend11)
501 FAST_UNPEND(12,fastunpend12)
502 FAST_UNPEND(13,fastunpend13)
503 FAST_UNPEND(14,fastunpend14)
504 FAST_UNPEND(15,fastunpend15)
505 FAST_UNPEND(16,fastunpend16)
506 FAST_UNPEND(17,fastunpend17)
507 FAST_UNPEND(18,fastunpend18)
508 FAST_UNPEND(19,fastunpend19)
509 FAST_UNPEND(20,fastunpend20)
510 FAST_UNPEND(21,fastunpend21)
511 FAST_UNPEND(22,fastunpend22)
512 FAST_UNPEND(23,fastunpend23)
516 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
518 * - Calls the generic rendezvous action function.
526 mov %ax, %ds /* use KERNEL data segment */
531 call smp_rendezvous_action /* run the generic rendezvous callback */
533 movl $0, lapic_eoi /* End Of Interrupt to APIC */
540 #ifdef COUNT_XINVLTLB_HITS
544 #endif /* COUNT_XINVLTLB_HITS */
546 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
547 .globl stopped_cpus, started_cpus
553 .globl CNAME(cpustop_restartfunc)
554 CNAME(cpustop_restartfunc):
/* bitmask of level-triggered IRQs, tested by MASK_LEVEL_IRQ */
557 .globl apic_pin_trigger