2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * from: vector.s, 386BSD 0.1 unknown origin
32 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
33 * $DragonFly: src/sys/platform/pc64/apic/apic_vector.s,v 1.1 2008/08/29 17:07:12 dillon Exp $
37 #include "opt_auto_eoi.h"
39 #include <machine/asmacros.h>
40 #include <machine/lock.h>
41 #include <machine/psl.h>
42 #include <machine/trap.h>
44 #include <machine_base/icu/icu.h>
45 #include <bus/isa/isa.h>
51 #include <machine/smp.h>
52 #include <machine_base/isa/intr_machdep.h>
54 /* convert an absolute IRQ# into a bitmask */
55 #define IRQ_LBIT(irq_num) (1 << (irq_num))
57 /* make an index into the IO APIC from the IRQ# */
58 #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/*
 * lock prefix for atomic read-modify-write instructions; emitted
 * unconditionally here (SMP-capable APIC code).
 */
61 #define MPLOCKED lock ;
67 * Push an interrupt frame in a format acceptable to doreti, reload
68 * the segment registers for the kernel.
71 pushl $0 ; /* dummy error code */ \
72 pushl $0 ; /* dummy trap type */ \
73 pushl $0 ; /* dummy xflags type */ \
75 pushl %ds ; /* save data and extra segments ... */ \
88 pushfl ; /* phys int frame / flags */ \
89 pushl %cs ; /* phys int frame / cs */ \
90 pushl 12(%esp) ; /* original caller eip */ \
91 pushl $0 ; /* dummy error code */ \
92 pushl $0 ; /* dummy trap type */ \
93 pushl $0 ; /* dummy xflags type */ \
94 subl $13*4,%esp ; /* pushal + 4 seg regs (dummy) + CPL */ \
97 * Warning: POP_FRAME can only be used if there is no chance of a
98 * segment register being changed (e.g. by procfs), which is why syscalls
107 addl $3*4,%esp ; /* dummy xflags, trap & error codes */ \
/*
 * Address helpers into the int_to_apicintpin[] table (16-byte entries):
 * offset 8 holds the IO APIC register-select address for the IRQ,
 * offset 12 holds the redirection-table index for the IRQ's pin.
 */
112 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
113 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
/*
 * MASK_IRQ(irq_num):
 *	Under APIC_IMASK_LOCK, set the IRQ's bit in the apic_imen software
 *	mask and set IOART_INTMASK in the IRQ's IO APIC redirection entry
 *	(select via the index register, modify via the window register).
 *	If the bit is already set in apic_imen the hardware update is
 *	skipped (jump to local label 7).
 */
115 #define MASK_IRQ(irq_num) \
116 APIC_IMASK_LOCK ; /* into critical reg */ \
117 testl $IRQ_LBIT(irq_num), apic_imen ; \
118 jne 7f ; /* masked, don't mask */ \
119 orl $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */ \
120 movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
121 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
122 movl %eax, (%ecx) ; /* write the index */ \
123 movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
124 orl $IOART_INTMASK, %eax ; /* set the mask */ \
125 movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
126 7: ; /* already masked */ \
127 APIC_IMASK_UNLOCK ; \
130 * Test to see whether we are handling an edge or level triggered INT.
131 * Level-triggered INTs must still be masked as we don't clear the source,
132 * and the EOI cycle would cause redundant INTs to occur.
/*
 * MASK_LEVEL_IRQ(irq_num):
 *	Mask the IRQ only when its bit is set in apic_pin_trigger
 *	(level-triggered); edge-triggered IRQs fall through unmasked.
 *	NOTE(review): the 9f target label lies outside this visible chunk.
 */
134 #define MASK_LEVEL_IRQ(irq_num) \
135 testl $IRQ_LBIT(irq_num), apic_pin_trigger ; \
136 jz 9f ; /* edge, don't mask */ \
137 MASK_IRQ(irq_num) ; \
141 * Test to see if the source is currently masked, clear if so.
/*
 * UNMASK_IRQ(irq_num):
 *	Inverse of MASK_IRQ: under APIC_IMASK_LOCK, clear the IRQ's bit in
 *	apic_imen and clear IOART_INTMASK in the IO APIC redirection entry.
 *	No-op (jump to local 7) if the bit is already clear.
 *	NOTE(review): some interior lines of this macro (labels) are not
 *	visible in this chunk.
 */
143 #define UNMASK_IRQ(irq_num) \
146 APIC_IMASK_LOCK ; /* into critical reg */ \
147 testl $IRQ_LBIT(irq_num), apic_imen ; \
148 je 7f ; /* bit clear, not masked */ \
149 andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \
150 movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \
151 movl REDIRIDX(irq_num), %eax ; /* get the index */ \
152 movl %eax,(%ecx) ; /* write the index */ \
153 movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
154 andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
155 movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
157 APIC_IMASK_UNLOCK ; \
163 * Fast interrupt call handlers run in the following sequence:
165 * - Push the trap frame required by doreti
166 * - Mask the interrupt and reenable its source
167 * - If we cannot take the interrupt set its fpending bit and
168 * doreti. Note that we cannot mess with mp_lock at all
169 * if we entered from a critical section!
170 * - If we can take the interrupt clear its fpending bit,
171 * call the handler, then unmask and doreti.
173 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
/*
 * FAST_INTR(irq_num, vec_name):
 *	Fast-interrupt vector stub.  Masks level-triggered sources, EOIs
 *	the local APIC, then either records the interrupt in the per-cpu
 *	fpending/reqflags words (when nested or inside a critical section)
 *	or clears fpending and calls ithread_fast_handler() with the frame
 *	passed by reference; UNMASK_IRQ runs when the handler returns 0.
 *	NOTE(review): the ENTRY/PUSH_FRAME prologue, the branch targets
 *	for the testl/cmpl checks, and the doreti epilogue are missing
 *	from this visible chunk.
 */
176 #define FAST_INTR(irq_num, vec_name) \
181 FAKE_MCOUNT(15*4(%esp)) ; \
182 MASK_LEVEL_IRQ(irq_num) ; \
183 movl $0, lapic_eoi ; \
184 movl PCPU(curthread),%ebx ; \
185 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
187 testl $-1,TD_NEST_COUNT(%ebx) ; \
189 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
192 /* in critical section, make interrupt pending */ \
193 /* set the pending bit and return, leave interrupt masked */ \
194 orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \
195 orl $RQF_INTPEND,PCPU(reqflags) ; \
198 /* clear pending bit, run handler */ \
199 andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \
201 pushl %esp ; /* pass frame by reference */ \
202 call ithread_fast_handler ; /* returns 0 to unmask */ \
204 UNMASK_IRQ(irq_num) ; \
210 * Slow interrupt call handlers run in the following sequence:
212 * - Push the trap frame required by doreti.
213 * - Mask the interrupt and reenable its source.
214 * - If we cannot take the interrupt set its ipending bit and
215 * doreti. In addition to checking for a critical section
216 * and cpl mask we also check to see if the thread is still
217 * running. Note that we cannot mess with mp_lock at all
218 * if we entered from a critical section!
219 * - If we can take the interrupt clear its ipending bit
220 * and schedule the thread. Leave interrupts masked and doreti.
222 * Note that calls to sched_ithd() are made with interrupts enabled
223 * and outside a critical section. YYY sched_ithd may preempt us
224 * synchronously (fix interrupt stacking).
226 * YYY can cache gd base pointer instead of using hidden %fs
/*
 * SLOW_INTR(irq_num, vec_name, maybe_extra_ipending):
 *	Threaded-interrupt vector stub.  Masks level-triggered sources,
 *	bumps the interrupt counter, EOIs the local APIC, then either
 *	records the interrupt in the per-cpu ipending/reqflags words
 *	(when nested or inside a critical section) or clears ipending and
 *	schedules the ithread with TD_NEST_COUNT held across the call.
 *	NOTE(review): the ENTRY/PUSH_FRAME prologue, branch targets, the
 *	sched_ithd call and the doreti epilogue are missing from this
 *	visible chunk.
 */
230 #define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending) \
235 maybe_extra_ipending ; \
237 MASK_LEVEL_IRQ(irq_num) ; \
238 incl PCPU(cnt) + V_INTR ; \
239 movl $0, lapic_eoi ; \
240 movl PCPU(curthread),%ebx ; \
241 movl $0,%eax ; /* CURRENT CPL IN FRAME (REMOVED) */ \
242 pushl %eax ; /* cpl to restore */ \
243 testl $-1,TD_NEST_COUNT(%ebx) ; \
245 cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \
248 /* set the pending bit and return, leave the interrupt masked */ \
249 orl $IRQ_LBIT(irq_num), PCPU(ipending) ; \
250 orl $RQF_INTPEND,PCPU(reqflags) ; \
253 /* set running bit, clear pending bit, run handler */ \
254 andl $~IRQ_LBIT(irq_num), PCPU(ipending) ; \
255 incl TD_NEST_COUNT(%ebx) ; \
261 decl TD_NEST_COUNT(%ebx) ; \
267 * Wrong interrupt call handlers. We program these into APIC vectors
268 * that should otherwise never occur. For example, we program the SLOW
269 * vector for irq N with this when we program the FAST vector with the
272 * XXX for now all we can do is EOI it. We can't call do_wrongintr
273 * (yet) because we could be in a critical section.
/*
 * WRONGINTR(irq_num, vec_name):
 *	Stub for vectors that should never fire; simply EOIs the local
 *	APIC.  The do_wrongintr call is disabled (commented out) because
 *	the cpu may be inside a critical section here.
 *	NOTE(review): the ENTRY prologue/epilogue lines are missing from
 *	this visible chunk.
 */
275 #define WRONGINTR(irq_num,vec_name) \
280 movl $0, lapic_eoi ; /* End Of Interrupt to APIC */ \
281 /*pushl $irq_num ;*/ \
282 /*call do_wrongintr ;*/ \
290 * Handle "spurious INTerrupts".
292 * This is different than the "spurious INTerrupt" generated by an
293 * 8259 PIC for missing INTs. See the APIC documentation for details.
294 * This routine should NOT do an 'EOI' cycle.
301 /* No EOI cycle used here */
307 * Handle TLB shootdowns.
/*
 * NOTE(review): the entry label and surrounding prologue/epilogue for
 * this handler are missing from this visible chunk.  Reloading %cr3
 * with its own value flushes the non-global TLB entries.
 */
315 movl %cr3, %eax /* invalidate the TLB */
318 ss /* stack segment, avoid %ds load */
319 movl $0, lapic_eoi /* End Of Interrupt to APIC */
326 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
328 * - Signals its receipt.
329 * - Waits for permission to restart.
330 * - Processing pending IPIQ events while waiting.
331 * - Signals its restart.
/*
 * NOTE(review): the entry label, segment-selector load, wait-loop branch
 * targets and epilogue are missing from this visible chunk; comments
 * below annotate only the visible instructions.
 */
343 pushl %ds /* save current data segment */
347 mov %ax, %ds /* use KERNEL data segment */
351 movl $0, lapic_eoi /* End Of Interrupt to APIC */
353 movl PCPU(cpuid), %eax
354 imull $PCB_SIZE, %eax
355 leal CNAME(stoppcbs)(%eax), %eax
/* %eax = &stoppcbs[cpuid] (PCB_SIZE-byte entries) */
357 call CNAME(savectx) /* Save process context */
361 movl PCPU(cpuid), %eax
364 * Indicate that we have stopped and loop waiting for permission
365 * to start again. We must still process IPI events while in a
369 btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
371 andl $~RQF_IPIQ,PCPU(reqflags)
373 call lwkt_smp_stopped
375 btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
379 btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
381 btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
386 movl CNAME(cpustop_restartfunc), %eax
389 movl $0, CNAME(cpustop_restartfunc) /* One-shot */
394 popl %ds /* restore previous data segment */
403 * For now just have one ipiq IPI, but what we really want is
404 * to have one for each source cpu so the APICs don't get stalled
405 * backlogging the requests.
/*
 * NOTE(review): the entry label, PUSH_FRAME prologue, branch targets and
 * doreti epilogue are missing from this visible chunk.  If not already
 * in a critical section the handler enters one (TD_PRI += TDPRI_CRIT),
 * processes the IPI queue via lwkt_process_ipiq_frame(), then exits;
 * otherwise it just sets RQF_IPIQ for later processing.
 */
412 movl $0, lapic_eoi /* End Of Interrupt to APIC */
413 FAKE_MCOUNT(15*4(%esp))
415 movl PCPU(curthread),%ebx
416 cmpl $TDPRI_CRIT,TD_PRI(%ebx)
418 subl $8,%esp /* make same as interrupt frame */
419 pushl %esp /* pass frame by reference */
420 incl PCPU(intr_nesting_level)
421 addl $TDPRI_CRIT,TD_PRI(%ebx)
422 call lwkt_process_ipiq_frame
423 subl $TDPRI_CRIT,TD_PRI(%ebx)
424 decl PCPU(intr_nesting_level)
426 pushl $0 /* CPL for frame (REMOVED) */
430 orl $RQF_IPIQ,PCPU(reqflags)
/* One FAST_INTR handler instantiation per supported IO APIC IRQ (0-23). */
438 FAST_INTR(0,apic_fastintr0)
439 FAST_INTR(1,apic_fastintr1)
440 FAST_INTR(2,apic_fastintr2)
441 FAST_INTR(3,apic_fastintr3)
442 FAST_INTR(4,apic_fastintr4)
443 FAST_INTR(5,apic_fastintr5)
444 FAST_INTR(6,apic_fastintr6)
445 FAST_INTR(7,apic_fastintr7)
446 FAST_INTR(8,apic_fastintr8)
447 FAST_INTR(9,apic_fastintr9)
448 FAST_INTR(10,apic_fastintr10)
449 FAST_INTR(11,apic_fastintr11)
450 FAST_INTR(12,apic_fastintr12)
451 FAST_INTR(13,apic_fastintr13)
452 FAST_INTR(14,apic_fastintr14)
453 FAST_INTR(15,apic_fastintr15)
454 FAST_INTR(16,apic_fastintr16)
455 FAST_INTR(17,apic_fastintr17)
456 FAST_INTR(18,apic_fastintr18)
457 FAST_INTR(19,apic_fastintr19)
458 FAST_INTR(20,apic_fastintr20)
459 FAST_INTR(21,apic_fastintr21)
460 FAST_INTR(22,apic_fastintr22)
461 FAST_INTR(23,apic_fastintr23)
463 /* YYY what is this garbage? */
/* One SLOW_INTR (threaded) handler instantiation per IO APIC IRQ (0-23);
 * the third macro argument (extra ipending work) is empty for all. */
465 SLOW_INTR(0,apic_slowintr0,)
466 SLOW_INTR(1,apic_slowintr1,)
467 SLOW_INTR(2,apic_slowintr2,)
468 SLOW_INTR(3,apic_slowintr3,)
469 SLOW_INTR(4,apic_slowintr4,)
470 SLOW_INTR(5,apic_slowintr5,)
471 SLOW_INTR(6,apic_slowintr6,)
472 SLOW_INTR(7,apic_slowintr7,)
473 SLOW_INTR(8,apic_slowintr8,)
474 SLOW_INTR(9,apic_slowintr9,)
475 SLOW_INTR(10,apic_slowintr10,)
476 SLOW_INTR(11,apic_slowintr11,)
477 SLOW_INTR(12,apic_slowintr12,)
478 SLOW_INTR(13,apic_slowintr13,)
479 SLOW_INTR(14,apic_slowintr14,)
480 SLOW_INTR(15,apic_slowintr15,)
481 SLOW_INTR(16,apic_slowintr16,)
482 SLOW_INTR(17,apic_slowintr17,)
483 SLOW_INTR(18,apic_slowintr18,)
484 SLOW_INTR(19,apic_slowintr19,)
485 SLOW_INTR(20,apic_slowintr20,)
486 SLOW_INTR(21,apic_slowintr21,)
487 SLOW_INTR(22,apic_slowintr22,)
488 SLOW_INTR(23,apic_slowintr23,)
/* One WRONGINTR (should-never-fire) handler instantiation per IRQ (0-23). */
490 WRONGINTR(0,apic_wrongintr0)
491 WRONGINTR(1,apic_wrongintr1)
492 WRONGINTR(2,apic_wrongintr2)
493 WRONGINTR(3,apic_wrongintr3)
494 WRONGINTR(4,apic_wrongintr4)
495 WRONGINTR(5,apic_wrongintr5)
496 WRONGINTR(6,apic_wrongintr6)
497 WRONGINTR(7,apic_wrongintr7)
498 WRONGINTR(8,apic_wrongintr8)
499 WRONGINTR(9,apic_wrongintr9)
500 WRONGINTR(10,apic_wrongintr10)
501 WRONGINTR(11,apic_wrongintr11)
502 WRONGINTR(12,apic_wrongintr12)
503 WRONGINTR(13,apic_wrongintr13)
504 WRONGINTR(14,apic_wrongintr14)
505 WRONGINTR(15,apic_wrongintr15)
506 WRONGINTR(16,apic_wrongintr16)
507 WRONGINTR(17,apic_wrongintr17)
508 WRONGINTR(18,apic_wrongintr18)
509 WRONGINTR(19,apic_wrongintr19)
510 WRONGINTR(20,apic_wrongintr20)
511 WRONGINTR(21,apic_wrongintr21)
512 WRONGINTR(22,apic_wrongintr22)
513 WRONGINTR(23,apic_wrongintr23)
520 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
521 .globl stopped_cpus, started_cpus
/*
 * NOTE(review): the storage directives (.data/.long etc.) backing these
 * exported symbols are missing from this visible chunk.
 */
527 .globl CNAME(cpustop_restartfunc)
528 CNAME(cpustop_restartfunc):
531 .globl apic_pin_trigger