/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#include "use_npx.h"
#include "opt_auto_eoi.h"

#include
#include
#include
#include
#include
#include

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include
#include

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

#ifdef SMP
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs ;							\
	pushl	%gs ;							\
	cld ;								\
	mov	$KDSEL,%ax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	mov	%ax,%gs ;						\
	mov	$KPSEL,%ax ;						\
	mov	%ax,%fs ;						\

#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushl	$0 ;		/* dummy xflags type */			\
	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME							\
	popl	%gs ;							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$3*4,%esp ;	/* dummy xflags, trap & error codes */	\

#define POP_DUMMY							\
	addl	$19*4,%esp ;						\

#define IOAPICADDR(irq_num) \
	CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) \
	CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	APIC_IMASK_UNLOCK ;						\

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;									\
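
/*
 * For reference, the index/window access pattern MASK_IRQ performs on the
 * IO APIC, rendered as a rough C sketch.  It assumes only the standard
 * IO APIC register layout (index register at offset 0x00, data window at
 * offset 0x10); "ioapic_base" and "ioapic_mask_pin" are illustrative
 * names, not symbols from this file.
 *
 *	#include <stdint.h>
 *
 *	#define IOART_INTMASK	0x00010000	// mask bit in a redir entry
 *
 *	static volatile uint32_t *ioapic_base;	// mapped IO APIC registers
 *
 *	static void
 *	ioapic_mask_pin(int irq)
 *	{
 *		// REDTBL_IDX(): redirection entries start at register 0x10,
 *		// two 32-bit registers per pin; the low dword holds the mask.
 *		ioapic_base[0] = 0x10 + irq * 2;	// select via index reg
 *		ioapic_base[4] |= IOART_INTMASK;	// r-m-w the data window
 *	}
 */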
/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	cmpl	$0,%eax ;						\
	jnz	8f ;							\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;									\
	APIC_IMASK_UNLOCK ;						\
8: ;									\

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;							\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jl	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushl	$irq_num ;						\
	pushl	%esp ;			/* pass frame by reference */	\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	addl	$8, %esp ;						\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\
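
/*
 * For reference, the decision FAST_INTR makes, as a rough C sketch.  The
 * fpending/reqflags fields and ithread_fast_handler() mirror the assembly
 * above; "in_critical_section()" and "ioapic_unmask_pin()" are illustrative
 * stand-ins for the TD_NEST_COUNT/TD_PRI tests and for UNMASK_IRQ.
 *
 *	void
 *	fast_intr(struct globaldata *gd, struct intrframe *frame, int irq)
 *	{
 *		if (in_critical_section()) {
 *			// can't run the handler now: remember the IRQ and
 *			// ask doreti to replay it; the source stays masked.
 *			gd->gd_fpending |= 1 << irq;
 *			gd->gd_reqflags |= RQF_INTPEND;
 *			return;
 *		}
 *		gd->gd_fpending &= ~(1 << irq);
 *		if (ithread_fast_handler(frame) == 0)	// 0 means unmask
 *			ioapic_unmask_pin(irq);
 *	}
 */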
/*
 * Slow interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti.
 *	- Mask the interrupt and reenable its source.
 *	- If we cannot take the interrupt set its ipending bit and
 *	  doreti.  In addition to checking for a critical section
 *	  and cpl mask we also check to see if the thread is still
 *	  running.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt clear its ipending bit
 *	  and schedule the thread.  Leave interrupts masked and doreti.
 *
 * Note that calls to sched_ithd() are made with interrupts enabled
 * and outside a critical section.  YYY sched_ithd may preempt us
 * synchronously (fix interrupt stacking).
 *
 * YYY can cache gd base pointer instead of using hidden %fs
 * prefixes.
 */
#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	incl	PCPU(cnt) + V_INTR ;					\
	movl	$0, lapic_eoi ;						\
	movl	PCPU(curthread),%ebx ;					\
	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
	pushl	%eax ;		/* cpl to restore */			\
	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jl	2f ;							\
1: ;									\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	incl	TD_NEST_COUNT(%ebx) ;					\
	sti ;								\
	pushl	$irq_num ;						\
	call	sched_ithd ;						\
	addl	$4,%esp ;						\
	cli ;								\
	decl	TD_NEST_COUNT(%ebx) ;					\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\

/*
 * Wrong interrupt call handlers.  We program these into APIC vectors
 * that should otherwise never occur.  For example, we program the SLOW
 * vector for irq N with this when we program the FAST vector with the
 * real interrupt.
 *
 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
 * (yet) because we could be in a critical section.
 */
#define WRONGINTR(irq_num,vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$0, lapic_eoi ;	/* End Of Interrupt to APIC */		\
	/*pushl	$irq_num ;*/						\
	/*call	do_wrongintr ;*/					\
	/*addl	$4,%esp ;*/						\
	POP_FRAME ;							\
	iret ;								\

#endif

/*
 * Handle "spurious INTerrupts".
 *
 * Notes:
 *	This is different than the "spurious INTerrupt" generated by an
 *	8259 PIC for missing INTs.  See the APIC documentation for details.
 *	This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns.  Reloading %cr3 invalidates all non-global
 * TLB entries on this CPU.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(cpuid), %eax

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 */
	MPLOCKED
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
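
/*
 * For reference, a rough C sketch of the stop/restart handshake Xcpustop
 * implements, per the comment block above.  Only "stopped_cpus" appears in
 * this file; "started_cpus", "process_pending_ipiqs()" and the atomic
 * helpers are illustrative names standing in for the wait-for-restart word
 * and the IPIQ polling step.
 *
 *	void
 *	cpustop_handler(int cpuid)
 *	{
 *		unsigned int mask = 1u << cpuid;
 *
 *		atomic_set_int(&stopped_cpus, mask);	// signal receipt
 *		while ((started_cpus & mask) == 0)	// wait for permission
 *			process_pending_ipiqs();	// service IPIQs meanwhile
 *		atomic_clear_int(&started_cpus, mask);	// consume the start bit
 *		atomic_clear_int(&stopped_cpus, mask);	// signal restart
 *	}
 */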