/*
 *	from: vector.s, 386BSD 0.1	unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.12 2003/07/12 17:54:35 dillon Exp $
 */

/*
 * NOTE(review): the targets of the first two includes were lost (angle
 * brackets stripped in transit, leaving two bare "#include" directives,
 * which do not preprocess).  Restored to the machine headers that supply
 * the symbols used below (lapic_eoi/lapic_isr1, apic_imen, IOAPIC_WINDOW,
 * IOART_INTMASK, int_to_apicintpin, ...) -- confirm against the
 * repository copy of this file.
 */
#include <machine/apic.h>
#include <machine/smp.h>
#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Push an interrupt frame in a format acceptable to doreti, reload
 * the segment registers for the kernel.  Layout after this macro:
 * dummy err/trap codes, pushal's 8 GP regs, then %ds/%es/%fs.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs ;							\
	mov	$KDSEL,%ax ;	/* switch to kernel data selectors */	\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	mov	$KPSEL,%ax ;	/* per-cpu private segment via %fs */	\
	mov	%ax,%fs ;						\

/*
 * Build a fake doreti-compatible frame from the current context
 * (used by FAST_UNPEND).  The GP registers and segment registers are
 * not actually saved -- doreti only needs the slots to exist, hence
 * the 12-dword gap (8 pushal regs + 3 seg regs + CPL).
 */
#define PUSH_DUMMY							\
	pushfl ;		/* phys int frame / flags */		\
	pushl	%cs ;		/* phys int frame / cs */		\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$12*4,%esp ;	/* pushal + 3 seg regs (dummy) + CPL */	\

/*
 * Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$2*4,%esp ;	/* dummy trap & error codes */		\

/* discard the 17 dwords pushed by PUSH_DUMMY (12 + flags/cs/eip/err/trap) */
#define POP_DUMMY							\
	addl	$17*4,%esp ;						\

/*
 * Per-IRQ fields of the int_to_apicintpin[] table (16-byte entries):
 * +8 is the IO APIC register-select address, +12 the redirection-table
 * index for this pin (see MASK_IRQ/UNMASK_IRQ usage).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Interrupts are expected to already be disabled when using these
 * IMASK_*() macros.
 */

/* spinlock protecting apic_imen and the IO APIC select/window registers */
#define IMASK_LOCK							\
	SPIN_LOCK(imen_spinlock) ;					\

#define IMASK_UNLOCK							\
	SPIN_UNLOCK(imen_spinlock) ;					\

/*
 * Mask the given IRQ at the IO APIC (set the redirection entry's mask
 * bit) and record it in the apic_imen software mask.  No-op if the IRQ
 * is already masked.  Must be called with interrupts disabled.
 */
#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK ;							\

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;									\

#ifdef APIC_INTR_REORDER
/*
 * EOI the local APIC, but only if this IRQ's in-service bit is actually
 * set.  apic_isrbit_location[] caches, per IRQ, a pointer to the ISR
 * word (+0) and the bit to test within it (+4).
 */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
	movl	$0, lapic_eoi ;						\
9:									\

#else
/*
 * Non-reordered variant: check the ISR bit directly in lapic_isr1
 * (presumably the interrupt vectors land in the 32-63 range covered
 * by ISR word 1 -- the vector base is not visible here).
 */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), lapic_isr1;				\
	jz	9f	;			/* not active */	\
	movl	$0, lapic_eoi;						\
9:									\

#endif

/*
 * Test to see if the source is currently masked, clear if so.
*/ #define UNMASK_IRQ(irq_num) \ IMASK_LOCK ; /* into critical reg */ \ testl $IRQ_LBIT(irq_num), apic_imen ; \ je 7f ; /* bit clear, not masked */ \ andl $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */ \ movl IOAPICADDR(irq_num),%ecx ; /* ioapic addr */ \ movl REDIRIDX(irq_num), %eax ; /* get the index */ \ movl %eax,(%ecx) ; /* write the index */ \ movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ 7: ; \ IMASK_UNLOCK ; \ /* * Fast interrupt call handlers run in the following sequence: * * - Push the trap frame required by doreti * - Mask the interrupt and reenable its source * - If we cannot take the interrupt set its fpending bit and * doreti. * - If we can take the interrupt clear its fpending bit, * call the handler, then unmask and doreti. * * YYY can cache gd base opitner instead of using hidden %fs prefixes. */ #define FAST_INTR(irq_num, vec_name) \ .text ; \ SUPERALIGN_TEXT ; \ IDTVEC(vec_name) ; \ PUSH_FRAME ; \ FAKE_MCOUNT(13*4(%esp)) ; \ MASK_LEVEL_IRQ(irq_num) ; \ EOI_IRQ(irq_num) ; \ incl PCPU(intr_nesting_level) ; \ movl PCPU(curthread),%ebx ; \ movl TD_CPL(%ebx),%eax ; \ pushl %eax ; \ cmpl $TDPRI_CRIT,TD_PRI(%ebx) ; \ jge 1f ; \ testl $IRQ_LBIT(irq_num), %eax ; \ jz 2f ; \ 1: ; \ /* in critical section, make interrupt pending */ \ /* set the pending bit and return, leave interrupt masked */ \ orl $IRQ_LBIT(irq_num),PCPU(fpending) ; \ orl $RQF_INTPEND,PCPU(reqflags) ; \ jmp 5f ; \ 2: ; \ /* try to get the MP lock */ \ call try_mplock ; \ testl %eax,%eax ; \ jz 6f ; \ /* clear pending bit, run handler */ \ addl $TDPRI_CRIT,TD_PRI(%ebx) ; \ andl $~IRQ_LBIT(irq_num),PCPU(fpending) ; \ pushl intr_unit + (irq_num) * 4 ; \ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ addl $4, %esp ; \ subl $TDPRI_CRIT,TD_PRI(%ebx) ; \ incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \ movl intr_countp + (irq_num) * 4, %eax ; \ incl 
(%eax) ; \ call rel_mplock ; \ UNMASK_IRQ(irq_num) ; \ 5: ; \ MEXITCOUNT ; \ jmp doreti ; \ 6: ; \ /* could not get MP lock, forward the interrupt */ \ movl mp_lock, %eax ; /* check race */ \ cmpl $MP_FREE_LOCK,%eax ; \ je 2b ; \ incl PCPU(cnt)+V_FORWARDED_INTS ; \ subl $12,%esp ; \ movl $irq_num,8(%esp) ; \ movl $forward_fastint_remote,4(%esp) ; \ movl %eax,(%esp) ; \ call lwkt_send_ipiq ; \ addl $12,%esp ; \ jmp 5f ; \ /* * Restart fast interrupt held up by critical section or cpl. * * - Push a dummy trape frame as required by doreti * - The interrupt source is already masked * - Clear the fpending bit * - Run the handler * - Unmask the interrupt * - Pop the dummy frame and do a normal return * * The BGL is held on call and left held on return. * * YYY can cache gd base pointer instead of using hidden %fs * prefixes. */ #define FAST_UNPEND(irq_num, vec_name) \ .text ; \ SUPERALIGN_TEXT ; \ IDTVEC(vec_name) ; \ pushl %ebp ; \ movl %esp,%ebp ; \ PUSH_DUMMY ; \ pushl intr_unit + (irq_num) * 4 ; \ call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ addl $4, %esp ; \ incl PCPU(cnt)+V_INTR ; /* book-keeping make per cpu YYY */ \ movl intr_countp + (irq_num) * 4, %eax ; \ incl (%eax) ; \ UNMASK_IRQ(irq_num) ; \ POP_DUMMY ; \ popl %ebp ; \ ret ; \ /* * Slow interrupt call handlers run in the following sequence: * * - Push the trap frame required by doreti. * - Mask the interrupt and reenable its source. * - If we cannot take the interrupt set its ipending bit and * doreti. In addition to checking for a critical section * and cpl mask we also check to see if the thread is still * running. * - If we can take the interrupt clear its ipending bit * and schedule the thread. Leave interrupts masked and doreti. * * Note that calls to sched_ithd() are made with interrupts enabled * and outside a critical section. YYY sched_ithd may preempt us * synchronously (fix interrupt stacking) * * YYY can cache gd base pointer instead of using hidden %fs * prefixes. 
 */
#define INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	maybe_extra_ipending ;						\
	;								\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
	incl	PCPU(intr_nesting_level) ;				\
	movl	PCPU(curthread),%ebx ;					\
	movl	TD_CPL(%ebx),%eax ;					\
	pushl	%eax ;		/* cpl do restore */			\
	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
	jge	1f ;		/* in a critical section? */		\
	testl	$IRQ_LBIT(irq_num),%eax ;	/* irq masked by cpl? */ \
	jz	2f ;							\
1: ;									\
	/* set the pending bit and return, leave the interrupt masked */ \
	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* set running bit, clear pending bit, run handler */		\
	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
	sti ;			/* sched_ithd called with ints enabled */ \
	pushl	$irq_num ;						\
	call	sched_ithd ;	/* schedule the interrupt thread */	\
	addl	$4,%esp ;						\
	incl	PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */	\
	movl	intr_countp + (irq_num) * 4,%eax ;			\
	incl	(%eax) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns: reload %cr3 to flush this CPU's TLB, then EOI.
 * Only %eax is used; segment registers are NOT reloaded, hence the ss
 * overrides on memory references below.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax	/* per-cpu segment to read cpuid */
	mov	%ax, %fs
	movl	PCPU(cpuid), %eax
	popl	%fs
	ss			/* %ss is valid; %ds was never reloaded */
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */

	popl	%eax
	iret

#if 0
#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
* * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags */ .text SUPERALIGN_TEXT .globl Xcpucheckstate .globl checkstate_cpustate .globl checkstate_curproc .globl checkstate_pc Xcpucheckstate: pushl %eax pushl %ebx pushl %ds /* save current data segment */ pushl %fs movl $KDSEL, %eax mov %ax, %ds /* use KERNEL data segment */ movl $KPSEL, %eax mov %ax, %fs movl $0, lapic_eoi /* End Of Interrupt to APIC */ movl $0, %ebx movl 20(%esp), %eax andl $3, %eax cmpl $3, %eax je 1f testl $PSL_VM, 24(%esp) jne 1f incl %ebx /* system or interrupt */ 1: movl PCPU(cpuid), %eax movl %ebx, checkstate_cpustate(,%eax,4) movl PCPU(curthread), %ebx movl TD_PROC(%ebx),%ebx movl %ebx, checkstate_curproc(,%eax,4) movl 16(%esp), %ebx movl %ebx, checkstate_pc(,%eax,4) lock /* checkstate_probed_cpus |= (1<