Remove all remaining SPL code. Replace the mtd_cpl field in the machine
[dragonfly.git] / sys / i386 / apic / apic_vector.s
index bd2dc8e..6d2aea9 100644 (file)
@@ -1,62 +1,23 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.4 2003/06/21 07:54:56 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.19 2005/06/16 21:12:47 dillon Exp $
  */
 
 
-#include <machine/apic.h>
+#include <machine/apicreg.h>
 #include <machine/smp.h>
-
 #include "i386/isa/intr_machdep.h"
 
 /* convert an absolute IRQ# into a bitmask */
-#define IRQ_BIT(irq_num)       (1 << (irq_num))
+#define IRQ_LBIT(irq_num)      (1 << (irq_num))
 
 /* make an index into the IO APIC from the IRQ# */
 #define REDTBL_IDX(irq_num)    (0x10 + ((irq_num) * 2))
 
-
-/*
- * Macros for interrupt interrupt entry, call to handler, and exit.
- */
-
-#define        FAST_INTR(irq_num, vec_name)                                    \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
-       pushl   %fs ;                                                   \
-       movl    $KDSEL,%eax ;                                           \
-       mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
-       movl    $KPSEL,%eax ;                                           \
-       mov     %ax,%fs ;                                               \
-       FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
-       addl    $4, %esp ;                                              \
-       movl    $0, lapic_eoi ;                                         \
-       lock ;                                                          \
-       incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
-       movl    _intr_countp + (irq_num) * 4, %eax ;                    \
-       lock ;                                                          \
-       incl    (%eax) ;                                                \
-       MEXITCOUNT ;                                                    \
-       popl    %fs ;                                                   \
-       MAYBE_POPL_ES ;                                                 \
-       popl    %ds ;                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax ;                                                  \
-       iret
-
 /*
- * 
+ * Push an interrupt frame in a format acceptable to doreti, reload
+ * the segment registers for the kernel.
  */
 #define PUSH_FRAME                                                     \
        pushl   $0 ;            /* dummy error code */                  \
@@ -64,23 +25,54 @@ IDTVEC(vec_name) ;                                                  \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
-       pushl   %fs
+       pushl   %fs ;                                                   \
+       mov     $KDSEL,%ax ;                                            \
+       mov     %ax,%ds ;                                               \
+       mov     %ax,%es ;                                               \
+       mov     $KPSEL,%ax ;                                            \
+       mov     %ax,%fs ;                                               \
+
+#define PUSH_DUMMY                                                     \
+       pushfl ;                /* phys int frame / flags */            \
+       pushl %cs ;             /* phys int frame / cs */               \
+       pushl   12(%esp) ;      /* original caller eip */               \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       subl    $12*4,%esp ;    /* pushal + 3 seg regs (dummy) + CPL */ \
 
+/*
+ * Warning: POP_FRAME can only be used if there is no chance of a
+ * segment register being changed (e.g. by procfs), which is why syscalls
+ * have to use doreti.
+ */
 #define POP_FRAME                                                      \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
-       addl    $4+4,%esp
+       addl    $2*4,%esp ;     /* dummy trap & error codes */          \
+
+#define POP_DUMMY                                                      \
+       addl    $17*4,%esp ;                                            \
 
 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
+
+/*
+ * Interrupts are expected to already be disabled when using these
+ * IMASK_*() macros.
+ */
+#define IMASK_LOCK                                                     \
+       SPIN_LOCK(imen_spinlock) ;                                      \
+
+#define IMASK_UNLOCK                                                   \
+       SPIN_UNLOCK(imen_spinlock) ;                                    \
        
 #define MASK_IRQ(irq_num)                                              \
        IMASK_LOCK ;                            /* into critical reg */ \
-       testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
+       testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
-       orl     $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */  \
+       orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
@@ -88,47 +80,47 @@ IDTVEC(vec_name) ;                                                  \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
 7: ;                                           /* already masked */    \
-       IMASK_UNLOCK
+       IMASK_UNLOCK ;                                                  \
+
 /*
  * Test to see whether we are handling an edge or level triggered INT.
  *  Level-triggered INTs must still be masked as we don't clear the source,
  *  and the EOI cycle would cause redundant INTs to occur.
  */
 #define MASK_LEVEL_IRQ(irq_num)                                                \
-       testl   $IRQ_BIT(irq_num), _apic_pin_trigger ;                  \
+       testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
-9:
+9: ;                                                                   \
 
 
 #ifdef APIC_INTR_REORDER
 #define EOI_IRQ(irq_num)                                               \
-       movl    _apic_isrbit_location + 8 * (irq_num), %eax ;           \
+       movl    apic_isrbit_location + 8 * (irq_num), %eax ;            \
        movl    (%eax), %eax ;                                          \
-       testl   _apic_isrbit_location + 4 + 8 * (irq_num), %eax ;       \
+       testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
-       APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
-9:
+9:                                                                     \
 
 #else
+
 #define EOI_IRQ(irq_num)                                               \
-       testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
+       testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
-       APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
-9:
+9:                                                                     \
+
 #endif
        
-       
 /*
  * Test to see if the source is currntly masked, clear if so.
  */
 #define UNMASK_IRQ(irq_num)                                    \
        IMASK_LOCK ;                            /* into critical reg */ \
-       testl   $IRQ_BIT(irq_num), _apic_imen ;                         \
+       testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
-       andl    $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */    \
+       andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
@@ -136,174 +128,165 @@ IDTVEC(vec_name) ;                                                      \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
 7: ;                                                                   \
-       IMASK_UNLOCK
-
-#ifdef APIC_INTR_DIAGNOSTIC
-#ifdef APIC_INTR_DIAGNOSTIC_IRQ
-log_intr_event:
-       pushf
-       cli
-       pushl   $CNAME(apic_itrace_debuglock)
-       call    CNAME(s_lock_np)
-       addl    $4, %esp
-       movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
-       andl    $32767, %ecx
-       movl    _cpuid, %eax
-       shll    $8,     %eax
-       orl     8(%esp), %eax
-       movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
-       incl    %ecx
-       andl    $32767, %ecx
-       movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
-       pushl   $CNAME(apic_itrace_debuglock)
-       call    CNAME(s_unlock_np)
-       addl    $4, %esp
-       popf
-       ret
-       
+       IMASK_UNLOCK ;                                                  \
 
-#define APIC_ITRACE(name, irq_num, id)                                 \
-       lock ;                                  /* MP-safe */           \
-       incl    CNAME(name) + (irq_num) * 4 ;                           \
+/*
+ * Fast interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti
+ *     - Mask the interrupt and reenable its source
+ *     - If we cannot take the interrupt set its fpending bit and
+ *       doreti.  Note that we cannot mess with mp_lock at all
+ *       if we entered from a critical section!
+ *     - If we can take the interrupt clear its fpending bit,
+ *       call the handler, then unmask and doreti.
+ *
+ * YYY can cache gd base pointer instead of using hidden %fs prefixes.
+ */
+
+#define        FAST_INTR(irq_num, vec_name)                                    \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
+       MASK_LEVEL_IRQ(irq_num) ;                                       \
+       EOI_IRQ(irq_num) ;                                              \
+       movl    PCPU(curthread),%ebx ;                                  \
+       movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
        pushl   %eax ;                                                  \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       movl    $(irq_num), %eax ;                                      \
-       cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
-       jne     7f ;                                                    \
-       pushl   $id ;                                                   \
-       call    log_intr_event ;                                        \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       jl      2f ;                                                    \
+1: ;                                                                   \
+       /* in critical section, make interrupt pending */               \
+       /* set the pending bit and return, leave interrupt masked */    \
+       orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
+       orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* try to get the MP lock */                                    \
+       call    try_mplock ;                                            \
+       testl   %eax,%eax ;                                             \
+       jz      6f ;                                                    \
+       /* clear pending bit, run handler */                            \
+       incl    PCPU(intr_nesting_level) ;                              \
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
        addl    $4, %esp ;                                              \
-7: ;                                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax
-#else
-#define APIC_ITRACE(name, irq_num, id)                                 \
-       lock ;                                  /* MP-safe */           \
-       incl    CNAME(name) + (irq_num) * 4
-#endif
+       subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       decl    PCPU(intr_nesting_level) ;                              \
+       call    rel_mplock ;                                            \
+       UNMASK_IRQ(irq_num) ;                                           \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+6: ;                                                                   \
+       /* could not get the MP lock, forward the interrupt */          \
+       movl    mp_lock, %eax ;          /* check race */               \
+       cmpl    $MP_FREE_LOCK,%eax ;                                    \
+       je      2b ;                                                    \
+       incl    PCPU(cnt)+V_FORWARDED_INTS ;                            \
+       subl    $12,%esp ;                                              \
+       movl    $irq_num,8(%esp) ;                                      \
+       movl    $forward_fastint_remote,4(%esp) ;                       \
+       movl    %eax,(%esp) ;                                           \
+       call    lwkt_send_ipiq_bycpu ;                                  \
+       addl    $12,%esp ;                                              \
+       jmp     5f ;                                                    \
 
-#define APIC_ITRACE_ENTER 1
-#define APIC_ITRACE_EOI 2
-#define APIC_ITRACE_TRYISRLOCK 3
-#define APIC_ITRACE_GOTISRLOCK 4
-#define APIC_ITRACE_ENTER2 5
-#define APIC_ITRACE_LEAVE 6
-#define APIC_ITRACE_UNMASK 7
-#define APIC_ITRACE_ACTIVE 8
-#define APIC_ITRACE_MASKED 9
-#define APIC_ITRACE_NOISRLOCK 10
-#define APIC_ITRACE_MASKED2 11
-#define APIC_ITRACE_SPLZ 12
-#define APIC_ITRACE_DORETI 13  
-       
-#else  
-#define APIC_ITRACE(name, irq_num, id)
-#endif
-               
-#define        INTR(irq_num, vec_name, maybe_extra_ipending)                   \
+/*
+ * Restart fast interrupt held up by critical section or cpl.
+ *
+ *     - Push a dummy trap frame as required by doreti
+ *     - The interrupt source is already masked
+ *     - Clear the fpending bit
+ *     - Run the handler
+ *     - Unmask the interrupt
+ *     - Pop the dummy frame and do a normal return
+ *
+ *     The BGL is held on call and left held on return.
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define FAST_UNPEND(irq_num, vec_name)                                 \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl   %ebp ;                                                  \
+       movl    %esp,%ebp ;                                             \
+       PUSH_DUMMY ;                                                    \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
+       addl    $4, %esp ;                                              \
+       incl    PCPU(cnt)+V_INTR ;      /* book-keeping make per cpu YYY */ \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(irq_num) ;                                           \
+       POP_DUMMY ;                                                     \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
+
+/*
+ * Slow interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its ipending bit and
+ *       doreti.  In addition to checking for a critical section
+ *       and cpl mask we also check to see if the thread is still
+ *       running.  Note that we cannot mess with mp_lock at all
+ *       if we entered from a critical section!
+ *     - If we can take the interrupt clear its ipending bit
+ *       and schedule the thread.  Leave interrupts masked and doreti.
+ *
+ *     Note that calls to sched_ithd() are made with interrupts enabled
+ *     and outside a critical section.  YYY sched_ithd may preempt us
+ *     synchronously (fix interrupt stacking).
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define INTR(irq_num, vec_name, maybe_extra_ipending)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
-/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
 IDTVEC(vec_name) ;                                                     \
        PUSH_FRAME ;                                                    \
-       movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
-       mov     %ax, %ds ;                                              \
-       mov     %ax, %es ;                                              \
-       movl    $KPSEL, %eax ;                                          \
-       mov     %ax, %fs ;                                              \
-;                                                                      \
        maybe_extra_ipending ;                                          \
-;                                                                      \
-       APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
-       lock ;                                  /* MP-safe */           \
-       btsl    $(irq_num), iactive ;           /* lazy masking */      \
-       jc      1f ;                            /* already active */    \
 ;                                                                      \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
-0: ;                                                                   \
-       APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
-       MP_TRYLOCK ;            /* XXX this is going away... */         \
-       testl   %eax, %eax ;                    /* did we get it? */    \
-       jz      3f ;                            /* no */                \
-;                                                                      \
-       APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
-       testl   $IRQ_BIT(irq_num), _cpl ;                               \
-       jne     2f ;                            /* this INT masked */   \
-       movl    _curthread,%eax ;                                       \
-       cmpl    $TDPRI_CRIT,TD_PRI(%eax) ;                              \
-       jge     2f ;                            /* in critical sec */   \
-;                                                                      \
-       incb    _intr_nesting_level ;                                   \
-;                                                                      \
-  /* entry point used by doreti_unpend for HWIs. */                    \
-__CONCAT(Xresume,irq_num): ;                                           \
-       FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
-       lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
-       movl    _intr_countp + (irq_num) * 4, %eax ;                    \
-       lock ;  incl    (%eax) ;                                        \
-;                                                                      \
-       movl    _cpl, %eax ;                                            \
-       pushl   %eax ;                                                  \
-       orl     _intr_mask + (irq_num) * 4, %eax ;                      \
-       movl    %eax, _cpl ;                                            \
-       lock ;                                                          \
-       andl    $~IRQ_BIT(irq_num), _ipending ;                         \
-;                                                                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
+       movl    PCPU(curthread),%ebx ;                                  \
+       movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
+       pushl   %eax ;          /* cpl to restore */                    \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       jl      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave the interrupt masked */ \
+       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
+       orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* set running bit, clear pending bit, run handler */           \
+       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        sti ;                                                           \
-       call    *_intr_handler + (irq_num) * 4 ;                        \
-       cli ;                                                           \
-       APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
-;                                                                      \
-       lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
-       UNMASK_IRQ(irq_num) ;                                           \
-       APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
-       sti ;                           /* doreti repeats cli/sti */    \
+       pushl   $irq_num ;                                              \
+       call    sched_ithd ;                                            \
+       addl    $4,%esp ;                                               \
+       incl    PCPU(cnt)+V_INTR ; /* book-keeping YYY make per-cpu */  \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+5: ;                                                                   \
        MEXITCOUNT ;                                                    \
-       jmp     _doreti ;                                               \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-1: ;                                           /* active  */           \
-       APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
-       MASK_IRQ(irq_num) ;                                             \
-       EOI_IRQ(irq_num) ;                                              \
-       lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), _ipending ;                          \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       lock ;                                                          \
-       btsl    $(irq_num), iactive ;           /* still active */      \
-       jnc     0b ;                            /* retry */             \
-       POP_FRAME ;                                                     \
-       iret ;          /* XXX:  iactive bit might be 0 now */          \
-       ALIGN_TEXT ;                                                    \
-2: ;                           /* masked by cpl, leave iactive set */  \
-       APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
-       lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), _ipending ;                          \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       MP_RELLOCK ;                                                    \
-       POP_FRAME ;                                                     \
-       iret ;                                                          \
-       ALIGN_TEXT ;                                                    \
-3: ;                   /* other cpu has isr lock */                    \
-       APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
-       lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), _ipending ;                          \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       testl   $IRQ_BIT(irq_num), _cpl ;                               \
-       jne     4f ;                            /* this INT masked */   \
-       call    forward_irq ;    /* forward irq to lock holder */       \
-       POP_FRAME ;                             /* and return */        \
-       iret ;                                                          \
-       ALIGN_TEXT ;                                                    \
-4: ;                                           /* blocked */           \
-       APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
-       POP_FRAME ;                             /* and return */        \
-       iret
+       jmp     doreti ;                                                \
+
 
 /*
  * Handle "spurious INTerrupts".
@@ -314,8 +297,8 @@ __CONCAT(Xresume,irq_num): ;                                                \
  */
        .text
        SUPERALIGN_TEXT
-       .globl _Xspuriousint
-_Xspuriousint:
+       .globl Xspuriousint
+Xspuriousint:
 
        /* No EOI cycle used here */
 
@@ -327,15 +310,15 @@ _Xspuriousint:
  */
        .text
        SUPERALIGN_TEXT
-       .globl  _Xinvltlb
-_Xinvltlb:
+       .globl  Xinvltlb
+Xinvltlb:
        pushl   %eax
 
 #ifdef COUNT_XINVLTLB_HITS
        pushl   %fs
        movl    $KPSEL, %eax
        mov     %ax, %fs
-       movl    _cpuid, %eax
+       movl    PCPU(cpuid), %eax
        popl    %fs
        ss
        incl    _xhits(,%eax,4)
@@ -351,223 +334,6 @@ _Xinvltlb:
        iret
 
 
-#ifdef BETTER_CLOCK
-
-/*
- * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
- *
- *  - Stores current cpu state in checkstate_cpustate[cpuid]
- *      0 == user, 1 == sys, 2 == intr
- *  - Stores current process in checkstate_curproc[cpuid]
- *
- *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
- *
- * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
- */
-
-       .text
-       SUPERALIGN_TEXT
-       .globl _Xcpucheckstate
-       .globl _checkstate_cpustate
-       .globl _checkstate_curproc
-       .globl _checkstate_pc
-_Xcpucheckstate:
-       pushl   %eax
-       pushl   %ebx            
-       pushl   %ds                     /* save current data segment */
-       pushl   %fs
-
-       movl    $KDSEL, %eax
-       mov     %ax, %ds                /* use KERNEL data segment */
-       movl    $KPSEL, %eax
-       mov     %ax, %fs
-
-       movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
-
-       movl    $0, %ebx                
-       movl    20(%esp), %eax  
-       andl    $3, %eax
-       cmpl    $3, %eax
-       je      1f
-       testl   $PSL_VM, 24(%esp)
-       jne     1f
-       incl    %ebx                    /* system or interrupt */
-1:     
-       movl    _cpuid, %eax
-       movl    %ebx, _checkstate_cpustate(,%eax,4)
-       movl    _curthread, %ebx
-       movl    TD_PROC(%ebx),%ebx
-       movl    %ebx, _checkstate_curproc(,%eax,4)
-       movl    16(%esp), %ebx
-       movl    %ebx, _checkstate_pc(,%eax,4)
-
-       lock                            /* checkstate_probed_cpus |= (1<<id) */
-       btsl    %eax, _checkstate_probed_cpus
-
-       popl    %fs
-       popl    %ds                     /* restore previous data segment */
-       popl    %ebx
-       popl    %eax
-       iret
-
-#endif /* BETTER_CLOCK */
-
-/*
- * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
- *
- *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
- *
- *  - We need a better method of triggering asts on other cpus.
- */
-
-       .text
-       SUPERALIGN_TEXT
-       .globl _Xcpuast
-_Xcpuast:
-       PUSH_FRAME
-       movl    $KDSEL, %eax
-       mov     %ax, %ds                /* use KERNEL data segment */
-       mov     %ax, %es
-       movl    $KPSEL, %eax
-       mov     %ax, %fs
-
-       movl    _cpuid, %eax
-       lock                            /* checkstate_need_ast &= ~(1<<id) */
-       btrl    %eax, _checkstate_need_ast
-       movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
-
-       lock
-       btsl    %eax, _checkstate_pending_ast
-       jc      1f
-
-       FAKE_MCOUNT(13*4(%esp))
-
-       /* 
-        * Giant locks do not come cheap.
-        * A lot of cycles are going to be wasted here.
-        */
-       call    _get_mplock
-
-       movl    _cpl, %eax
-       pushl   %eax
-       orl     $AST_PENDING, _astpending       /* XXX */
-       incb    _intr_nesting_level
-       sti
-       
-       pushl   $0
-       
-       movl    _cpuid, %eax
-       lock    
-       btrl    %eax, _checkstate_pending_ast
-       lock    
-       btrl    %eax, CNAME(resched_cpus)
-       jnc     2f
-       orl     $AST_PENDING+AST_RESCHED,_astpending
-       lock
-       incl    CNAME(want_resched_cnt)
-2:             
-       lock
-       incl    CNAME(cpuast_cnt)
-       MEXITCOUNT
-       jmp     _doreti
-1:
-       /* We are already in the process of delivering an ast for this CPU */
-       POP_FRAME
-       iret                    
-
-
-/*
- *      Executed by a CPU when it receives an XFORWARD_IRQ IPI.
- */
-
-       .text
-       SUPERALIGN_TEXT
-       .globl _Xforward_irq
-_Xforward_irq:
-       PUSH_FRAME
-       movl    $KDSEL, %eax
-       mov     %ax, %ds                /* use KERNEL data segment */
-       mov     %ax, %es
-       movl    $KPSEL, %eax
-       mov     %ax, %fs
-
-       movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
-
-       FAKE_MCOUNT(13*4(%esp))
-
-       MP_TRYLOCK
-       testl   %eax,%eax               /* Did we get the lock ? */
-       jz  1f                          /* No */
-
-       lock
-       incl    CNAME(forward_irq_hitcnt)
-       cmpb    $4, _intr_nesting_level
-       jae     2f
-       
-       movl    _cpl, %eax
-       pushl   %eax
-       incb    _intr_nesting_level
-       sti
-       
-       pushl   $0
-
-       MEXITCOUNT
-       jmp     _doreti                 /* Handle forwarded interrupt */
-1:
-       lock
-       incl    CNAME(forward_irq_misscnt)
-       call    forward_irq     /* Oops, we've lost the isr lock */
-       MEXITCOUNT
-       POP_FRAME
-       iret
-2:
-       lock
-       incl    CNAME(forward_irq_toodeepcnt)
-3:     
-       MP_RELLOCK
-       MEXITCOUNT
-       POP_FRAME
-       iret
-
-/*
- * 
- */
-forward_irq:
-       MCOUNT
-       cmpl    $0,_invltlb_ok
-       jz      4f
-
-       cmpl    $0, CNAME(forward_irq_enabled)
-       jz      4f
-
-       movl    _mp_lock,%eax
-       cmpl    $FREE_LOCK,%eax
-       jne     1f
-       movl    $0, %eax                /* Pick CPU #0 if noone has lock */
-1:
-       shrl    $24,%eax
-       movl    _cpu_num_to_apic_id(,%eax,4),%ecx
-       shll    $24,%ecx
-       movl    lapic_icr_hi, %eax
-       andl    $~APIC_ID_MASK, %eax
-       orl     %ecx, %eax
-       movl    %eax, lapic_icr_hi
-
-2:
-       movl    lapic_icr_lo, %eax
-       andl    $APIC_DELSTAT_MASK,%eax
-       jnz     2b
-       movl    lapic_icr_lo, %eax
-       andl    $APIC_RESV2_MASK, %eax
-       orl     $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
-       movl    %eax, lapic_icr_lo
-3:
-       movl    lapic_icr_lo, %eax
-       andl    $APIC_DELSTAT_MASK,%eax
-       jnz     3b
-4:             
-       ret
-       
 /*
  * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
  *
@@ -578,8 +344,8 @@ forward_irq:
 
        .text
        SUPERALIGN_TEXT
-       .globl _Xcpustop
-_Xcpustop:
+       .globl Xcpustop
+Xcpustop:
        pushl   %ebp
        movl    %esp, %ebp
        pushl   %eax
@@ -595,7 +361,7 @@ _Xcpustop:
 
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
 
-       movl    _cpuid, %eax
+       movl    PCPU(cpuid), %eax
        imull   $PCB_SIZE, %eax
        leal    CNAME(stoppcbs)(%eax), %eax
        pushl   %eax
@@ -603,18 +369,18 @@ _Xcpustop:
        addl    $4, %esp
        
                
-       movl    _cpuid, %eax
+       movl    PCPU(cpuid), %eax
 
        lock
-       btsl    %eax, _stopped_cpus     /* stopped_cpus |= (1<<id) */
+       btsl    %eax, stopped_cpus      /* stopped_cpus |= (1<<id) */
 1:
-       btl     %eax, _started_cpus     /* while (!(started_cpus & (1<<id))) */
+       btl     %eax, started_cpus      /* while (!(started_cpus & (1<<id))) */
        jnc     1b
 
        lock
-       btrl    %eax, _started_cpus     /* started_cpus &= ~(1<<id) */
+       btrl    %eax, started_cpus      /* started_cpus &= ~(1<<id) */
        lock
-       btrl    %eax, _stopped_cpus     /* stopped_cpus &= ~(1<<id) */
+       btrl    %eax, stopped_cpus      /* stopped_cpus &= ~(1<<id) */
 
        test    %eax, %eax
        jnz     2f
@@ -635,6 +401,37 @@ _Xcpustop:
        popl    %ebp
        iret
 
+       /*
+        * For now just have one ipiq IPI, but what we really want is
+        * to have one for each source cpu to the APICs don't get stalled
+        * backlogging the requests.
+        */
+       .text
+       SUPERALIGN_TEXT
+       .globl Xipiq
+Xipiq:
+       PUSH_FRAME
+       movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
+       FAKE_MCOUNT(13*4(%esp))
+
+       movl    PCPU(curthread),%ebx
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx)
+       jge     1f
+       subl    $8,%esp                 /* make same as interrupt frame */
+       incl    PCPU(intr_nesting_level)
+       addl    $TDPRI_CRIT,TD_PRI(%ebx)
+       call    lwkt_process_ipiq_frame
+       subl    $TDPRI_CRIT,TD_PRI(%ebx)
+       decl    PCPU(intr_nesting_level)
+       addl    $8,%esp
+       pushl   $0                      /* CPL for frame (REMOVED) */
+       MEXITCOUNT
+       jmp     doreti
+1:
+       orl     $RQF_IPIQ,PCPU(reqflags)
+       MEXITCOUNT
+       POP_FRAME
+       iret
 
 MCOUNT_LABEL(bintr)
        FAST_INTR(0,fastintr0)
@@ -662,14 +459,9 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)
        
-#define        CLKINTR_PENDING                                                 \
-       pushl $clock_lock ;                                             \
-       call s_lock ;                                                   \
-       movl $1,CNAME(clkintr_pending) ;                                \
-       call s_unlock ;                                                 \
-       addl $4, %esp
-
-       INTR(0,intr0, CLKINTR_PENDING)
+       /* YYY what is this garbage? */
+
+       INTR(0,intr0,)
        INTR(1,intr1,)
        INTR(2,intr2,)
        INTR(3,intr3,)
@@ -693,17 +485,42 @@ MCOUNT_LABEL(bintr)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
+
+       FAST_UNPEND(0,fastunpend0)
+       FAST_UNPEND(1,fastunpend1)
+       FAST_UNPEND(2,fastunpend2)
+       FAST_UNPEND(3,fastunpend3)
+       FAST_UNPEND(4,fastunpend4)
+       FAST_UNPEND(5,fastunpend5)
+       FAST_UNPEND(6,fastunpend6)
+       FAST_UNPEND(7,fastunpend7)
+       FAST_UNPEND(8,fastunpend8)
+       FAST_UNPEND(9,fastunpend9)
+       FAST_UNPEND(10,fastunpend10)
+       FAST_UNPEND(11,fastunpend11)
+       FAST_UNPEND(12,fastunpend12)
+       FAST_UNPEND(13,fastunpend13)
+       FAST_UNPEND(14,fastunpend14)
+       FAST_UNPEND(15,fastunpend15)
+       FAST_UNPEND(16,fastunpend16)
+       FAST_UNPEND(17,fastunpend17)
+       FAST_UNPEND(18,fastunpend18)
+       FAST_UNPEND(19,fastunpend19)
+       FAST_UNPEND(20,fastunpend20)
+       FAST_UNPEND(21,fastunpend21)
+       FAST_UNPEND(22,fastunpend22)
+       FAST_UNPEND(23,fastunpend23)
 MCOUNT_LABEL(eintr)
 
-/*
- * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
- *
- * - Calls the generic rendezvous action function.
- */
+       /*
       * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
       *
       * - Calls the generic rendezvous action function.
       */
        .text
        SUPERALIGN_TEXT
-       .globl  _Xrendezvous
-_Xrendezvous:
+       .globl  Xrendezvous
+Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
@@ -711,7 +528,7 @@ _Xrendezvous:
        movl    $KPSEL, %eax
        mov     %ax, %fs
 
-       call    _smp_rendezvous_action
+       call    smp_rendezvous_action
 
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
@@ -719,89 +536,26 @@ _Xrendezvous:
        
        
        .data
-/*
- * Addresses of interrupt handlers.
- *  XresumeNN: Resumption addresses for HWIs.
- */
-       .globl _ihandlers
-_ihandlers:
-/*
- * used by:
- *  ipl.s:     doreti_unpend
- */
-       .long   Xresume0,  Xresume1,  Xresume2,  Xresume3 
-       .long   Xresume4,  Xresume5,  Xresume6,  Xresume7
-       .long   Xresume8,  Xresume9,  Xresume10, Xresume11
-       .long   Xresume12, Xresume13, Xresume14, Xresume15 
-       .long   Xresume16, Xresume17, Xresume18, Xresume19
-       .long   Xresume20, Xresume21, Xresume22, Xresume23
-/*
- * used by:
- *  ipl.s:     doreti_unpend
- *  apic_ipl.s:        splz_unpend
- */
-       .long   _swi_null, swi_net, _swi_null, _swi_null
-       .long   _swi_vm, _swi_null, _softclock
-
-imasks:                                /* masks for interrupt handlers */
-       .space  NHWI*4          /* padding; HWI masks are elsewhere */
-
-       .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
-       .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
-
-/* active flag for lazy masking */
-iactive:
-       .long   0
 
 #ifdef COUNT_XINVLTLB_HITS
-       .globl  _xhits
-_xhits:
+       .globl  xhits
+xhits:
        .space  (NCPU * 4), 0
 #endif /* COUNT_XINVLTLB_HITS */
 
 /* variables used by stop_cpus()/restart_cpus()/Xcpustop */
-       .globl _stopped_cpus, _started_cpus
-_stopped_cpus:
+       .globl stopped_cpus, started_cpus
+stopped_cpus:
        .long   0
-_started_cpus:
+started_cpus:
        .long   0
 
-#ifdef BETTER_CLOCK
-       .globl _checkstate_probed_cpus
-_checkstate_probed_cpus:
-       .long   0       
-#endif /* BETTER_CLOCK */
-       .globl _checkstate_need_ast
-_checkstate_need_ast:
-       .long   0
-_checkstate_pending_ast:
-       .long   0
-       .globl CNAME(forward_irq_misscnt)
-       .globl CNAME(forward_irq_toodeepcnt)
-       .globl CNAME(forward_irq_hitcnt)
-       .globl CNAME(resched_cpus)
-       .globl CNAME(want_resched_cnt)
-       .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
-CNAME(forward_irq_misscnt):    
-       .long 0
-CNAME(forward_irq_hitcnt):     
-       .long 0
-CNAME(forward_irq_toodeepcnt):
-       .long 0
-CNAME(resched_cpus):
-       .long 0
-CNAME(want_resched_cnt):
-       .long 0
-CNAME(cpuast_cnt):
-       .long 0
 CNAME(cpustop_restartfunc):
        .long 0
                
-
-
-       .globl  _apic_pin_trigger
-_apic_pin_trigger:
+       .globl  apic_pin_trigger
+apic_pin_trigger:
        .long   0
 
        .text