Use same interrupt vector handler for fast/slow interrupt handlers
authorSepherosa Ziehau <sephe@dragonflybsd.org>
Fri, 10 Jul 2009 07:29:24 +0000 (15:29 +0800)
committerSepherosa Ziehau <sephe@dragonflybsd.org>
Sat, 11 Jul 2009 02:27:17 +0000 (10:27 +0800)
Slow interrupt vector handler is removed.  Fast interrupt vector handler,
ithread_fast_handler(), now schedules slow interrupt handlers if necessary:
o  No fast interrupt handlers are registered
o  Mixed fast and slow interrupt handlers are registered
o  Non-MPSAFE fast interrupt handlers could not get BGL

i386/amd64: gd_ipending field in mdglobaldata is revoked, which is only
used by slow interrupt vector handler.

ithread_fast_handler()'s invoking convention is changed:
- ithread_fast_handler() must be called with critical section being held
- Callers of ithread_fast_handler() no longer bump gd_intr_nesting_level

Discussed-with: dillon@
Reviewed-by: dillon@
23 files changed:
sys/kern/kern_intr.c
sys/platform/pc32/apic/apic_abi.c
sys/platform/pc32/apic/apic_ipl.h
sys/platform/pc32/apic/apic_vector.s
sys/platform/pc32/i386/genassym.c
sys/platform/pc32/i386/globals.s
sys/platform/pc32/i386/mp_machdep.c
sys/platform/pc32/icu/icu_abi.c
sys/platform/pc32/icu/icu_vector.s
sys/platform/pc32/include/globaldata.h
sys/platform/pc32/isa/ipl.s
sys/platform/pc32/isa/ipl_funcs.c
sys/platform/pc64/amd64/genassym.c
sys/platform/pc64/amd64/global.s
sys/platform/pc64/amd64/ipl.s
sys/platform/pc64/amd64/ipl_funcs.c
sys/platform/pc64/amd64/mp_machdep.c
sys/platform/pc64/apic/apic_abi.c
sys/platform/pc64/apic/apic_ipl.h
sys/platform/pc64/apic/apic_vector.s
sys/platform/pc64/icu/icu_abi.c
sys/platform/pc64/icu/icu_vector.s
sys/platform/pc64/include/globaldata.h

index 7417a8e..21ac592 100644 (file)
@@ -553,6 +553,28 @@ ithread_livelock_wakeup(systimer_t st)
        lwkt_schedule(&info->i_thread);
 }
 
+/*
+ * Schedule ithread within fast intr handler
+ *
+ * XXX Protect sched_ithd() call with gd_intr_nesting_level?
+ * Interrupts aren't enabled, but still...
+ */
+static __inline void
+ithread_fast_sched(int intr, thread_t td)
+{
+    ++td->td_nest_count;
+
+    /*
+     * We are already in critical section, exit it now to
+     * allow preemption.
+     */
+    crit_exit_quick(td);
+    sched_ithd(intr);
+    crit_enter_quick(td);
+
+    --td->td_nest_count;
+}
+
 /*
  * This function is called directly from the ICU or APIC vector code assembly
  * to process an interrupt.  The critical section and interrupt deferral
@@ -576,23 +598,23 @@ ithread_fast_handler(struct intrframe *frame)
 #endif
     intrec_t rec, next_rec;
     globaldata_t gd;
+    thread_t td;
 
     intr = frame->if_vec;
     gd = mycpu;
+    td = curthread;
+
+    /* We must be in critical section. */
+    KKASSERT(td->td_pri >= TDPRI_CRIT);
 
     info = &intr_info_ary[intr];
 
     /*
      * If we are not processing any FAST interrupts, just schedule the thing.
-     * (since we aren't in a critical section, this can result in a
-     * preemption)
-     *
-     * XXX Protect sched_ithd() call with gd_intr_nesting_level? Interrupts
-     * aren't enabled, but still...
      */
     if (info->i_fast == 0) {
        ++gd->gd_cnt.v_intr;
-       sched_ithd(intr);
+       ithread_fast_sched(intr, td);
        return(1);
     }
 
@@ -611,7 +633,6 @@ ithread_fast_handler(struct intrframe *frame)
      * To reduce overhead, just leave the MP lock held once it has been
      * obtained.
      */
-    crit_enter_gd(gd);
     ++gd->gd_intr_nesting_level;
     ++gd->gd_cnt.v_intr;
     must_schedule = info->i_slow;
@@ -652,17 +673,17 @@ ithread_fast_handler(struct intrframe *frame)
     if (got_mplock)
        rel_mplock();
 #endif
-    crit_exit_gd(gd);
 
     /*
-     * If we had a problem, schedule the thread to catch the missed
-     * records (it will just re-run all of them).  A return value of 0
-     * indicates that all handlers have been run and the interrupt can
-     * be re-enabled, and a non-zero return indicates that the interrupt
-     * thread controls re-enablement.
+     * If we had a problem, or mixed fast and slow interrupt handlers are
+     * registered, schedule the ithread to catch the missed records (it
+     * will just re-run all of them).  A return value of 0 indicates that
+     * all handlers have been run and the interrupt can be re-enabled, and
+     * a non-zero return indicates that the interrupt thread controls
+     * re-enablement.
      */
     if (must_schedule > 0)
-       sched_ithd(intr);
+       ithread_fast_sched(intr, td);
     else if (must_schedule == 0)
        ++info->i_count;
     return(must_schedule);
index 4aa2945..0eeef9b 100644 (file)
@@ -78,34 +78,6 @@ extern inthand_t
        IDTVEC(apic_fastintr20), IDTVEC(apic_fastintr21),
        IDTVEC(apic_fastintr22), IDTVEC(apic_fastintr23);
 
-extern inthand_t
-       IDTVEC(apic_slowintr0), IDTVEC(apic_slowintr1),
-       IDTVEC(apic_slowintr2), IDTVEC(apic_slowintr3),
-       IDTVEC(apic_slowintr4), IDTVEC(apic_slowintr5),
-       IDTVEC(apic_slowintr6), IDTVEC(apic_slowintr7),
-       IDTVEC(apic_slowintr8), IDTVEC(apic_slowintr9),
-       IDTVEC(apic_slowintr10), IDTVEC(apic_slowintr11),
-       IDTVEC(apic_slowintr12), IDTVEC(apic_slowintr13),
-       IDTVEC(apic_slowintr14), IDTVEC(apic_slowintr15),
-       IDTVEC(apic_slowintr16), IDTVEC(apic_slowintr17),
-       IDTVEC(apic_slowintr18), IDTVEC(apic_slowintr19),
-       IDTVEC(apic_slowintr20), IDTVEC(apic_slowintr21),
-       IDTVEC(apic_slowintr22), IDTVEC(apic_slowintr23);
-
-extern inthand_t
-       IDTVEC(apic_wrongintr0), IDTVEC(apic_wrongintr1),
-       IDTVEC(apic_wrongintr2), IDTVEC(apic_wrongintr3),
-       IDTVEC(apic_wrongintr4), IDTVEC(apic_wrongintr5),
-       IDTVEC(apic_wrongintr6), IDTVEC(apic_wrongintr7),
-       IDTVEC(apic_wrongintr8), IDTVEC(apic_wrongintr9),
-       IDTVEC(apic_wrongintr10), IDTVEC(apic_wrongintr11),
-       IDTVEC(apic_wrongintr12), IDTVEC(apic_wrongintr13),
-       IDTVEC(apic_wrongintr14), IDTVEC(apic_wrongintr15),
-       IDTVEC(apic_wrongintr16), IDTVEC(apic_wrongintr17),
-       IDTVEC(apic_wrongintr18), IDTVEC(apic_wrongintr19),
-       IDTVEC(apic_wrongintr20), IDTVEC(apic_wrongintr21),
-       IDTVEC(apic_wrongintr22), IDTVEC(apic_wrongintr23);
-
 static int apic_setvar(int, const void *);
 static int apic_getvar(int, void *);
 static int apic_vectorctl(int, int, int);
@@ -127,36 +99,6 @@ static inthand_t *apic_fastintr[APIC_HWI_VECTORS] = {
        &IDTVEC(apic_fastintr22), &IDTVEC(apic_fastintr23)
 };
 
-static inthand_t *apic_slowintr[APIC_HWI_VECTORS] = {
-       &IDTVEC(apic_slowintr0), &IDTVEC(apic_slowintr1),
-       &IDTVEC(apic_slowintr2), &IDTVEC(apic_slowintr3),
-       &IDTVEC(apic_slowintr4), &IDTVEC(apic_slowintr5),
-       &IDTVEC(apic_slowintr6), &IDTVEC(apic_slowintr7),
-       &IDTVEC(apic_slowintr8), &IDTVEC(apic_slowintr9),
-       &IDTVEC(apic_slowintr10), &IDTVEC(apic_slowintr11),
-       &IDTVEC(apic_slowintr12), &IDTVEC(apic_slowintr13),
-       &IDTVEC(apic_slowintr14), &IDTVEC(apic_slowintr15),
-       &IDTVEC(apic_slowintr16), &IDTVEC(apic_slowintr17),
-       &IDTVEC(apic_slowintr18), &IDTVEC(apic_slowintr19),
-       &IDTVEC(apic_slowintr20), &IDTVEC(apic_slowintr21),
-       &IDTVEC(apic_slowintr22), &IDTVEC(apic_slowintr23)
-};
-
-static inthand_t *apic_wrongintr[APIC_HWI_VECTORS] = {
-       &IDTVEC(apic_wrongintr0), &IDTVEC(apic_wrongintr1),
-       &IDTVEC(apic_wrongintr2), &IDTVEC(apic_wrongintr3),
-       &IDTVEC(apic_wrongintr4), &IDTVEC(apic_wrongintr5),
-       &IDTVEC(apic_wrongintr6), &IDTVEC(apic_wrongintr7),
-       &IDTVEC(apic_wrongintr8), &IDTVEC(apic_wrongintr9),
-       &IDTVEC(apic_wrongintr10), &IDTVEC(apic_wrongintr11),
-       &IDTVEC(apic_wrongintr12), &IDTVEC(apic_wrongintr13),
-       &IDTVEC(apic_wrongintr14), &IDTVEC(apic_wrongintr15),
-       &IDTVEC(apic_wrongintr16), &IDTVEC(apic_wrongintr17),
-       &IDTVEC(apic_wrongintr18), &IDTVEC(apic_wrongintr19),
-       &IDTVEC(apic_wrongintr20), &IDTVEC(apic_wrongintr21),
-       &IDTVEC(apic_wrongintr22), &IDTVEC(apic_wrongintr23)
-};
-
 static int apic_imcr_present;
 
 struct machintr_abi MachIntrABI = {
@@ -254,7 +196,6 @@ static void
 apic_cleanup(void)
 {
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 }
 
 static
@@ -276,29 +217,9 @@ apic_vectorctl(int op, int intr, int flags)
 
     switch(op) {
     case MACHINTR_VECTOR_SETUP:
-       /*
-        * Setup an interrupt vector.  First install the vector in the
-        * cpu's Interrupt Descriptor Table (IDT).
-        */
-       if (flags & INTR_FAST) {
-           vector = TPR_SLOW_INTS + intr;
-           setidt(vector, apic_wrongintr[intr],
-                   SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-           vector = TPR_FAST_INTS + intr;
-           setidt(vector, apic_fastintr[intr],
-                   SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-       } else {
-           vector = TPR_SLOW_INTS + intr;
-
-           /*
-            * This is probably not needed any more. XXX
-            */
-           if (intr == apic_8254_intr || intr == 8) {
-               vector = TPR_FAST_INTS + intr;
-           }
-           setidt(vector, apic_slowintr[intr],
-                   SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-       }
+       vector = TPR_FAST_INTS + intr;
+       setidt(vector, apic_fastintr[intr],
+              SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
 
        /*
         * Now reprogram the vector in the IO APIC.  In order to avoid
@@ -328,8 +249,8 @@ apic_vectorctl(int op, int intr, int flags)
         * installed in the cpu's IDT, but make sure.
         */
        machintr_intrdis(intr);
-       vector = TPR_SLOW_INTS + intr;
-       setidt(vector, apic_slowintr[intr], SDT_SYS386IGT, SEL_KPL,
+       vector = TPR_FAST_INTS + intr;
+       setidt(vector, apic_fastintr[intr], SDT_SYS386IGT, SEL_KPL,
                GSEL(GCODE_SEL, SEL_KPL));
 
        /*
@@ -362,7 +283,7 @@ apic_vectorctl(int op, int intr, int flags)
         * to IDT_OFFSET + intr.
         */
        vector = IDT_OFFSET + intr;
-       setidt(vector, apic_slowintr[intr], SDT_SYS386IGT, SEL_KPL,
+       setidt(vector, apic_fastintr[intr], SDT_SYS386IGT, SEL_KPL,
                GSEL(GCODE_SEL, SEL_KPL));
        break;
     default:
index af433c9..ac4a389 100644 (file)
@@ -32,7 +32,6 @@
 #ifdef APIC_IO
 
 /* IDT vector base for regular (aka. slow) and fast interrupts */
-#define TPR_SLOW_INTS  0x20
 #define TPR_FAST_INTS  0x60
 
 #define APIC_HWI_VECTORS 24
index 59519ca..a392764 100644 (file)
@@ -160,91 +160,15 @@ IDTVEC(vec_name) ;                                                        \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushl   $irq_num ;                                              \
        pushl   %esp ;                   /* pass frame by reference */  \
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        call    ithread_fast_handler ;   /* returns 0 to unmask */      \
+       subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        addl    $8, %esp ;                                              \
        UNMASK_IRQ(irq_num) ;                                           \
 5: ;                                                                   \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \
 
-/*
- * Slow interrupt call handlers run in the following sequence:
- *
- *     - Push the trap frame required by doreti.
- *     - Mask the interrupt and reenable its source.
- *     - If we cannot take the interrupt set its ipending bit and
- *       doreti.  In addition to checking for a critical section
- *       and cpl mask we also check to see if the thread is still
- *       running.  Note that we cannot mess with mp_lock at all
- *       if we entered from a critical section!
- *     - If we can take the interrupt clear its ipending bit
- *       and schedule the thread.  Leave interrupts masked and doreti.
- *
- *     Note that calls to sched_ithd() are made with interrupts enabled
- *     and outside a critical section.  YYY sched_ithd may preempt us
- *     synchronously (fix interrupt stacking).
- *
- *     YYY can cache gd base pointer instead of using hidden %fs
- *     prefixes.
- */
-
-#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)             \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       PUSH_FRAME ;                                                    \
-       maybe_extra_ipending ;                                          \
-;                                                                      \
-       MASK_LEVEL_IRQ(irq_num) ;                                       \
-       incl    PCPU(cnt) + V_INTR ;                                    \
-       movl    $0, lapic_eoi ;                                         \
-       movl    PCPU(curthread),%ebx ;                                  \
-       movl    $0,%eax ;       /* CURRENT CPL IN FRAME (REMOVED) */    \
-       pushl   %eax ;          /* cpl do restore */                    \
-       testl   $-1,TD_NEST_COUNT(%ebx) ;                               \
-       jne     1f ;                                                    \
-       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jl      2f ;                                                    \
-1: ;                                                                   \
-       /* set the pending bit and return, leave the interrupt masked */ \
-       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
-       orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
-       jmp     5f ;                                                    \
-2: ;                                                                   \
-       /* set running bit, clear pending bit, run handler */           \
-       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
-       incl    TD_NEST_COUNT(%ebx) ;                                   \
-       sti ;                                                           \
-       pushl   $irq_num ;                                              \
-       call    sched_ithd ;                                            \
-       addl    $4,%esp ;                                               \
-       cli ;                                                           \
-       decl    TD_NEST_COUNT(%ebx) ;                                   \
-5: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       jmp     doreti ;                                                \
-
-/*
- * Wrong interrupt call handlers.  We program these into APIC vectors
- * that should otherwise never occur.  For example, we program the SLOW
- * vector for irq N with this when we program the FAST vector with the
- * real interrupt.
- *
- * XXX for now all we can do is EOI it.  We can't call do_wrongintr
- * (yet) because we could be in a critical section.
- */
-#define WRONGINTR(irq_num,vec_name)                                    \
-       .text ;                                                         \
-       SUPERALIGN_TEXT  ;                                              \
-IDTVEC(vec_name) ;                                                     \
-       PUSH_FRAME ;                                                    \
-       movl    $0, lapic_eoi ; /* End Of Interrupt to APIC */          \
-       /*pushl $irq_num ;*/                                            \
-       /*call  do_wrongintr ;*/                                        \
-       /*addl  $4,%esp ;*/                                             \
-       POP_FRAME ;                                                     \
-       iret  ;                                                         \
-
 #endif
 
 /*
@@ -452,58 +376,6 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
-       
-       /* YYY what is this garbage? */
-
-       SLOW_INTR(0,apic_slowintr0,)
-       SLOW_INTR(1,apic_slowintr1,)
-       SLOW_INTR(2,apic_slowintr2,)
-       SLOW_INTR(3,apic_slowintr3,)
-       SLOW_INTR(4,apic_slowintr4,)
-       SLOW_INTR(5,apic_slowintr5,)
-       SLOW_INTR(6,apic_slowintr6,)
-       SLOW_INTR(7,apic_slowintr7,)
-       SLOW_INTR(8,apic_slowintr8,)
-       SLOW_INTR(9,apic_slowintr9,)
-       SLOW_INTR(10,apic_slowintr10,)
-       SLOW_INTR(11,apic_slowintr11,)
-       SLOW_INTR(12,apic_slowintr12,)
-       SLOW_INTR(13,apic_slowintr13,)
-       SLOW_INTR(14,apic_slowintr14,)
-       SLOW_INTR(15,apic_slowintr15,)
-       SLOW_INTR(16,apic_slowintr16,)
-       SLOW_INTR(17,apic_slowintr17,)
-       SLOW_INTR(18,apic_slowintr18,)
-       SLOW_INTR(19,apic_slowintr19,)
-       SLOW_INTR(20,apic_slowintr20,)
-       SLOW_INTR(21,apic_slowintr21,)
-       SLOW_INTR(22,apic_slowintr22,)
-       SLOW_INTR(23,apic_slowintr23,)
-
-       WRONGINTR(0,apic_wrongintr0)
-       WRONGINTR(1,apic_wrongintr1)
-       WRONGINTR(2,apic_wrongintr2)
-       WRONGINTR(3,apic_wrongintr3)
-       WRONGINTR(4,apic_wrongintr4)
-       WRONGINTR(5,apic_wrongintr5)
-       WRONGINTR(6,apic_wrongintr6)
-       WRONGINTR(7,apic_wrongintr7)
-       WRONGINTR(8,apic_wrongintr8)
-       WRONGINTR(9,apic_wrongintr9)
-       WRONGINTR(10,apic_wrongintr10)
-       WRONGINTR(11,apic_wrongintr11)
-       WRONGINTR(12,apic_wrongintr12)
-       WRONGINTR(13,apic_wrongintr13)
-       WRONGINTR(14,apic_wrongintr14)
-       WRONGINTR(15,apic_wrongintr15)
-       WRONGINTR(16,apic_wrongintr16)
-       WRONGINTR(17,apic_wrongintr17)
-       WRONGINTR(18,apic_wrongintr18)
-       WRONGINTR(19,apic_wrongintr19)
-       WRONGINTR(20,apic_wrongintr20)
-       WRONGINTR(21,apic_wrongintr21)
-       WRONGINTR(22,apic_wrongintr22)
-       WRONGINTR(23,apic_wrongintr23)
 MCOUNT_LABEL(eintr)
 
 #endif
index f0b7d35..aa826df 100644 (file)
@@ -203,7 +203,6 @@ ASSYM(FIRST_SOFTINT, FIRST_SOFTINT);
 ASSYM(MDGLOBALDATA_BASEALLOC_PAGES, MDGLOBALDATA_BASEALLOC_PAGES);
 
 ASSYM(GD_FPENDING, offsetof(struct mdglobaldata, gd_fpending));
-ASSYM(GD_IPENDING, offsetof(struct mdglobaldata, gd_ipending));
 ASSYM(GD_SPENDING, offsetof(struct mdglobaldata, gd_spending));
 ASSYM(GD_COMMON_TSS, offsetof(struct mdglobaldata, gd_common_tss));
 ASSYM(GD_COMMON_TSSD, offsetof(struct mdglobaldata, gd_common_tssd));
index a8f127f..2c488d2 100644 (file)
@@ -77,7 +77,7 @@
        .globl  gd_ss_eflags, gd_intr_nesting_level
        .globl  gd_CMAP1, gd_CMAP2, gd_CMAP3, gd_PMAP1
        .globl  gd_CADDR1, gd_CADDR2, gd_CADDR3, gd_PADDR1
-       .globl  gd_spending, gd_ipending, gd_fpending
+       .globl  gd_spending, gd_fpending
        .globl  gd_cnt, gd_private_tss
 
        .set    gd_cpuid,globaldata + GD_CPUID
@@ -94,7 +94,6 @@
        .set    gd_CADDR3,globaldata + GD_PRV_CADDR3
        .set    gd_PADDR1,globaldata + GD_PRV_PADDR1
        .set    gd_fpending,globaldata + GD_FPENDING
-       .set    gd_ipending,globaldata + GD_IPENDING
        .set    gd_spending,globaldata + GD_SPENDING
        .set    gd_cnt,globaldata + GD_CNT
 
index 224c13a..08d4d82 100644 (file)
@@ -2675,7 +2675,6 @@ ap_init(void)
         */
        __asm __volatile("sti; pause; pause"::);
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 
        initclocks_pcpu();      /* clock interrupts (via IPIs) */
        lwkt_process_ipiq();
index cc253c4..32aaa47 100644 (file)
@@ -71,16 +71,6 @@ extern inthand_t
        IDTVEC(icu_fastintr12), IDTVEC(icu_fastintr13),
        IDTVEC(icu_fastintr14), IDTVEC(icu_fastintr15);
 
-extern inthand_t
-       IDTVEC(icu_slowintr0), IDTVEC(icu_slowintr1),
-       IDTVEC(icu_slowintr2), IDTVEC(icu_slowintr3),
-       IDTVEC(icu_slowintr4), IDTVEC(icu_slowintr5),
-       IDTVEC(icu_slowintr6), IDTVEC(icu_slowintr7),
-       IDTVEC(icu_slowintr8), IDTVEC(icu_slowintr9),
-       IDTVEC(icu_slowintr10), IDTVEC(icu_slowintr11),
-       IDTVEC(icu_slowintr12), IDTVEC(icu_slowintr13),
-       IDTVEC(icu_slowintr14), IDTVEC(icu_slowintr15);
-
 static int icu_vectorctl(int, int, int);
 static int icu_setvar(int, const void *);
 static int icu_getvar(int, void *);
@@ -98,17 +88,6 @@ static inthand_t *icu_fastintr[ICU_HWI_VECTORS] = {
        &IDTVEC(icu_fastintr14), &IDTVEC(icu_fastintr15)
 };
 
-static inthand_t *icu_slowintr[ICU_HWI_VECTORS] = {
-       &IDTVEC(icu_slowintr0), &IDTVEC(icu_slowintr1),
-       &IDTVEC(icu_slowintr2), &IDTVEC(icu_slowintr3),
-       &IDTVEC(icu_slowintr4), &IDTVEC(icu_slowintr5),
-       &IDTVEC(icu_slowintr6), &IDTVEC(icu_slowintr7),
-       &IDTVEC(icu_slowintr8), &IDTVEC(icu_slowintr9),
-       &IDTVEC(icu_slowintr10), &IDTVEC(icu_slowintr11),
-       &IDTVEC(icu_slowintr12), &IDTVEC(icu_slowintr13),
-       &IDTVEC(icu_slowintr14), &IDTVEC(icu_slowintr15)
-};
-
 struct machintr_abi MachIntrABI = {
     MACHINTR_ICU,
     .intrdis = ICU_INTRDIS,
@@ -198,7 +177,6 @@ void
 icu_cleanup(void)
 {
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 }
 
 
@@ -218,14 +196,13 @@ icu_vectorctl(int op, int intr, int flags)
 
     switch(op) {
     case MACHINTR_VECTOR_SETUP:
-       setidt(IDT_OFFSET + intr,
-               flags & INTR_FAST ? icu_fastintr[intr] : icu_slowintr[intr],
-               SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+       setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYS386IGT, SEL_KPL,
+               GSEL(GCODE_SEL, SEL_KPL));
        machintr_intren(intr);
        break;
     case MACHINTR_VECTOR_TEARDOWN:
     case MACHINTR_VECTOR_SETDEFAULT:
-       setidt(IDT_OFFSET + intr, icu_slowintr[intr], SDT_SYS386IGT, SEL_KPL,
+       setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYS386IGT, SEL_KPL,
                GSEL(GCODE_SEL, SEL_KPL));
        machintr_intrdis(intr);
        break;
index efc3d4a..7c7c7c9 100644 (file)
@@ -159,71 +159,15 @@ IDTVEC(vec_name) ;                                                        \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushl   $irq_num ;                                              \
        pushl   %esp ;                  /* pass frame by reference */   \
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        call    ithread_fast_handler ;  /* returns 0 to unmask int */   \
+       subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        addl    $8,%esp ;                                               \
        UNMASK_IRQ(icu, irq_num) ;                                      \
 5: ;                                                                   \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \
 
-/*
- * Slow interrupt call handlers run in the following sequence:
- *
- *     - Push the trap frame required by doreti.
- *     - Mask the interrupt and reenable its source.
- *     - If we cannot take the interrupt set its ipending bit and
- *       doreti.  In addition to checking for a critical section
- *       and cpl mask we also check to see if the thread is still
- *       running.
- *     - If we can take the interrupt clear its ipending bit
- *       and schedule its thread.  Leave interrupts masked and doreti.
- *
- *     sched_ithd() is called with interrupts enabled and outside of a
- *     critical section (so it can preempt us).
- *
- *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
- *
- *     Note that intr_nesting_level is not bumped during sched_ithd because
- *     blocking allocations are allowed in the preemption case.
- *
- *     YYY can cache gd base pointer instead of using hidden %fs
- *     prefixes.
- */
-
-#define        SLOW_INTR(irq_num, vec_name, icu, enable_icus)                   \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       PUSH_FRAME ;                                                    \
-       FAKE_MCOUNT(15*4(%esp)) ;                                       \
-       MASK_IRQ(icu, irq_num) ;                                        \
-       incl    PCPU(cnt) + V_INTR ;                                    \
-       enable_icus ;                                                   \
-       movl    PCPU(curthread),%ebx ;                                  \
-       pushl   $0 ;                    /* DUMMY CPL FOR DORETI */      \
-       testl   $-1,TD_NEST_COUNT(%ebx) ;                               \
-       jne     1f ;                                                    \
-       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jl      2f ;                                                    \
-1: ;                                                                   \
-       /* set the pending bit and return, leave interrupt masked */    \
-       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
-       orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
-       jmp     5f ;                                                    \
-2: ;                                                                   \
-       /* set running bit, clear pending bit, run handler */           \
-       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
-       incl    TD_NEST_COUNT(%ebx) ;                                   \
-       sti ;                                                           \
-       pushl   $irq_num ;                                              \
-       call    sched_ithd ;                                            \
-       addl    $4,%esp ;                                               \
-       cli ;                                                           \
-       decl    TD_NEST_COUNT(%ebx) ;                                   \
-5: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       jmp     doreti ;                                                \
-
 /*
  * Unmask a slow interrupt.  This function is used by interrupt threads
  * after they have descheduled themselves to reenable interrupts and
@@ -258,24 +202,6 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(13,icu_fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(14,icu_fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(15,icu_fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
-
-       SLOW_INTR(0,icu_slowintr0, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(1,icu_slowintr1, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(2,icu_slowintr2, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(3,icu_slowintr3, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(4,icu_slowintr4, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(5,icu_slowintr5, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(6,icu_slowintr6, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(7,icu_slowintr7, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(8,icu_slowintr8, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(9,icu_slowintr9, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(10,icu_slowintr10, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(11,icu_slowintr11, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(12,icu_slowintr12, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(13,icu_slowintr13, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(14,icu_slowintr14, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(15,icu_slowintr15, IO_ICU2, ENABLE_ICU1_AND_2)
-
 MCOUNT_LABEL(eintr)
 
        .data
index 99033a8..6fc2220 100644 (file)
@@ -54,9 +54,9 @@
 
 /*
  * Note on interrupt control.  Pending interrupts not yet dispatched are
- * marked in gd_fpending, gd_ipending, or gd_spending.  Once dispatched 
- * the interrupt's pending bit is cleared and the interrupt is masked. 
- * Upon completion the interrupt is unmasked.
+ * marked in gd_fpending or gd_spending.  Once dispatched the interrupt's
+ * pending bit is cleared and the interrupt is masked.  Upon completion
+ * the interrupt is unmasked.
  *
  * For edge triggered interrupts interrupts may be enabled again at this
  * point and if they occur before the interrupt service routine is complete
@@ -76,7 +76,7 @@ struct mdglobaldata {
        union savefpu   gd_savefpu;     /* fast bcopy/zero temp fpu save area */
        int             gd_fpu_lock;    /* fast bcopy/zero cpu lock */
        int             gd_fpending;    /* fast interrupt pending */
-       int             gd_ipending;    /* normal interrupt pending */
+       int             unused002;
        int             gd_spending;    /* software interrupt pending */
        int             gd_sdelayed;    /* delayed software ints */
        int             gd_currentldt;
index d19acd2..a6e5bae 100644 (file)
@@ -55,7 +55,7 @@
  * AT/386
  * Vector interrupt control section
  *
- *  ipending   - Pending interrupts (set when a masked interrupt occurs)
+ *  fpending   - Pending interrupts (set when a masked interrupt occurs)
  *  spending   - Pending software interrupts
  */
        .data
@@ -131,9 +131,6 @@ doreti_next:
        testl   PCPU(fpending),%ecx     /* check for an unmasked fast int */
        jnz     doreti_fast
 
-       testl   PCPU(ipending),%ecx     /* check for an unmasked slow int */
-       jnz     doreti_intr
-
        movl    PCPU(spending),%ecx     /* check for a pending software int */
        cmpl    $0,%ecx
        jnz     doreti_soft
@@ -223,33 +220,7 @@ doreti_fast:
        jnc     doreti_next
        pushl   %eax                    /* save IRQ mask unavailable for BGL */
                                        /* NOTE: is also CPL in frame */
-       incl    PCPU(intr_nesting_level)
        call    dofastunpend            /* unpend fast intr %ecx */
-       decl    PCPU(intr_nesting_level)
-       popl    %eax
-       jmp     doreti_next
-
-       /*
-        *  INTR interrupt pending
-        *
-        *  Temporarily back-out our critical section to allow an interrupt
-        *  preempt us when we schedule it.  Bump intr_nesting_level to
-        *  prevent the switch code from recursing via splz too deeply.
-        */
-       ALIGN_TEXT
-doreti_intr:
-       andl    PCPU(ipending),%ecx     /* only check normal ints */
-       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
-       btrl    %ecx, PCPU(ipending)    /* is it really still pending? */
-       jnc     doreti_next
-       pushl   %eax
-       pushl   %ecx
-       incl    TD_NEST_COUNT(%ebx)     /* prevent doreti/splz nesting */
-       subl    $TDPRI_CRIT,TD_PRI(%ebx) /* so we can preempt */
-       call    sched_ithd              /* YYY must pull in imasks */
-       addl    $TDPRI_CRIT,TD_PRI(%ebx)
-       decl    TD_NEST_COUNT(%ebx)
-       addl    $4,%esp
        popl    %eax
        jmp     doreti_next
 
@@ -360,9 +331,6 @@ splz_next:
        testl   PCPU(fpending),%ecx     /* check for an unmasked fast int */
        jnz     splz_fast
 
-       testl   PCPU(ipending),%ecx
-       jnz     splz_intr
-
        movl    PCPU(spending),%ecx
        cmpl    $0,%ecx
        jnz     splz_soft
@@ -393,33 +361,7 @@ splz_fast:
        btrl    %ecx, PCPU(fpending)    /* is it really still pending? */
        jnc     splz_next
        pushl   %eax
-       incl    PCPU(intr_nesting_level)
        call    dofastunpend            /* unpend fast intr %ecx */
-       decl    PCPU(intr_nesting_level)
-       popl    %eax
-       jmp     splz_next
-
-       /*
-        *  INTR interrupt pending
-        *
-        *  Temporarily back-out our critical section to allow the interrupt
-        *  preempt us.
-        */
-       ALIGN_TEXT
-splz_intr:
-       andl    PCPU(ipending),%ecx     /* only check normal ints */
-       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
-       btrl    %ecx, PCPU(ipending)    /* is it really still pending? */
-       jnc     splz_next
-       sti
-       pushl   %eax
-       pushl   %ecx
-       subl    $TDPRI_CRIT,TD_PRI(%ebx)
-       incl    TD_NEST_COUNT(%ebx)     /* prevent doreti/splz nesting */
-       call    sched_ithd              /* YYY must pull in imasks */
-       addl    $TDPRI_CRIT,TD_PRI(%ebx)
-       decl    TD_NEST_COUNT(%ebx)     /* prevent doreti/splz nesting */
-       addl    $4,%esp
        popl    %eax
        jmp     splz_next
 
index b9f30d4..76433a3 100644 (file)
@@ -38,8 +38,8 @@
 #include <machine_base/isa/intr_machdep.h>
 
 /*
- * Bits in the ipending bitmap variable must be set atomically because
- * ipending may be manipulated by interrupts or other cpu's without holding 
+ * Bits in the spending bitmap variable must be set atomically because
+ * spending may be manipulated by interrupts or other cpu's without holding 
  * any locks.
  *
  * Note: setbits uses a locked or, making simple cases MP safe.
index a40ebe6..776f23a 100644 (file)
@@ -200,7 +200,6 @@ ASSYM(GD_USER_GS, offsetof(struct mdglobaldata, gd_user_gs));
 ASSYM(GD_INTR_NESTING_LEVEL, offsetof(struct mdglobaldata, mi.gd_intr_nesting_level));
 
 ASSYM(GD_FPENDING, offsetof(struct mdglobaldata, gd_fpending));
-ASSYM(GD_IPENDING, offsetof(struct mdglobaldata, gd_ipending));
 ASSYM(GD_SPENDING, offsetof(struct mdglobaldata, gd_spending));
 ASSYM(GD_COMMON_TSS, offsetof(struct mdglobaldata, gd_common_tss));
 ASSYM(GD_COMMON_TSSD, offsetof(struct mdglobaldata, gd_common_tssd));
index 27327b7..84a5c94 100644 (file)
@@ -81,7 +81,7 @@
        .globl  gd_ss_eflags, gd_intr_nesting_level
        .globl  gd_CMAP1, gd_CMAP2, gd_CMAP3, gd_PMAP1
        .globl  gd_CADDR1, gd_CADDR2, gd_CADDR3, gd_PADDR1
-       .globl  gd_spending, gd_ipending, gd_fpending
+       .globl  gd_spending, gd_fpending
        .globl  gd_cnt, gd_private_tss
        .globl  gd_scratch_rsp, gd_rsp0
        .globl  gd_user_fs, gd_user_gs
        .set    gd_CADDR3,globaldata + GD_PRV_CADDR3
        .set    gd_PADDR1,globaldata + GD_PRV_PADDR1
        .set    gd_fpending,globaldata + GD_FPENDING
-       .set    gd_ipending,globaldata + GD_IPENDING
        .set    gd_spending,globaldata + GD_SPENDING
        .set    gd_cnt,globaldata + GD_CNT
        .set    gd_scratch_rsp,globaldata + GD_SCRATCH_RSP
index 5f28af4..c830cce 100644 (file)
@@ -87,7 +87,7 @@
  * AT/386
  * Vector interrupt control section
  *
- *  ipending   - Pending interrupts (set when a masked interrupt occurs)
+ *  fpending   - Pending fast interrupts (set when a masked interrupt occurs)
  *  spending   - Pending software interrupts
  */
        .data
@@ -159,9 +159,6 @@ doreti_next:
        testl   PCPU(fpending),%ecx     /* check for an unmasked fast int */
        jnz     doreti_fast
 
-       testl   PCPU(ipending),%ecx     /* check for an unmasked slow int */
-       jnz     doreti_intr
-
        movl    PCPU(spending),%ecx     /* check for a pending software int */
        cmpl    $0,%ecx
        jnz     doreti_soft
@@ -249,9 +246,7 @@ doreti_fast:
        /* MP lock successful */
 #endif
 #endif
-       incl    PCPU(intr_nesting_level)
        call    dofastunpend            /* unpend fast intr %ecx */
-       decl    PCPU(intr_nesting_level)
 #if 0
 #ifdef SMP
        call    rel_mplock
@@ -265,29 +260,6 @@ doreti_fast:
        orl     PCPU(fpending),%eax
        jmp     doreti_next
 
-       /*
-        *  INTR interrupt pending
-        *
-        *  Temporarily back-out our critical section to allow an interrupt
-        *  preempt us when we schedule it.  Bump intr_nesting_level to
-        *  prevent the switch code from recursing via splz too deeply.
-        */
-       ALIGN_TEXT
-doreti_intr:
-       andl    PCPU(ipending),%ecx     /* only check normal ints */
-       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
-       btrl    %ecx, PCPU(ipending)    /* is it really still pending? */
-       jnc     doreti_next
-       pushq   %rax
-       movl    %ecx,%edi               /* argument to C function */
-       incl    TD_NEST_COUNT(%rbx)     /* prevent doreti/splz nesting */
-       subl    $TDPRI_CRIT,TD_PRI(%rbx) /* so we can preempt */
-       call    sched_ithd              /* YYY must pull in imasks */
-       addl    $TDPRI_CRIT,TD_PRI(%rbx)
-       decl    TD_NEST_COUNT(%rbx)
-       popq    %rax
-       jmp     doreti_next
-
        /*
         *  SOFT interrupt pending
         *
@@ -393,9 +365,6 @@ splz_next:
        testl   PCPU(fpending),%ecx     /* check for an unmasked fast int */
        jnz     splz_fast
 
-       testl   PCPU(ipending),%ecx
-       jnz     splz_intr
-
        movl    PCPU(spending),%ecx
        cmpl    $0,%ecx
        jnz     splz_soft
@@ -434,9 +403,7 @@ splz_fast:
        jz      1f
 #endif
 #endif
-       incl    PCPU(intr_nesting_level)
        call    dofastunpend            /* unpend fast intr %ecx */
-       decl    PCPU(intr_nesting_level)
 #if 0
 #ifdef SMP
        call    rel_mplock
@@ -450,29 +417,6 @@ splz_fast:
        orl     PCPU(fpending),%eax
        jmp     splz_next
 
-       /*
-        *  INTR interrupt pending
-        *
-        *  Temporarily back-out our critical section to allow the interrupt
-        *  preempt us.
-        */
-       ALIGN_TEXT
-splz_intr:
-       andl    PCPU(ipending),%ecx     /* only check normal ints */
-       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
-       btrl    %ecx, PCPU(ipending)    /* is it really still pending? */
-       jnc     splz_next
-       sti
-       pushq   %rax
-       movl    %ecx,%edi               /* C argument */
-       subl    $TDPRI_CRIT,TD_PRI(%rbx)
-       incl    TD_NEST_COUNT(%rbx)     /* prevent doreti/splz nesting */
-       call    sched_ithd              /* YYY must pull in imasks */
-       addl    $TDPRI_CRIT,TD_PRI(%rbx)
-       decl    TD_NEST_COUNT(%rbx)     /* prevent doreti/splz nesting */
-       popq    %rax
-       jmp     splz_next
-
        /*
         *  SOFT interrupt pending
         *
index ff9ce08..8fae7c3 100644 (file)
@@ -36,8 +36,8 @@
 #include <machine/globaldata.h>
 
 /*
- * Bits in the ipending bitmap variable must be set atomically because
- * ipending may be manipulated by interrupts or other cpu's without holding 
+ * Bits in the spending bitmap variable must be set atomically because
+ * spending may be manipulated by interrupts or other cpu's without holding 
  * any locks.
  *
  * Note: setbits uses a locked or, making simple cases MP safe.
index 3155e0c..dab1fc5 100644 (file)
@@ -2565,7 +2565,6 @@ ap_init(void)
         */
        __asm __volatile("sti; pause; pause"::);
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 
        initclocks_pcpu();      /* clock interrupts (via IPIs) */
        lwkt_process_ipiq();
index 692d025..25c3e81 100644 (file)
@@ -78,34 +78,6 @@ extern inthand_t
        IDTVEC(apic_fastintr20), IDTVEC(apic_fastintr21),
        IDTVEC(apic_fastintr22), IDTVEC(apic_fastintr23);
 
-extern inthand_t
-       IDTVEC(apic_slowintr0), IDTVEC(apic_slowintr1),
-       IDTVEC(apic_slowintr2), IDTVEC(apic_slowintr3),
-       IDTVEC(apic_slowintr4), IDTVEC(apic_slowintr5),
-       IDTVEC(apic_slowintr6), IDTVEC(apic_slowintr7),
-       IDTVEC(apic_slowintr8), IDTVEC(apic_slowintr9),
-       IDTVEC(apic_slowintr10), IDTVEC(apic_slowintr11),
-       IDTVEC(apic_slowintr12), IDTVEC(apic_slowintr13),
-       IDTVEC(apic_slowintr14), IDTVEC(apic_slowintr15),
-       IDTVEC(apic_slowintr16), IDTVEC(apic_slowintr17),
-       IDTVEC(apic_slowintr18), IDTVEC(apic_slowintr19),
-       IDTVEC(apic_slowintr20), IDTVEC(apic_slowintr21),
-       IDTVEC(apic_slowintr22), IDTVEC(apic_slowintr23);
-
-extern inthand_t
-       IDTVEC(apic_wrongintr0), IDTVEC(apic_wrongintr1),
-       IDTVEC(apic_wrongintr2), IDTVEC(apic_wrongintr3),
-       IDTVEC(apic_wrongintr4), IDTVEC(apic_wrongintr5),
-       IDTVEC(apic_wrongintr6), IDTVEC(apic_wrongintr7),
-       IDTVEC(apic_wrongintr8), IDTVEC(apic_wrongintr9),
-       IDTVEC(apic_wrongintr10), IDTVEC(apic_wrongintr11),
-       IDTVEC(apic_wrongintr12), IDTVEC(apic_wrongintr13),
-       IDTVEC(apic_wrongintr14), IDTVEC(apic_wrongintr15),
-       IDTVEC(apic_wrongintr16), IDTVEC(apic_wrongintr17),
-       IDTVEC(apic_wrongintr18), IDTVEC(apic_wrongintr19),
-       IDTVEC(apic_wrongintr20), IDTVEC(apic_wrongintr21),
-       IDTVEC(apic_wrongintr22), IDTVEC(apic_wrongintr23);
-
 static int apic_setvar(int, const void *);
 static int apic_getvar(int, void *);
 static int apic_vectorctl(int, int, int);
@@ -127,36 +99,6 @@ static inthand_t *apic_fastintr[APIC_HWI_VECTORS] = {
        &IDTVEC(apic_fastintr22), &IDTVEC(apic_fastintr23)
 };
 
-static inthand_t *apic_slowintr[APIC_HWI_VECTORS] = {
-       &IDTVEC(apic_slowintr0), &IDTVEC(apic_slowintr1),
-       &IDTVEC(apic_slowintr2), &IDTVEC(apic_slowintr3),
-       &IDTVEC(apic_slowintr4), &IDTVEC(apic_slowintr5),
-       &IDTVEC(apic_slowintr6), &IDTVEC(apic_slowintr7),
-       &IDTVEC(apic_slowintr8), &IDTVEC(apic_slowintr9),
-       &IDTVEC(apic_slowintr10), &IDTVEC(apic_slowintr11),
-       &IDTVEC(apic_slowintr12), &IDTVEC(apic_slowintr13),
-       &IDTVEC(apic_slowintr14), &IDTVEC(apic_slowintr15),
-       &IDTVEC(apic_slowintr16), &IDTVEC(apic_slowintr17),
-       &IDTVEC(apic_slowintr18), &IDTVEC(apic_slowintr19),
-       &IDTVEC(apic_slowintr20), &IDTVEC(apic_slowintr21),
-       &IDTVEC(apic_slowintr22), &IDTVEC(apic_slowintr23)
-};
-
-static inthand_t *apic_wrongintr[APIC_HWI_VECTORS] = {
-       &IDTVEC(apic_wrongintr0), &IDTVEC(apic_wrongintr1),
-       &IDTVEC(apic_wrongintr2), &IDTVEC(apic_wrongintr3),
-       &IDTVEC(apic_wrongintr4), &IDTVEC(apic_wrongintr5),
-       &IDTVEC(apic_wrongintr6), &IDTVEC(apic_wrongintr7),
-       &IDTVEC(apic_wrongintr8), &IDTVEC(apic_wrongintr9),
-       &IDTVEC(apic_wrongintr10), &IDTVEC(apic_wrongintr11),
-       &IDTVEC(apic_wrongintr12), &IDTVEC(apic_wrongintr13),
-       &IDTVEC(apic_wrongintr14), &IDTVEC(apic_wrongintr15),
-       &IDTVEC(apic_wrongintr16), &IDTVEC(apic_wrongintr17),
-       &IDTVEC(apic_wrongintr18), &IDTVEC(apic_wrongintr19),
-       &IDTVEC(apic_wrongintr20), &IDTVEC(apic_wrongintr21),
-       &IDTVEC(apic_wrongintr22), &IDTVEC(apic_wrongintr23)
-};
-
 static int apic_imcr_present;
 
 struct machintr_abi MachIntrABI = {
@@ -254,7 +196,6 @@ static void
 apic_cleanup(void)
 {
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 }
 
 static
@@ -276,29 +217,8 @@ apic_vectorctl(int op, int intr, int flags)
 
     switch(op) {
     case MACHINTR_VECTOR_SETUP:
-       /*
-        * Setup an interrupt vector.  First install the vector in the
-        * cpu's Interrupt Descriptor Table (IDT).
-        */
-       if (flags & INTR_FAST) {
-           vector = TPR_SLOW_INTS + intr;
-           setidt(vector, apic_wrongintr[intr],
-                   SDT_SYSIGT, SEL_KPL, 0);
-           vector = TPR_FAST_INTS + intr;
-           setidt(vector, apic_fastintr[intr],
-                   SDT_SYSIGT, SEL_KPL, 0);
-       } else {
-           vector = TPR_SLOW_INTS + intr;
-
-           /*
-            * This is probably not needed any more. XXX
-            */
-           if (intr == apic_8254_intr || intr == 8) {
-               vector = TPR_FAST_INTS + intr;
-           }
-           setidt(vector, apic_slowintr[intr],
-                   SDT_SYSIGT, SEL_KPL, 0);
-       }
+       vector = TPR_FAST_INTS + intr;
+       setidt(vector, apic_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0);
 
        /*
         * Now reprogram the vector in the IO APIC.  In order to avoid
@@ -325,9 +245,8 @@ apic_vectorctl(int op, int intr, int flags)
         * installed in the cpu's IDT, but make sure.
         */
        machintr_intrdis(intr);
-       vector = TPR_SLOW_INTS + intr;
-       setidt(vector, apic_slowintr[intr], SDT_SYSIGT, SEL_KPL,
-               0);
+       vector = TPR_FAST_INTS + intr;
+       setidt(vector, apic_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0);
 
        /*
-        * And then reprogram the IO APIC to point to the SLOW vector (it may
+        * And then reprogram the IO APIC to point to the FAST vector (it may
@@ -359,7 +278,7 @@ apic_vectorctl(int op, int intr, int flags)
         * to IDT_OFFSET + intr.
         */
        vector = IDT_OFFSET + intr;
-       setidt(vector, apic_slowintr[intr], SDT_SYSIGT, SEL_KPL, 0);
+       setidt(vector, apic_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0);
        break;
     default:
        error = EOPNOTSUPP;
index f3d7358..6073714 100644 (file)
@@ -33,7 +33,6 @@
 #ifdef APIC_IO
 
-/* IDT vector base for regular (aka. slow) and fast interrupts */
+/* IDT vector base for fast interrupts */
-#define TPR_SLOW_INTS  0x20
 #define TPR_FAST_INTS  0x60
 
 #define APIC_HWI_VECTORS 24
index 048d0ae..349dd61 100644 (file)
@@ -142,90 +142,15 @@ IDTVEC(vec_name) ;                                                        \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;              /* trapframe -> intrframe */    \
        movq    %rsp, %rdi ;            /* pass frame by reference */   \
+       addl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
       call    ithread_fast_handler ;  /* returns 0 to unmask */       \
+       subl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
        addq    $8, %rsp ;              /* intrframe -> trapframe */    \
        UNMASK_IRQ(irq_num) ;                                           \
 5: ;                                                                   \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \
 
-/*
- * Slow interrupt call handlers run in the following sequence:
- *
- *     - Push the trap frame required by doreti.
- *     - Mask the interrupt and reenable its source.
- *     - If we cannot take the interrupt set its ipending bit and
- *       doreti.  In addition to checking for a critical section
- *       and cpl mask we also check to see if the thread is still
- *       running.  Note that we cannot mess with mp_lock at all
- *       if we entered from a critical section!
- *     - If we can take the interrupt clear its ipending bit
- *       and schedule the thread.  Leave interrupts masked and doreti.
- *
- *     Note that calls to sched_ithd() are made with interrupts enabled
- *     and outside a critical section.  YYY sched_ithd may preempt us
- *     synchronously (fix interrupt stacking).
- *
- *     YYY can cache gd base pointer instead of using hidden %fs
- *     prefixes.
- */
-
-#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)             \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       APIC_PUSH_FRAME ;                                                       \
-       maybe_extra_ipending ;                                          \
-;                                                                      \
-       MASK_LEVEL_IRQ(irq_num) ;                                       \
-       incl    PCPU(cnt) + V_INTR ;                                    \
-       movq    lapic, %rax ;                                           \
-       movl    $0, LA_EOI(%rax) ;                                      \
-       movq    PCPU(curthread),%rbx ;                                  \
-       testl   $-1,TD_NEST_COUNT(%rbx) ;                               \
-       jne     1f ;                                                    \
-       cmpl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
-       jl      2f ;                                                    \
-1: ;                                                                   \
-       /* set the pending bit and return, leave the interrupt masked */ \
-       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
-       orl     $RQF_INTPEND,PCPU(reqflags) ;                           \
-       jmp     5f ;                                                    \
-2: ;                                                                   \
-       /* set running bit, clear pending bit, run handler */           \
-       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
-       incl    TD_NEST_COUNT(%rbx) ;                                   \
-       sti ;                                                           \
-       movq    $irq_num,%rdi ;                                 \
-       call    sched_ithd ;                                            \
-       cli ;                                                           \
-       decl    TD_NEST_COUNT(%rbx) ;                                   \
-5: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       jmp     doreti ;                                                \
-
-/*
- * Wrong interrupt call handlers.  We program these into APIC vectors
- * that should otherwise never occur.  For example, we program the SLOW
- * vector for irq N with this when we program the FAST vector with the
- * real interrupt.
- *
- * XXX for now all we can do is EOI it.  We can't call do_wrongintr
- * (yet) because we could be in a critical section.
- */
-#define WRONGINTR(irq_num,vec_name)                                    \
-       .text ;                                                         \
-       SUPERALIGN_TEXT  ;                                              \
-IDTVEC(vec_name) ;                                                     \
-       APIC_PUSH_FRAME ;                                               \
-       movq    lapic,%rax ;                                            \
-       movl    $0,LA_EOI(%rax) ;       /* End Of Interrupt to APIC */  \
-       /*pushl $irq_num ;*/                                            \
-       /*call  do_wrongintr ;*/                                        \
-       /*addl  $4,%esp ;*/                                             \
-       APIC_POP_FRAME ;                                                \
-       iretq  ;                                                                \
-
 #endif
 
 /*
@@ -455,58 +380,6 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(21,apic_fastintr21)
        FAST_INTR(22,apic_fastintr22)
        FAST_INTR(23,apic_fastintr23)
-       
-       /* YYY what is this garbage? */
-
-       SLOW_INTR(0,apic_slowintr0,)
-       SLOW_INTR(1,apic_slowintr1,)
-       SLOW_INTR(2,apic_slowintr2,)
-       SLOW_INTR(3,apic_slowintr3,)
-       SLOW_INTR(4,apic_slowintr4,)
-       SLOW_INTR(5,apic_slowintr5,)
-       SLOW_INTR(6,apic_slowintr6,)
-       SLOW_INTR(7,apic_slowintr7,)
-       SLOW_INTR(8,apic_slowintr8,)
-       SLOW_INTR(9,apic_slowintr9,)
-       SLOW_INTR(10,apic_slowintr10,)
-       SLOW_INTR(11,apic_slowintr11,)
-       SLOW_INTR(12,apic_slowintr12,)
-       SLOW_INTR(13,apic_slowintr13,)
-       SLOW_INTR(14,apic_slowintr14,)
-       SLOW_INTR(15,apic_slowintr15,)
-       SLOW_INTR(16,apic_slowintr16,)
-       SLOW_INTR(17,apic_slowintr17,)
-       SLOW_INTR(18,apic_slowintr18,)
-       SLOW_INTR(19,apic_slowintr19,)
-       SLOW_INTR(20,apic_slowintr20,)
-       SLOW_INTR(21,apic_slowintr21,)
-       SLOW_INTR(22,apic_slowintr22,)
-       SLOW_INTR(23,apic_slowintr23,)
-
-       WRONGINTR(0,apic_wrongintr0)
-       WRONGINTR(1,apic_wrongintr1)
-       WRONGINTR(2,apic_wrongintr2)
-       WRONGINTR(3,apic_wrongintr3)
-       WRONGINTR(4,apic_wrongintr4)
-       WRONGINTR(5,apic_wrongintr5)
-       WRONGINTR(6,apic_wrongintr6)
-       WRONGINTR(7,apic_wrongintr7)
-       WRONGINTR(8,apic_wrongintr8)
-       WRONGINTR(9,apic_wrongintr9)
-       WRONGINTR(10,apic_wrongintr10)
-       WRONGINTR(11,apic_wrongintr11)
-       WRONGINTR(12,apic_wrongintr12)
-       WRONGINTR(13,apic_wrongintr13)
-       WRONGINTR(14,apic_wrongintr14)
-       WRONGINTR(15,apic_wrongintr15)
-       WRONGINTR(16,apic_wrongintr16)
-       WRONGINTR(17,apic_wrongintr17)
-       WRONGINTR(18,apic_wrongintr18)
-       WRONGINTR(19,apic_wrongintr19)
-       WRONGINTR(20,apic_wrongintr20)
-       WRONGINTR(21,apic_wrongintr21)
-       WRONGINTR(22,apic_wrongintr22)
-       WRONGINTR(23,apic_wrongintr23)
 MCOUNT_LABEL(eintr)
 
 #endif
index 961276f..95ead43 100644 (file)
@@ -71,16 +71,6 @@ extern inthand_t
        IDTVEC(icu_fastintr12), IDTVEC(icu_fastintr13),
        IDTVEC(icu_fastintr14), IDTVEC(icu_fastintr15);
 
-extern inthand_t
-       IDTVEC(icu_slowintr0), IDTVEC(icu_slowintr1),
-       IDTVEC(icu_slowintr2), IDTVEC(icu_slowintr3),
-       IDTVEC(icu_slowintr4), IDTVEC(icu_slowintr5),
-       IDTVEC(icu_slowintr6), IDTVEC(icu_slowintr7),
-       IDTVEC(icu_slowintr8), IDTVEC(icu_slowintr9),
-       IDTVEC(icu_slowintr10), IDTVEC(icu_slowintr11),
-       IDTVEC(icu_slowintr12), IDTVEC(icu_slowintr13),
-       IDTVEC(icu_slowintr14), IDTVEC(icu_slowintr15);
-
 static int icu_vectorctl(int, int, int);
 static int icu_setvar(int, const void *);
 static int icu_getvar(int, void *);
@@ -98,17 +88,6 @@ static inthand_t *icu_fastintr[ICU_HWI_VECTORS] = {
        &IDTVEC(icu_fastintr14), &IDTVEC(icu_fastintr15)
 };
 
-static inthand_t *icu_slowintr[ICU_HWI_VECTORS] = {
-       &IDTVEC(icu_slowintr0), &IDTVEC(icu_slowintr1),
-       &IDTVEC(icu_slowintr2), &IDTVEC(icu_slowintr3),
-       &IDTVEC(icu_slowintr4), &IDTVEC(icu_slowintr5),
-       &IDTVEC(icu_slowintr6), &IDTVEC(icu_slowintr7),
-       &IDTVEC(icu_slowintr8), &IDTVEC(icu_slowintr9),
-       &IDTVEC(icu_slowintr10), &IDTVEC(icu_slowintr11),
-       &IDTVEC(icu_slowintr12), &IDTVEC(icu_slowintr13),
-       &IDTVEC(icu_slowintr14), &IDTVEC(icu_slowintr15)
-};
-
 struct machintr_abi MachIntrABI = {
     MACHINTR_ICU,
     .intrdis = ICU_INTRDIS,
@@ -198,7 +177,6 @@ void
 icu_cleanup(void)
 {
        mdcpu->gd_fpending = 0;
-       mdcpu->gd_ipending = 0;
 }
 
 
@@ -218,15 +196,12 @@ icu_vectorctl(int op, int intr, int flags)
 
     switch(op) {
     case MACHINTR_VECTOR_SETUP:
-       setidt(IDT_OFFSET + intr,
-               flags & INTR_FAST ? icu_fastintr[intr] : icu_slowintr[intr],
-               SDT_SYSIGT, SEL_KPL, 0);
+       setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0);
        machintr_intren(intr);
        break;
     case MACHINTR_VECTOR_TEARDOWN:
     case MACHINTR_VECTOR_SETDEFAULT:
-       setidt(IDT_OFFSET + intr, icu_slowintr[intr], 
-               SDT_SYSIGT, SEL_KPL, 0);
+       setidt(IDT_OFFSET + intr, icu_fastintr[intr], SDT_SYSIGT, SEL_KPL, 0);
        machintr_intrdis(intr);
        break;
     default:
index 63ef84d..b6fd834 100644 (file)
@@ -153,69 +153,15 @@ IDTVEC(vec_name) ;                                                        \
        andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
        pushq   $irq_num ;                                              \
        movq    %rsp,%rdi ;             /* rdi = call argument */       \
+       addl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
       call    ithread_fast_handler ;  /* returns 0 to unmask int */   \
+       subl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
        addq    $8,%rsp ;               /* intr frame -> trap frame */  \
        UNMASK_IRQ(icu, irq_num) ;                                      \
 5: ;                                                                   \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \
 
-/*
- * Slow interrupt call handlers run in the following sequence:
- *
- *     - Push the trap frame required by doreti.
- *     - Mask the interrupt and reenable its source.
- *     - If we cannot take the interrupt set its ipending bit and
- *       doreti.  In addition to checking for a critical section
- *       and cpl mask we also check to see if the thread is still
- *       running.
- *     - If we can take the interrupt clear its ipending bit
- *       and schedule its thread.  Leave interrupts masked and doreti.
- *
- *     sched_ithd() is called with interrupts enabled and outside of a
- *     critical section (so it can preempt us).
- *
- *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
- *
- *     Note that intr_nesting_level is not bumped during sched_ithd because
- *     blocking allocations are allowed in the preemption case.
- *
- *     YYY can cache gd base pointer instead of using hidden %fs
- *     prefixes.
- */
-
-#define        SLOW_INTR(irq_num, vec_name, icu, enable_icus)                  \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       ICU_PUSH_FRAME ;                                                \
-       FAKE_MCOUNT(15*4(%esp)) ;                                       \
-       MASK_IRQ(icu, irq_num) ;                                        \
-       incl    PCPU(cnt) + V_INTR ;                                    \
-       enable_icus ;                                                   \
-       movq    PCPU(curthread),%rbx ;                                  \
-       testl   $-1,TD_NEST_COUNT(%rbx) ;                               \
-       jne     1f ;                                                    \
-       cmpl    $TDPRI_CRIT,TD_PRI(%rbx) ;                              \
-       jl      2f ;                                                    \
-1: ;                                                                   \
-       /* set the pending bit and return, leave interrupt masked */    \
-       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
-       orl     $RQF_INTPEND, PCPU(reqflags) ;                          \
-       jmp     5f ;                                                    \
-2: ;                                                                   \
-       /* set running bit, clear pending bit, run handler */           \
-       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
-       incl    TD_NEST_COUNT(%rbx) ;                                   \
-       sti ;                                                           \
-       movq    $irq_num,%rdi ; /* %rdi = argument to call */           \
-       call    sched_ithd ;                                            \
-       cli ;                                                           \
-       decl    TD_NEST_COUNT(%rbx) ;                                   \
-5: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       jmp     doreti ;                                                \
-
 /*
  * Unmask a slow interrupt.  This function is used by interrupt threads
  * after they have descheduled themselves to reenable interrupts and
@@ -250,24 +196,6 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(13,icu_fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(14,icu_fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
        FAST_INTR(15,icu_fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
-
-       SLOW_INTR(0,icu_slowintr0, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(1,icu_slowintr1, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(2,icu_slowintr2, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(3,icu_slowintr3, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(4,icu_slowintr4, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(5,icu_slowintr5, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(6,icu_slowintr6, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(7,icu_slowintr7, IO_ICU1, ENABLE_ICU1)
-       SLOW_INTR(8,icu_slowintr8, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(9,icu_slowintr9, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(10,icu_slowintr10, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(11,icu_slowintr11, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(12,icu_slowintr12, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(13,icu_slowintr13, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(14,icu_slowintr14, IO_ICU2, ENABLE_ICU1_AND_2)
-       SLOW_INTR(15,icu_slowintr15, IO_ICU2, ENABLE_ICU1_AND_2)
-
 MCOUNT_LABEL(eintr)
 
        .data
index 988b07b..aa6942f 100644 (file)
@@ -55,9 +55,9 @@
 
 /*
  * Note on interrupt control.  Pending interrupts not yet dispatched are
- * marked in gd_fpending, gd_ipending, or gd_spending.  Once dispatched 
- * the interrupt's pending bit is cleared and the interrupt is masked.
- * Upon completion the interrupt is unmasked.
+ * marked in gd_fpending or gd_spending.  Once dispatched the interrupt's
+ * pending bit is cleared and the interrupt is masked.  Upon completion
+ * the interrupt is unmasked.
  *
  * For edge triggered interrupts interrupts may be enabled again at this
  * point and if they occur before the interrupt service routine is complete
@@ -77,7 +77,7 @@ struct mdglobaldata {
        union savefpu   gd_savefpu;     /* fast bcopy/zero temp fpu save area */
        int             gd_fpu_lock;    /* fast bcopy/zero cpu lock */
        int             gd_fpending;    /* fast interrupt pending */
-       int             gd_ipending;    /* normal interrupt pending */
+       int             unused002;
        int             gd_spending;    /* software interrupt pending */
        int             gd_sdelayed;    /* delayed software ints */
        int             gd_currentldt;