threaded interrupts 1: Rewrite the ICU interrupt code, splz, and doreti code.
authorMatthew Dillon <dillon@dragonflybsd.org>
Sun, 29 Jun 2003 03:28:46 +0000 (03:28 +0000)
committerMatthew Dillon <dillon@dragonflybsd.org>
Sun, 29 Jun 2003 03:28:46 +0000 (03:28 +0000)
The APIC code hasn't been done yet.   Consolidate many interrupt thread
related functions into MI code, especially software interrupts.  All normal
interrupts and software interrupts are now threaded, and I'm almost ready
to deal with interrupt-thread-only preemption.  At the moment I run
interrupt threads in a critical section and probably will continue to do
so until I can make them MP safe.

85 files changed:
sys/bus/cam/cam_xpt.c
sys/bus/isa/i386/isa.h
sys/bus/isa/i386/isa_device.h
sys/bus/isa/isareg.h
sys/bus/usb/ohci.c
sys/bus/usb/uhci.c
sys/bus/usb/usbdi.c
sys/conf/files
sys/cpu/i386/include/cpu.h
sys/cpu/i386/include/cpufunc.h
sys/cpu/i386/include/types.h
sys/dev/raid/vinum/vinumext.h
sys/dev/raid/vinum/vinummemory.c
sys/dev/serial/sio/sio.c
sys/i386/apic/apic_ipl.s
sys/i386/apic/apic_vector.s
sys/i386/i386/exception.s
sys/i386/i386/genassym.c
sys/i386/i386/globals.s
sys/i386/i386/machdep.c
sys/i386/i386/swtch.s
sys/i386/i386/trap.c
sys/i386/i386/vm86bios.s
sys/i386/i386/vm_machdep.c
sys/i386/icu/icu.h
sys/i386/icu/icu_ipl.s
sys/i386/icu/icu_vector.s
sys/i386/include/asnames.h
sys/i386/include/cpu.h
sys/i386/include/cpufunc.h
sys/i386/include/globaldata.h
sys/i386/include/md_var.h
sys/i386/include/types.h
sys/i386/isa/apic_ipl.s
sys/i386/isa/apic_vector.s
sys/i386/isa/icu.h
sys/i386/isa/icu_ipl.s
sys/i386/isa/icu_vector.s
sys/i386/isa/intr_machdep.c
sys/i386/isa/intr_machdep.h
sys/i386/isa/ipl.s
sys/i386/isa/ipl_funcs.c
sys/i386/isa/vector.s
sys/kern/kern_intr.c
sys/kern/kern_malloc.c
sys/kern/kern_timeout.c
sys/kern/lwkt_thread.c
sys/kern/subr_taskqueue.c
sys/kern/uipc_mbuf.c
sys/net/netisr.c [new file with mode: 0644]
sys/net/netisr.h
sys/opencrypto/crypto.c
sys/platform/pc32/apic/apic_ipl.s
sys/platform/pc32/apic/apic_vector.s
sys/platform/pc32/i386/exception.s
sys/platform/pc32/i386/genassym.c
sys/platform/pc32/i386/globals.s
sys/platform/pc32/i386/machdep.c
sys/platform/pc32/i386/swtch.s
sys/platform/pc32/i386/trap.c
sys/platform/pc32/i386/vm86bios.s
sys/platform/pc32/i386/vm_machdep.c
sys/platform/pc32/icu/icu.h
sys/platform/pc32/icu/icu_ipl.s
sys/platform/pc32/icu/icu_vector.s
sys/platform/pc32/include/asnames.h
sys/platform/pc32/include/globaldata.h
sys/platform/pc32/include/md_var.h
sys/platform/pc32/isa/apic_ipl.s
sys/platform/pc32/isa/apic_vector.s
sys/platform/pc32/isa/icu.h
sys/platform/pc32/isa/icu_ipl.s
sys/platform/pc32/isa/icu_vector.s
sys/platform/pc32/isa/intr_machdep.c
sys/platform/pc32/isa/intr_machdep.h
sys/platform/pc32/isa/ipl.s
sys/platform/pc32/isa/ipl_funcs.c
sys/platform/pc32/isa/vector.s
sys/platform/vkernel/i386/genassym.c
sys/sys/globaldata.h
sys/sys/interrupt.h
sys/sys/random.h
sys/sys/systm.h
sys/sys/thread.h
sys/vm/vm_glue.c

index 97d7dfd..c4cc3b6 100644 (file)
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
- * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.3 2003/06/23 17:55:24 dillon Exp $
+ * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.4 2003/06/29 03:28:39 dillon Exp $
  */
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -715,8 +715,8 @@ static xpt_devicefunc_t xptpassannouncefunc;
 static void     xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
 static void     xptaction(struct cam_sim *sim, union ccb *work_ccb);
 static void     xptpoll(struct cam_sim *sim);
-static swihand_t swi_camnet;
-static swihand_t swi_cambio;
+static inthand2_t swi_camnet;
+static inthand2_t swi_cambio;
 static void     camisr(cam_isrq_t *queue);
 #if 0
 static void     xptstart(struct cam_periph *periph, union ccb *work_ccb);
@@ -1374,8 +1374,8 @@ xpt_init(dummy)
        }
 
        /* Install our software interrupt handlers */
-       register_swi(SWI_CAMNET, swi_camnet);
-       register_swi(SWI_CAMBIO, swi_cambio);
+       register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet");
+       register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio");
 }
 
 static cam_status
@@ -3411,8 +3411,8 @@ xpt_polled_action(union ccb *start_ccb)
           && (--timeout > 0)) {
                DELAY(1000);
                (*(sim->sim_poll))(sim);
-               swi_camnet();
-               swi_cambio();           
+               swi_camnet(NULL);
+               swi_cambio(NULL);               
        }
        
        dev->ccbq.devq_openings++;
@@ -3422,8 +3422,8 @@ xpt_polled_action(union ccb *start_ccb)
                xpt_action(start_ccb);
                while(--timeout > 0) {
                        (*(sim->sim_poll))(sim);
-                       swi_camnet();
-                       swi_cambio();
+                       swi_camnet(NULL);
+                       swi_cambio(NULL);
                        if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
                            != CAM_REQ_INPROG)
                                break;
@@ -6278,13 +6278,13 @@ xptpoll(struct cam_sim *sim)
  */
 
 static void
-swi_camnet(void)
+swi_camnet(void *arg)
 {
        camisr(&cam_netq);
 }
 
 static void
-swi_cambio(void)
+swi_cambio(void *arg)
 {
        camisr(&cam_bioq);
 }
index d409b68..1bf1575 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)isa.h 5.7 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/isa/isa.h,v 1.23 1999/08/28 00:44:54 peter Exp $
- * $DragonFly: src/sys/bus/isa/i386/isa.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/bus/isa/i386/isa.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifdef PC98
@@ -57,6 +57,8 @@
 #ifndef IO_ISABEGIN
 #define        IO_ISABEGIN     0x000           /* 0x000 - Beginning of I/O Registers */
 
+#define ICU_IMR_OFFSET 1               /* IO_ICU{1,2} + 1 */
+
                /* CPU Board */
 #define        IO_DMA1         0x000           /* 8237A DMA Controller #1 */
 #define        IO_ICU1         0x020           /* 8259A Interrupt Controller #1 */
index 22f9bcb..0c9417c 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     from: @(#)isa_device.h  7.1 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/isa/isa_device.h,v 1.68 2000/01/29 18:01:10 peter Exp $
- * $DragonFly: src/sys/bus/isa/i386/isa_device.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/bus/isa/i386/isa_device.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _I386_ISA_ISA_DEVICE_H_
@@ -43,6 +43,9 @@
 #include <i386/isa/isa_dma.h>
 #endif
 #endif
+#ifndef _SYS_INTERRUPT_H_
+#include <sys/interrupt.h>
+#endif
 
 /*
  * ISA Bus Autoconfiguration
index 1f941e3..83623a6 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)isa.h 5.7 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/isa/isareg.h,v 1.4.2.1 2000/07/18 20:39:05 dfr Exp $
- * $DragonFly: src/sys/bus/isa/isareg.h,v 1.2 2003/06/17 04:28:40 dillon Exp $
+ * $DragonFly: src/sys/bus/isa/isareg.h,v 1.3 2003/06/29 03:28:44 dillon Exp $
  */
 
 #ifdef PC98
@@ -57,6 +57,8 @@
 #ifndef IO_ISABEGIN
 #define        IO_ISABEGIN     0x000           /* 0x000 - Beginning of I/O Registers */
 
+#define ICU_IMR_OFFSET 1               /* IO_ICU{1,2} + 1 */
+
                /* CPU Board */
 #define        IO_DMA1         0x000           /* 8237A DMA Controller #1 */
 #define        IO_ICU1         0x020           /* 8259A Interrupt Controller #1 */
index 9897514..696ed2b 100644 (file)
@@ -1,6 +1,6 @@
 /*     $NetBSD: ohci.c,v 1.64 2000/01/19 00:23:58 augustss Exp $       */
 /*     $FreeBSD: src/sys/dev/usb/ohci.c,v 1.39.2.9 2003/03/05 17:09:44 shiba Exp $     */
-/*     $DragonFly: src/sys/bus/usb/ohci.c,v 1.2 2003/06/17 04:28:32 dillon Exp $       */
+/*     $DragonFly: src/sys/bus/usb/ohci.c,v 1.3 2003/06/29 03:28:41 dillon Exp $       */
 
 /*
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -1832,8 +1832,8 @@ ohci_abort_xfer(usbd_xfer_handle xfer, usbd_status status)
                /* We have no process context, so we can't use tsleep(). */
                timeout(ohci_abort_xfer_end, xfer, hz / USB_FRAMES_PER_SECOND);
        } else {
-#if defined(DIAGNOSTIC) && defined(__i386__) && defined(__FreeBSD__)
-               KASSERT(intr_nesting_level == 0,
+#if defined(DIAGNOSTIC) && defined(__FreeBSD__)
+               KASSERT(mycpu->gd_intr_nesting_level == 0,
                        ("ohci_abort_req in interrupt context"));
 #endif
                usb_delay_ms(opipe->pipe.device->bus, 1);
index aad93d6..d85f641 100644 (file)
@@ -1,6 +1,6 @@
 /*     $NetBSD: uhci.c,v 1.80 2000/01/19 01:16:38 augustss Exp $       */
 /*     $FreeBSD: src/sys/dev/usb/uhci.c,v 1.40.2.10 2003/01/12 02:13:58 iedowse Exp $  */
-/*     $DragonFly: src/sys/bus/usb/uhci.c,v 1.3 2003/06/21 17:27:24 dillon Exp $       */
+/*     $DragonFly: src/sys/bus/usb/uhci.c,v 1.4 2003/06/29 03:28:42 dillon Exp $       */
 
 /*
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -1616,8 +1616,8 @@ uhci_abort_xfer(usbd_xfer_handle xfer, usbd_status status)
                /* We have no process context, so we can't use tsleep(). */
                timeout(uhci_abort_xfer_end, xfer, hz / USB_FRAMES_PER_SECOND);
        } else {
-#if defined(DIAGNOSTIC) && defined(__i386__) && defined(__FreeBSD__)
-               KASSERT(intr_nesting_level == 0,
+#if defined(DIAGNOSTIC) && defined(__FreeBSD__)
+               KASSERT(mycpu->gd_intr_nesting_level == 0,
                        ("ohci_abort_req in interrupt context"));
 #endif
                usb_delay_ms(xfer->pipe->device->bus, 1);
index a379c77..68338bf 100644 (file)
@@ -1,6 +1,6 @@
 /*     $NetBSD: usbdi.c,v 1.60 2000/01/19 00:23:58 augustss Exp $      */
 /*     $FreeBSD: src/sys/dev/usb/usbdi.c,v 1.34.2.7 2002/11/06 14:03:37 joe Exp $      */
-/*     $DragonFly: src/sys/bus/usb/usbdi.c,v 1.2 2003/06/17 04:28:32 dillon Exp $      */
+/*     $DragonFly: src/sys/bus/usb/usbdi.c,v 1.3 2003/06/29 03:28:42 dillon Exp $      */
 
 /*
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -851,8 +851,8 @@ usbd_do_request_flags(usbd_device_handle dev,
        usbd_status err;
 
 #ifdef DIAGNOSTIC
-#if defined(__i386__) && defined(__FreeBSD__)
-       KASSERT(intr_nesting_level == 0,
+#if defined(__FreeBSD__)
+       KASSERT(mycpu->gd_intr_nesting_level == 0,
                ("usbd_do_request: in interrupt context"));
 #endif
        if (dev->bus->intr_context) {
index 2aaf2a9..ec77129 100644 (file)
@@ -1,5 +1,5 @@
 # $FreeBSD: src/sys/conf/files,v 1.340.2.137 2003/06/04 17:10:30 sam Exp $
-# $DragonFly: src/sys/conf/files,v 1.5 2003/06/21 17:31:05 dillon Exp $
+# $DragonFly: src/sys/conf/files,v 1.6 2003/06/29 03:28:41 dillon Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -758,6 +758,7 @@ net/if_tun.c                optional tun
 net/if_tap.c           optional tap
 net/if_vlan.c          optional vlan
 net/net_osdep.c                standard
+net/netisr.c           standard
 net/ppp_deflate.c      optional ppp_deflate
 net/ppp_tty.c          optional ppp
 net/intrq.c            standard
index 4bc45cb..a240e74 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/include/cpu.h,v 1.43.2.2 2001/06/15 09:37:57 scottl Exp $
- * $DragonFly: src/sys/cpu/i386/include/cpu.h,v 1.4 2003/06/20 02:09:54 dillon Exp $
+ * $DragonFly: src/sys/cpu/i386/include/cpu.h,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_CPU_H_
@@ -61,7 +61,7 @@
 #define        CLKF_USERMODE(framep) \
        ((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))
 
-#define CLKF_INTR(framep)      (intr_nesting_level >= 2)
+#define CLKF_INTR(framep)      (mycpu->gd_intr_nesting_level >= 2)
 #if 0
 /*
  * XXX splsoftclock() is very broken and barely worth fixing.  It doesn't
 #ifdef _KERNEL
 extern char    btext[];
 extern char    etext[];
-extern u_char  intr_nesting_level;
 
 void   fork_trampoline __P((void));
 void   fork_return __P((struct proc *, struct trapframe));
index 14afba9..c630ea9 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
- * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.3 2003/06/28 02:09:49 dillon Exp $
+ * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
@@ -68,6 +68,10 @@ breakpoint(void)
        __asm __volatile("int $3");
 }
 
+/*
+ * Find the first 1 in mask, starting with bit 0 and return the
+ * bit number.  If mask is 0 the result is undefined.
+ */
 static __inline u_int
 bsfl(u_int mask)
 {
@@ -77,6 +81,10 @@ bsfl(u_int mask)
        return (result);
 }
 
+/*
+ * Find the last 1 in mask, starting with bit 31 and return the
+ * bit number.  If mask is 0 the result is undefined.
+ */
 static __inline u_int
 bsrl(u_int mask)
 {
@@ -86,6 +94,34 @@ bsrl(u_int mask)
        return (result);
 }
 
+/*
+ * Test and set the specified bit (1 << bit) in the integer.  The
+ * previous value of the bit is returned (0 or 1).
+ */
+static __inline int
+btsl(u_int *mask, int bit)
+{
+       int result;
+
+       __asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
+                   "=r"(result), "=m"(*mask) : "r" (bit));
+       return(result);
+}
+
+/*
+ * Test and clear the specified bit (1 << bit) in the integer.  The
+ * previous value of the bit is returned (0 or 1).
+ */
+static __inline int
+btrl(u_int *mask, int bit)
+{
+       int result;
+
+       __asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
+                   "=r"(result), "=m"(*mask) : "r" (bit));
+       return(result);
+}
+
 static __inline void
 disable_intr(void)
 {
index 797c3da..d9ad94f 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)types.h     8.3 (Berkeley) 1/5/94
  * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
- * $DragonFly: src/sys/cpu/i386/include/types.h,v 1.3 2003/06/28 02:09:49 dillon Exp $
+ * $DragonFly: src/sys/cpu/i386/include/types.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_TYPES_H_
@@ -67,8 +67,4 @@ typedef unsigned int *pt_entry_t;
 /* Interrupt mask (spl, xxx_imask, etc) */
 typedef __uint32_t             intrmask_t;
 
-/* Interrupt handler function type. */
-typedef        void                    inthand2_t __P((void *_cookie));
-typedef        void                    ointhand2_t __P((int _device_id));
-
 #endif /* !_MACHINE_TYPES_H_ */
index 7c7f4c9..91ab0e0 100644 (file)
@@ -35,7 +35,7 @@
  *
  * $Id: vinumext.h,v 1.26 2000/05/16 07:38:08 grog Exp grog $
  * $FreeBSD: src/sys/dev/vinum/vinumext.h,v 1.25.2.3 2001/05/11 02:11:06 grog Exp $
- * $DragonFly: src/sys/dev/raid/vinum/vinumext.h,v 1.2 2003/06/17 04:28:33 dillon Exp $
+ * $DragonFly: src/sys/dev/raid/vinum/vinumext.h,v 1.3 2003/06/29 03:28:42 dillon Exp $
  */
 
 /* vinumext.h: external definitions */
@@ -255,7 +255,7 @@ void FFree(void *mem, char *, int);
 #define LOCKDRIVE(d) lockdrive (d, __FILE__, __LINE__)
 #else
 #define Malloc(x)  malloc((x), M_DEVBUF, \
-       intr_nesting_level == 0? M_WAITOK: M_NOWAIT)
+       mycpu->gd_intr_nesting_level == 0? M_WAITOK: M_NOWAIT)
 #define Free(x)    free((x), M_DEVBUF)
 #define LOCKDRIVE(d) lockdrive (d)
 #endif
index a05ea50..08e35a2 100644 (file)
@@ -35,7 +35,7 @@
  *
  * $Id: vinummemory.c,v 1.25 2000/05/04 01:57:48 grog Exp grog $
  * $FreeBSD: src/sys/dev/vinum/vinummemory.c,v 1.22.2.1 2000/06/02 04:26:11 grog Exp $
- * $DragonFly: src/sys/dev/raid/vinum/vinummemory.c,v 1.2 2003/06/17 04:28:33 dillon Exp $
+ * $DragonFly: src/sys/dev/raid/vinum/vinummemory.c,v 1.3 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include <dev/vinum/vinumhdr.h>
@@ -152,7 +152,7 @@ MMalloc(int size, char *file, int line)
        return 0;                                           /* can't continue */
     }
     /* Wait for malloc if we can */
-    result = malloc(size, M_DEVBUF, intr_nesting_level == 0 ? M_WAITOK : M_NOWAIT);
+    result = malloc(size, M_DEVBUF, mycpu->gd_intr_nesting_level == 0 ? M_WAITOK : M_NOWAIT);
     if (result == NULL)
        log(LOG_ERR, "vinum: can't allocate %d bytes from %s:%d\n", size, file, line);
     else {
index b1cc8b9..8f5c84f 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/isa/sio.c,v 1.291.2.35 2003/05/18 08:51:15 murray Exp $
- * $DragonFly: src/sys/dev/serial/sio/sio.c,v 1.4 2003/06/25 03:55:56 dillon Exp $
+ * $DragonFly: src/sys/dev/serial/sio/sio.c,v 1.5 2003/06/29 03:28:44 dillon Exp $
  *     from: @(#)com.c 7.5 (Berkeley) 5/16/91
  *     from: i386/isa sio.c,v 1.234
  */
@@ -316,7 +316,7 @@ static      void    siointr1        __P((struct com_s *com));
 static void    siointr         __P((void *arg));
 static int     commctl         __P((struct com_s *com, int bits, int how));
 static int     comparam        __P((struct tty *tp, struct termios *t));
-static swihand_t siopoll;
+static inthand2_t siopoll;
 static int     sioprobe        __P((device_t dev, int xrid, u_long rclk));
 static int     sio_isa_probe   __P((device_t dev));
 static void    siosettimeout   __P((void));
@@ -1410,7 +1410,7 @@ determined_type: ;
        printf("\n");
 
        if (!sio_registered) {
-               register_swi(SWI_TTY, siopoll);
+               register_swi(SWI_TTY, siopoll, NULL ,"swi_siopoll");
                sio_registered = TRUE;
        }
        minorbase = UNIT_TO_MINOR(unit);
@@ -2345,7 +2345,7 @@ sioioctl(dev_t dev, u_long cmd, caddr_t   data, int flag, struct thread *td)
 }
 
 static void
-siopoll()
+siopoll(void *dummy)
 {
        int             unit;
 
index b7b8704..065c7af 100644 (file)
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
- * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
+#if 0
 
        .data
        ALIGN_DATA
@@ -101,7 +102,7 @@ ENTRY(splz)
         */
        pushl   %ebx
        movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
+       movl    TD_CPL(%ebx),%eax
 splz_next:
        /*
         * We don't need any locking here.  (ipending & ~cpl) cannot grow 
@@ -141,10 +142,10 @@ splz_unpend:
 splz_swi:
        pushl   %eax                    /* save cpl across call */
        orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* set cpl for SWI */
+       movl    %eax,TD_CPL(%ebx) /* set cpl for SWI */
        call    *_ihandlers(,%ecx,4)
        popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* restore cpl and loop */
+       movl    %eax,TD_CPL(%ebx) /* restore cpl and loop */
        jmp     splz_next
 
 /*
@@ -463,3 +464,5 @@ ENTRY(io_apic_write)
 ENTRY(apic_eoi)
        movl    $0, _lapic+0xb0
        ret
+
+#endif
index b66cd21..5f76e6c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.5 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.6 2003/06/29 03:28:43 dillon Exp $
  */
 
 
@@ -29,14 +29,14 @@ IDTVEC(vec_name) ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
+       pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
+       movl    %ax,%es ;                                               \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
-       FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
+       FAKE_MCOUNT(6*4(%esp)) ;                                        \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
@@ -48,7 +48,7 @@ IDTVEC(vec_name) ;                                                    \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
-       MAYBE_POPL_ES ;                                                 \
+       popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
@@ -259,6 +259,7 @@ __CONCAT(Xresume,irq_num): ;                                                \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
+       addl    $4,%esp ;                                               \
 ;                                                                      \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
@@ -456,8 +457,6 @@ _Xcpuast:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-       
        movl    _cpuid, %eax
        lock    
        btrl    %eax, _checkstate_pending_ast
@@ -512,8 +511,6 @@ _Xforward_irq:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-
        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
 1:
@@ -722,6 +719,8 @@ _Xrendezvous:
        
        
        .data
+
+#if 0
 /*
  * Addresses of interrupt handlers.
  *  XresumeNN: Resumption addresses for HWIs.
@@ -751,6 +750,7 @@ imasks:                             /* masks for interrupt handlers */
 
        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
+#endif
 
 /* active flag for lazy masking */
 iactive:
index 8830209..dd5d9ba 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/exception.s,v 1.65.2.3 2001/08/15 01:23:49 peter Exp $
- * $DragonFly: src/sys/i386/i386/Attic/exception.s,v 1.6 2003/06/28 02:09:47 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/exception.s,v 1.7 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
  * On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK.  This means
  * that we must be careful in regards to accessing global variables.  We
  * save (push) the current cpl (our software interrupt disable mask), call
- * the trap function, then call _doreti to restore the cpl and deal with
+ * the trap function, then jump to _doreti to restore the cpl and deal with
  * ASTs (software interrupts).  _doreti will determine if the restoration
  * of the cpl unmasked any pending interrupts and will issue those interrupts
  * synchronously prior to doing the iret.
- *
- * At the moment we must own the MP lock to do any cpl manipulation, which
- * means we must own it prior to  calling _doreti.  The syscall case attempts
- * to avoid this by handling a reduced set of cases itself and iret'ing.
  */
 #define        IDTVEC(name)    ALIGN_TEXT; .globl __CONCAT(_X,name); \
                        .type __CONCAT(_X,name),@function; __CONCAT(_X,name):
@@ -171,21 +167,14 @@ IDTVEC(fpu)
        mov     %ax,%fs
        FAKE_MCOUNT(13*4(%esp))
 
-#ifdef SMP
-       MPLOCKED incl _cnt+V_TRAP
-       MP_LOCK
-       movl    _curthread,%eax         /* save original cpl */
-       pushl   TD_MACH+MTD_CPL(%eax)
-       pushl   $0                      /* dummy unit to finish intr frame */
-#else /* SMP */
-       movl    _curthread,%eax         /* save original cpl */
-       pushl   TD_MACH+MTD_CPL(%eax)
-       pushl   $0                      /* dummy unit to finish intr frame */
+       movl    _curthread,%ebx         /* save original cpl */
+       movl    TD_CPL(%ebx), %ebx
+       pushl   %ebx
        incl    _cnt+V_TRAP
-#endif /* SMP */
 
-       call    _npx_intr
+       call    _npx_intr               /* note: call might mess w/ argument */
 
+       movl    %ebx, (%esp)            /* save cpl for doreti */
        incb    _intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
@@ -229,7 +218,7 @@ calltrap:
        MPLOCKED incl _cnt+V_TRAP
        MP_LOCK
        movl    _curthread,%eax         /* keep orig cpl here during call */
-       movl    TD_MACH+MTD_CPL(%eax),%ebx
+       movl    TD_CPL(%eax),%ebx
        call    _trap
 
        /*
@@ -237,7 +226,6 @@ calltrap:
         * to interrupt frame.
         */
        pushl   %ebx                    /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit to finish intr frame */
        incb    _intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
@@ -284,8 +272,7 @@ IDTVEC(syscall)
        MP_LOCK
 #endif
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit for interrupt frame */
-       movb    $1,_intr_nesting_level
+       movl    $1,_intr_nesting_level
        jmp     _doreti
 
 /*
@@ -322,8 +309,7 @@ IDTVEC(int0x80_syscall)
        MP_LOCK
 #endif
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit for interrupt frame */
-       movb    $1,_intr_nesting_level
+       movl    $1,_intr_nesting_level
        jmp     _doreti
 
 ENTRY(fork_trampoline)
@@ -348,7 +334,6 @@ ENTRY(fork_trampoline)
         * Return via _doreti to handle ASTs.
         */
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit to finish intr frame */
        movb    $1,_intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
index 115d591..55c74a7 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)genassym.c    5.11 (Berkeley) 5/10/91
  * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $
- * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.17 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.18 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "opt_user_ldt.h"
@@ -92,7 +92,7 @@ ASSYM(TDF_EXITED, TDF_EXITED);
 
 ASSYM(RW_OWNER, offsetof(struct lwkt_rwlock, rw_owner));
 
-ASSYM(MTD_CPL, offsetof(struct md_thread, mtd_cpl));
+ASSYM(TD_CPL, offsetof(struct thread, td_mach) + offsetof(struct md_thread, mtd_cpl));
 
 ASSYM(TDPRI_CRIT, TDPRI_CRIT);
 
@@ -185,13 +185,16 @@ ASSYM(BI_KERNEND, offsetof(struct bootinfo, bi_kernend));
 ASSYM(GD_CURTHREAD, offsetof(struct mdglobaldata, mi.gd_curthread));
 ASSYM(GD_REQPRI, offsetof(struct mdglobaldata, mi.gd_reqpri));
 ASSYM(GD_CPUID, offsetof(struct mdglobaldata, mi.gd_cpuid));
-ASSYM(GD_INSIDE_INTR, offsetof(struct mdglobaldata, mi.gd_inside_intr));
+ASSYM(GD_INTR_NESTING_LEVEL, offsetof(struct mdglobaldata, mi.gd_intr_nesting_level));
 ASSYM(GD_ASTPENDING, offsetof(struct mdglobaldata, mi.gd_astpending));
 
 #ifdef USER_LDT
 ASSYM(GD_CURRENTLDT, offsetof(struct mdglobaldata, gd_currentldt));
 #endif
 
+ASSYM(GD_FPENDING, offsetof(struct mdglobaldata, gd_fpending));
+ASSYM(GD_IPENDING, offsetof(struct mdglobaldata, gd_ipending));
+ASSYM(GD_IRUNNING, offsetof(struct mdglobaldata, gd_irunning));
 ASSYM(GD_COMMON_TSS, offsetof(struct mdglobaldata, gd_common_tss));
 ASSYM(GD_COMMON_TSSD, offsetof(struct mdglobaldata, gd_common_tssd));
 ASSYM(GD_TSS_GDT, offsetof(struct mdglobaldata, gd_tss_gdt));
index f9ada65..37f5883 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/globals.s,v 1.13.2.1 2000/05/16 06:58:06 dillon Exp $
- * $DragonFly: src/sys/i386/i386/Attic/globals.s,v 1.11 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/globals.s,v 1.12 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "opt_user_ldt.h"
         * the AP versions are setup in mp_machdep.c.
         */
        .globl  gd_cpuid, gd_cpu_lockid, gd_other_cpus
-       .globl  gd_ss_eflags, gd_inside_intr
+       .globl  gd_ss_eflags, gd_intr_nesting_level
        .globl  gd_CMAP1, gd_CMAP2, gd_CMAP3, gd_PMAP1
        .globl  gd_CADDR1, gd_CADDR2, gd_CADDR3, gd_PADDR1
+       .globl  gd_irunning, gd_ipending, gd_fpending
 
        .set    gd_cpuid,globaldata + GD_CPUID
        .set    gd_cpu_lockid,globaldata + GD_CPU_LOCKID
        .set    gd_other_cpus,globaldata + GD_OTHER_CPUS
        .set    gd_ss_eflags,globaldata + GD_SS_EFLAGS
-       .set    gd_inside_intr,globaldata + GD_INSIDE_INTR
+       .set    gd_intr_nesting_level,globaldata + GD_INTR_NESTING_LEVEL
        .set    gd_CMAP1,globaldata + GD_PRV_CMAP1
        .set    gd_CMAP2,globaldata + GD_PRV_CMAP2
        .set    gd_CMAP3,globaldata + GD_PRV_CMAP3
        .set    gd_CADDR2,globaldata + GD_PRV_CADDR2
        .set    gd_CADDR3,globaldata + GD_PRV_CADDR3
        .set    gd_PADDR1,globaldata + GD_PRV_PADDR1
+       .set    gd_fpending,globaldata + GD_FPENDING
+       .set    gd_ipending,globaldata + GD_IPENDING
+       .set    gd_irunning,globaldata + GD_IRUNNING
 
 #if defined(APIC_IO)
        .globl  lapic_eoi, lapic_svr, lapic_tpr, lapic_irr1, lapic_ver
index 3113fd9..ddcebf4 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)machdep.c     7.4 (Berkeley) 6/3/91
  * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
- * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.15 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.16 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "apm.h"
@@ -90,8 +90,6 @@
 
 #include <ddb/ddb.h>
 
-#include <net/netisr.h>
-
 #include <machine/cpu.h>
 #include <machine/reg.h>
 #include <machine/clock.h>
@@ -454,33 +452,6 @@ again:
        cpu_setregs();
 }
 
-int
-register_netisr(num, handler)
-       int num;
-       netisr_t *handler;
-{
-       
-       if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
-               printf("register_netisr: bad isr number: %d\n", num);
-               return (EINVAL);
-       }
-       netisrs[num] = handler;
-       return (0);
-}
-
-int
-unregister_netisr(num)
-       int num;
-{
-
-       if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
-               printf("unregister_netisr: bad isr number: %d\n", num);
-               return (EINVAL);
-       }
-       netisrs[num] = NULL;
-       return (0);
-}
-
 /*
  * Send an interrupt to process.
  *
@@ -974,18 +945,13 @@ cpu_halt(void)
  * Note on cpu_idle_hlt:  On an SMP system this may cause the system to 
  * halt until the next clock tick, even if a thread is ready YYY
  */
-#ifdef SMP
 static int     cpu_idle_hlt = 0;
-#else
-static int     cpu_idle_hlt = 1;
-#endif
 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
     &cpu_idle_hlt, 0, "Idle loop HLT enable");
 
 void
 cpu_idle(void)
 {
-       spl0();
        for (;;) {
                lwkt_switch();
                if (cpu_idle_hlt) {
@@ -997,7 +963,6 @@ cpu_idle(void)
                } else {
                        __asm __volatile("sti");
                }
-               /* YYY BGL */
        }
 }
 
@@ -2069,6 +2034,8 @@ init386(int first)
  * Initialize machine-dependant portions of the global data structure.
  * Note that the global data area and cpu0's idlestack in the private
  * data space were allocated in locore.
+ *
+ * Note: the idlethread's cpl is 0
  */
 void
 cpu_gdinit(struct mdglobaldata *gd, int cpu)
index ce326a2..354b35d 100644 (file)
@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
- * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.15 2003/06/28 02:09:47 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.16 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
@@ -443,6 +443,13 @@ cpu_switch_load_gs:
 
 CROSSJUMPTARGET(sw1a)
 
+badsw0:
+       pushl   %eax
+       pushl   $sw0_1
+       call    _panic
+
+sw0_1: .asciz  "cpu_switch: panic: %p"
+
 #ifdef DIAGNOSTIC
 badsw1:
        pushl   $sw0_1
@@ -538,6 +545,7 @@ ENTRY(savectx)
 ENTRY(cpu_idle_restore)
        movl    $0,%ebp
        pushl   $0
+       sti
        jmp     cpu_idle
 
 /*
@@ -551,6 +559,7 @@ ENTRY(cpu_idle_restore)
 ENTRY(cpu_kthread_restore)
        movl    TD_PCB(%eax),%ebx
        movl    $0,%ebp
+       sti
        popl    %edx            /* kthread exit function */
        pushl   PCB_EBX(%ebx)   /* argument to ESI function */
        pushl   %edx            /* set exit func as return address */
@@ -589,13 +598,14 @@ ENTRY(cpu_lwkt_restore)
        popl    %esi
        popl    %ebx
        popl    %ebp
-       movl    TD_MACH+MTD_CPL(%eax),%ecx      /* unmasked cpl? YYY too complex */
-       notl    %ecx
-       andl    _ipending,%ecx
-       je      1f
        cmpl    $0,_intr_nesting_level          /* don't stack too deeply */
-       jne     1f
-       call    splz                            /* execute unmasked ints */
+       jne     2f
+       testl   _ipending,%ecx
+       jnz     1f
+       testl   _fpending,%ecx
+       jz      2f
 1:
+       call    splz                            /* execute unmasked ints */
+2:
        ret
 
index 2606619..d685442 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)trap.c        7.4 (Berkeley) 5/13/91
  * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
- * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.10 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.11 2003/06/29 03:28:42 dillon Exp $
  */
 
 /*
@@ -503,7 +503,7 @@ kernel_trap:
                }                                                       \
        } while (0)
 
-                       if (intr_nesting_level == 0) {
+                       if (mycpu->gd_intr_nesting_level == 0) {
                                /*
                                 * Invalid %fs's and %gs's can be created using
                                 * procfs or PT_SETREGS or by invalidating the
@@ -695,7 +695,7 @@ trap_pfault(frame, usermode, eva)
 
                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
-                    (intr_nesting_level != 0 || 
+                    (mycpu->gd_intr_nesting_level != 0 || 
                      curthread->td_pcb->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
@@ -758,7 +758,7 @@ trap_pfault(frame, usermode, eva)
                return (0);
 nogo:
        if (!usermode) {
-               if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
+               if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
                        frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
                        return (0);
                }
@@ -865,7 +865,7 @@ trap_pfault(frame, usermode, eva)
                return (0);
 nogo:
        if (!usermode) {
-               if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
+               if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
                        frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
                        return (0);
                }
index db92fc7..a3c0734 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/vm86bios.s,v 1.15.2.1 2000/05/16 06:58:07 dillon Exp $
- * $DragonFly: src/sys/i386/i386/Attic/vm86bios.s,v 1.6 2003/06/22 08:54:18 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/vm86bios.s,v 1.7 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include <machine/asmacros.h>          /* miscellaneous asm macros */
@@ -143,8 +143,7 @@ ENTRY(vm86_bioscall)
         * Return via _doreti, restore the same cpl as our current cpl
         */
        movl    _curthread,%eax
-       pushl   TD_MACH+MTD_CPL(%eax)
-       subl    $4,%esp                 /* dummy unit */
+       pushl   TD_CPL(%eax)
        incb    _intr_nesting_level     /* dummy to match doreti */
        MEXITCOUNT
        jmp     _doreti
index 6069ca6..e46a935 100644 (file)
@@ -39,7 +39,7 @@
  *     from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
  *     Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
  * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
- * $DragonFly: src/sys/i386/i386/Attic/vm_machdep.c,v 1.13 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/vm_machdep.c,v 1.14 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
@@ -54,6 +54,7 @@
 #include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/buf.h>
+#include <sys/interrupt.h>
 #include <sys/vnode.h>
 #include <sys/vmmeter.h>
 #include <sys/kernel.h>
@@ -70,6 +71,7 @@
 #include <machine/pcb_ext.h>
 #include <machine/vm86.h>
 #include <machine/globaldata.h>        /* npxthread */
+#include <machine/ipl.h>       /* SWI_ */
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
@@ -606,16 +608,22 @@ vm_page_zero_idle()
        return (0);
 }
 
-/*
- * Software interrupt handler for queued VM system processing.
- */   
-void  
-swi_vm() 
-{     
+void
+swi_vm(void *arg)
+{
        if (busdma_swi_pending != 0)
                busdma_swi();
 }
 
+static void
+swi_vm_setup(void *arg)
+{
+       register_swi(SWI_VM, swi_vm, NULL, "swi_vm");
+}
+
+SYSINIT(vm_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_vm_setup, NULL);
+
+
 /*
  * Tell whether this address is in some physical memory region.
  * Currently used by the kernel coredump code in order to avoid
index d03241c..bc8c04a 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)icu.h 5.6 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/isa/icu.h,v 1.18 1999/12/26 12:43:47 bde Exp $
- * $DragonFly: src/sys/i386/icu/Attic/icu.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/i386/icu/Attic/icu.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
 
 #ifndef        LOCORE
 
-#ifdef APIC_IO
-
-/*
-#define MP_SAFE
- * Note:
- *     Most of the SMP equivilants of the icu macros are coded
- *     elsewhere in an MP-safe fashion.
- *     In particular note that the 'imen' variable is opaque.
- *     DO NOT access imen directly, use INTREN()/INTRDIS().
- */
-
 void   INTREN                  __P((u_int));
 void   INTRDIS                 __P((u_int));
 
-#else /* APIC_IO */
-
-/*
- * Interrupt "level" mechanism variables, masks, and macros
- */
-extern unsigned imen;          /* interrupt mask enable */
-
-#define        INTREN(s)               (imen &= ~(s), SET_ICUS())
-#define        INTRDIS(s)              (imen |= (s), SET_ICUS())
-
-#if 0
-#ifdef PC98
-#define        SET_ICUS()      (outb(IO_ICU1 + 2, imen), outb(IU_ICU2 + 2, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#else  /* IBM-PC */
-#define        SET_ICUS()      (outb(IO_ICU1 + 1, imen), outb(IU_ICU2 + 1, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#endif /* PC98 */
-#else
-/*
- * XXX - IO_ICU* are defined in isa.h, not icu.h, and nothing much bothers to
- * include isa.h, while too many things include icu.h.
- */
-#ifdef PC98
-#define        SET_ICUS()      (outb(0x02, imen), outb(0x0a, imen >> 8))
-/* XXX is this correct? */
-#define INTRGET()      ((inb(0x0a) << 8 | inb(0x02)) & 0xffff)
-#else
-#define        SET_ICUS()      (outb(0x21, imen), outb(0xa1, imen >> 8))
-#define INTRGET()      ((inb(0xa1) << 8 | inb(0x21)) & 0xffff)
-#endif
-#endif
-
-#endif /* APIC_IO */
-
 #endif /* LOCORE */
 
-
 #ifdef APIC_IO
 /*
  * Note: The APIC uses different values for IRQxxx.
index dee21ca..e070898 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (c) 1989, 1990 William F. Jolitz.
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
+ * Copyright (c) 2003 Matthew Dillon
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $
- * $DragonFly: src/sys/i386/icu/Attic/icu_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/icu/Attic/icu_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
        .data
        ALIGN_DATA
-vec:
-       .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
-       .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
 
-/* interrupt mask enable (all h/w off) */
+       /*
+        * Interrupt mask for ICU interrupts, defaults to all hardware
+        * interrupts turned off.
+        */
        .globl  _imen
 _imen: .long   HWI_MASK
 
-
-/*
- * 
- */
        .text
        SUPERALIGN_TEXT
 
-/*
- * Interrupt priority mechanism
- *     -- soft splXX masks with group mechanism (cpl)
- *     -- h/w masks for currently active or unused interrupts (imen)
- *     -- ipending = active interrupts currently masked by cpl
- *     -- splz handles pending interrupts regardless of the critical
- *        nesting state, it is only called synchronously.
- */
-
-ENTRY(splz)
-       /*
-        * The caller has restored cpl and checked that (ipending & ~cpl)
-        * is nonzero.  We have to repeat the check since if there is an
-        * interrupt while we're looking, _doreti processing for the
-        * interrupt will handle all the unmasked pending interrupts
-        * because we restored early.  We're repeating the calculation
-        * of (ipending & ~cpl) anyway so that the caller doesn't have
-        * to pass it, so this only costs one "jne".  "bsfl %ecx,%ecx"
-        * is undefined when %ecx is 0 so we can't rely on the secondary
-        * btrl tests.
-        */
-       pushl   %ebx
-       movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
-splz_next:
-       /*
-        * We don't need any locking here.  (ipending & ~cpl) cannot grow 
-        * while we're looking at it - any interrupt will shrink it to 0.
-        */
-       movl    $0,_reqpri
-       movl    %eax,%ecx
-       notl    %ecx
-       andl    _ipending,%ecx
-       jne     splz_unpend
-       popl    %ebx
-       ret
-
-       ALIGN_TEXT
-splz_unpend:
-       bsfl    %ecx,%ecx
-       btrl    %ecx,_ipending
-       jnc     splz_next
-       cmpl    $NHWI,%ecx
-       jae     splz_swi
        /*
-        * We would prefer to call the intr handler directly here but that
-        * doesn't work for badly behaved handlers that want the interrupt
-        * frame.  Also, there's a problem determining the unit number.
-        * We should change the interface so that the unit number is not
-        * determined at config time.
+        * Functions to enable and disable a hardware interrupt.  Only
+        * 16 ICU interrupts exist.
+        *
+        * INTREN(1 << irq)     (one interrupt only)
+	 * INTRDIS(1 << irq)	(one interrupt only)
         */
-       popl    %ebx
-       jmp     *vec(,%ecx,4)
-
-       ALIGN_TEXT
-splz_swi:
-       pushl   %eax
-       orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       call    *_ihandlers(,%ecx,4)
-       popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       jmp     splz_next
-
-/*
- * Fake clock interrupt(s) so that they appear to come from our caller instead
- * of from here, so that system profiling works.
- * XXX do this more generally (for all vectors; look up the C entry point).
- * XXX frame bogusness stops us from just jumping to the C entry point.
- */
-       ALIGN_TEXT
-vec0:
-       popl    %eax                    /* return address */
+ENTRY(INTRDIS)
+       movl    4(%esp),%eax
+       orl     %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr0                 /* XXX might need _Xfastintr0 */
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
+       ret
 
-#ifndef PC98
-       ALIGN_TEXT
-vec8:
-       popl    %eax    
+ENTRY(INTREN)
+       movl    4(%esp),%eax
+       notl    %eax
+       andl    %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr8                 /* XXX might need _Xfastintr8 */
-#endif /* PC98 */
-
-/*
- * The 'generic' vector stubs.
- */
-
-#define BUILD_VEC(irq_num)                     \
-       ALIGN_TEXT ;                            \
-__CONCAT(vec,irq_num): ;                       \
-       int     $ICU_OFFSET + (irq_num) ;       \
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
        ret
 
-       BUILD_VEC(1)
-       BUILD_VEC(2)
-       BUILD_VEC(3)
-       BUILD_VEC(4)
-       BUILD_VEC(5)
-       BUILD_VEC(6)
-       BUILD_VEC(7)
-#ifdef PC98
-       BUILD_VEC(8)
-#endif
-       BUILD_VEC(9)
-       BUILD_VEC(10)
-       BUILD_VEC(11)
-       BUILD_VEC(12)
-       BUILD_VEC(13)
-       BUILD_VEC(14)
-       BUILD_VEC(15)
+
index dd2ded2..c7df9e5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
- * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.6 2003/06/28 07:00:58 dillon Exp $
+ * $DragonFly: src/sys/i386/icu/Attic/icu_vector.s,v 1.7 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
@@ -16,6 +16,7 @@
 
 #define        ICU_EOI                 0x20    /* XXX - define elsewhere */
 
+#define        IRQ_LBIT(irq_num)       (1 << (irq_num))
 #define        IRQ_BIT(irq_num)        (1 << ((irq_num) % 8))
 #define        IRQ_BYTE(irq_num)       ((irq_num) >> 3)
 
 #define        ENABLE_ICU1             /* use auto-EOI to reduce i/o */
 #define        OUTB_ICU1
 #else
-#define        ENABLE_ICU1 \
-       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */ \
-       OUTB_ICU1               /* ... to clear in service bit */
-#define        OUTB_ICU1 \
-       outb    %al,$IO_ICU1
+#define        ENABLE_ICU1                                                     \
+       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */  \
+       OUTB_ICU1 ;             /* ... to clear in service bit */       \
+
+#define        OUTB_ICU1                                                       \
+       outb    %al,$IO_ICU1 ;                                          \
+
 #endif
 
 #ifdef AUTO_EOI_2
  */
 #define        ENABLE_ICU1_AND_2       ENABLE_ICU1
 #else
-#define        ENABLE_ICU1_AND_2 \
-       movb    $ICU_EOI,%al ;  /* as above */ \
-       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */ \
-       OUTB_ICU1               /* ... then first icu (if !AUTO_EOI_1) */
+#define        ENABLE_ICU1_AND_2                                               \
+       movb    $ICU_EOI,%al ;  /* as above */                          \
+       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */       \
+       OUTB_ICU1 ;     /* ... then first icu (if !AUTO_EOI_1) */       \
+
 #endif
 
 /*
- * Macros for interrupt interrupt entry, call to handler, and exit.
+ * Macro helpers
  */
+#define PUSH_FRAME                                                     \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       pushal ;                /* 8 registers */                       \
+       pushl   %ds ;                                                   \
+       pushl   %es ;                                                   \
+       pushl   %fs ;                                                   \
+       mov     $KDSEL,%ax ;                                            \
+       mov     %ax,%ds ;                                               \
+       mov     %ax,%es ;                                               \
+       mov     $KPSEL,%ax ;                                            \
+       mov     %ax,%fs ;                                               \
+
+#define PUSH_DUMMY                                                     \
+       pushfl ;                /* phys int frame / flags */            \
+       pushl %cs ;             /* phys int frame / cs */               \
+       pushl   12(%esp) ;      /* original caller eip */               \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       subl    $11*4,%esp ;    /* pushal + 3 seg regs (dummy) */       \
 
-#define        FAST_INTR(irq_num, vec_name, enable_icus)                       \
+/*
+ * Warning: POP_FRAME can only be used if there is no chance of a
+ * segment register being changed (e.g. by procfs), which is why syscalls
+ * have to use doreti.
+ */
+#define POP_FRAME                                                      \
+       popl    %fs ;                                                   \
+       popl    %es ;                                                   \
+       popl    %ds ;                                                   \
+       popal ;                                                         \
+       addl    $2*4,%esp ;     /* dummy trap & error codes */          \
+
+#define POP_DUMMY                                                      \
+       addl    $16*4,%esp ;                                            \
+
+#define MASK_IRQ(icu, irq_num)                                         \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       orb     $IRQ_BIT(irq_num),%al ;                                 \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+
+#define UNMASK_IRQ(icu, irq_num)                                       \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       andb    $~IRQ_BIT(irq_num),%al ;                                \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+       
+/*
+ * Fast interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its fpending bit and
+ *       doreti.
+ *     - If we can take the interrupt clear its fpending bit,
+ *       call the handler, then unmask the interrupt and doreti.
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define        FAST_INTR(irq_num, vec_name, icu, enable_icus)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
-       FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
-       enable_icus ;           /* (re)enable ASAP (helps edge trigger?) */ \
-       addl    $4,%esp ;                                               \
-       incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    _curthread, %ecx ; /* are we in a critical section? */  \
-       cmpl    $TDPRI_CRIT,TD_PRI(%ecx) ;                              \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx),%eax ;     /* save the cpl for doreti */   \
+       pushl   %eax ;                                                  \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ; /* unmasking pending ints? */ \
-       notl    %eax ;                                                  \
-       andl    _ipending,%eax ;                                        \
-       jne     2f ;            /* yes, maybe handle them */            \
-1: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       MAYBE_POPL_ES ;                                                 \
-       popl    %ds ;                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax ;                                                  \
-       iret ;                                                          \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       cmpb    $3,_intr_nesting_level ;        /* is there enough stack? */ \
-       jae     1b ;            /* no, return */                        \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ;                            \
-       /* XXX next line is probably unnecessary now. */                \
-       movl    $HWI_MASK|SWI_MASK,TD_MACH+MTD_CPL(%ecx) ; /* limit nesting ... */ \
-       incb    _intr_nesting_level ;   /* ... really limit it ... */   \
-       sti ;                   /* ... to do this as early as possible */ \
-       MAYBE_POPL_ES ;         /* discard most of thin frame ... */    \
-       popl    %ecx ;          /* ... original %ds ... */              \
-       popl    %edx ;                                                  \
-       xchgl   %eax,4(%esp) ;  /* orig %eax; save cpl */               \
-       pushal ;                /* build fat frame (grrr) ... */        \
-       pushl   %ecx ;          /* ... actually %ds ... */              \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
-       movl    (3+8+0)*4(%esp),%ecx ;  /* ... %ecx from thin frame ... */ \
-       movl    %ecx,(3+6)*4(%esp) ;    /* ... to fat frame ... */      \
-       movl    (3+8+1)*4(%esp),%eax ;  /* ... cpl from thin frame */   \
-       pushl   %eax ;                                                  \
-       subl    $4,%esp ;       /* junk for unit number */              \
-       MEXITCOUNT ;                                                    \
-       jmp     _doreti
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set pending bit and return, leave interrupt masked */        \
+       orl     $IRQ_LBIT(irq_num),_fpending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* clear pending bit, run handler */                            \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Restart fast interrupt held up by critical section or cpl.
+ *
+ *     - Push a dummy trap frame as required by doreti.
+ *     - The interrupt source is already masked.
+ *     - Clear the fpending bit
+ *     - Run the handler
+ *     - Unmask the interrupt
+ *     - Pop the dummy frame and do a normal return
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       incl    _intr_nesting_level ;                                   \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       PUSH_DUMMY ;                                                    \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4, %esp ;                                              \
+       incl    _cnt+V_INTR ;                                           \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       POP_DUMMY ;                                                     \
+       popl %ebp ;                                                     \
+       decl    _intr_nesting_level ;                                   \
+       ret ;                                                           \
+
+/*
+ * Slow interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its ipending bit and
+ *       doreti.  In addition to checking for a critical section
+ *       and cpl mask we also check to see if the thread is still
+ *       running.
+ *     - If we can take the interrupt clear its ipending bit,
+ *       set its irunning bit, and schedule its thread.  Leave
+ *       interrupts masked and doreti.
+ *
+ *     The interrupt thread will run its handlers and loop if 
+ *     ipending is found to be set.  ipending/irunning interlock
+ *     the interrupt thread with the interrupt.  The handler calls
+ *     UNPEND when it is through.
+ *
+ *     Note that we do not enable interrupts when calling sched_ithd.
+ *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
 
 #define        INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   $0 ;            /* dummy error code */                  \
-       pushl   $0 ;            /* dummy trap type */                   \
-       pushal ;                                                        \
-       pushl   %ds ;           /* save our data and extra segments ... */ \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;    /* ... and reload with kernel's own ... */ \
-       mov     %ax,%ds ;       /* ... early for obsolete reasons */    \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
        maybe_extra_ipending ;                                          \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       orb     $IRQ_BIT(irq_num),%al ;                                 \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       enable_icus ;                                                   \
-       movl    _curthread, %ebx ; /* are we in a critical section? */  \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx), %eax ;                                    \
+       pushl   %eax ;          /* push CPL for doreti */               \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jge     2f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ; /* is this interrupt masked by the cpl? */ \
-       testb   $IRQ_BIT(irq_num),%reg ;                                \
-       jne     2f ;                                                    \
-       incb    _intr_nesting_level ;                                   \
-__CONCAT(Xresume,irq_num): ;                                           \
-       FAKE_MCOUNT(13*4(%esp)) ;       /* XXX late to avoid double count */ \
-       incl    _cnt+V_INTR ;   /* tally interrupts */                  \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ;                            \
-       pushl   %eax ;                                                  \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       orl     _intr_mask + (irq_num) * 4,%eax ;                       \
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) ;                            \
-       sti ;                                                           \
-       call    *_intr_handler + (irq_num) * 4 ;                        \
-       cli ;                   /* must unmask _imen and icu atomically */ \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       andb    $~IRQ_BIT(irq_num),%al ;                                \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       sti ;                   /* XXX _doreti repeats the cli/sti */   \
-       MEXITCOUNT ;                                                    \
-       /* We could usually avoid the following jmp by inlining some of */ \
-       /* _doreti, but it's probably better to use less cache. */      \
-       jmp     _doreti ;                                               \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       /* XXX skip mcounting here to avoid double count */             \
-       orb     $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ;       \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       popl    %fs ;                                                   \
-       popl    %es ;                                                   \
-       popl    %ds ;                                                   \
-       popal ;                                                         \
-       addl    $4+4,%esp ;                                             \
-       iret
+       jge     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num),_irunning ;                          \
+       jnz     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave interrupt masked */    \
+       orl     $IRQ_LBIT(irq_num),_ipending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* set running bit, clear pending bit, run handler */           \
+       orl     $IRQ_LBIT(irq_num),_irunning ;                          \
+       andl    $~IRQ_LBIT(irq_num),_ipending ;                         \
+       pushl   $irq_num ;                                              \
+       call    _sched_ithd ;                                           \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Unmask a slow interrupt.  This function is used by interrupt threads
+ * after they have descheduled themselves to reenable interrupts and
+ * possibly cause a reschedule to occur.  The interrupt's irunning bit
+ * is cleared prior to unmasking.
+ */
+
+#define INTR_UNMASK(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       andl    $~IRQ_LBIT(irq_num),_irunning ;                         \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
 
 MCOUNT_LABEL(bintr)
-       FAST_INTR(0,fastintr0, ENABLE_ICU1)
-       FAST_INTR(1,fastintr1, ENABLE_ICU1)
-       FAST_INTR(2,fastintr2, ENABLE_ICU1)
-       FAST_INTR(3,fastintr3, ENABLE_ICU1)
-       FAST_INTR(4,fastintr4, ENABLE_ICU1)
-       FAST_INTR(5,fastintr5, ENABLE_ICU1)
-       FAST_INTR(6,fastintr6, ENABLE_ICU1)
-       FAST_INTR(7,fastintr7, ENABLE_ICU1)
-       FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
-       FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
-       FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
-       FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
-       FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
-       FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
-       FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
-       FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+       FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
+
 #define        CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
        INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
        INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
@@ -208,23 +296,25 @@ MCOUNT_LABEL(bintr)
        INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+
+       FAST_UNPEND(0,fastunpend0, IO_ICU1)
+       FAST_UNPEND(1,fastunpend1, IO_ICU1)
+       FAST_UNPEND(2,fastunpend2, IO_ICU1)
+       FAST_UNPEND(3,fastunpend3, IO_ICU1)
+       FAST_UNPEND(4,fastunpend4, IO_ICU1)
+       FAST_UNPEND(5,fastunpend5, IO_ICU1)
+       FAST_UNPEND(6,fastunpend6, IO_ICU1)
+       FAST_UNPEND(7,fastunpend7, IO_ICU1)
+       FAST_UNPEND(8,fastunpend8, IO_ICU2)
+       FAST_UNPEND(9,fastunpend9, IO_ICU2)
+       FAST_UNPEND(10,fastunpend10, IO_ICU2)
+       FAST_UNPEND(11,fastunpend11, IO_ICU2)
+       FAST_UNPEND(12,fastunpend12, IO_ICU2)
+       FAST_UNPEND(13,fastunpend13, IO_ICU2)
+       FAST_UNPEND(14,fastunpend14, IO_ICU2)
+       FAST_UNPEND(15,fastunpend15, IO_ICU2)
 MCOUNT_LABEL(eintr)
 
        .data
-       .globl  _ihandlers
-_ihandlers:                    /* addresses of interrupt handlers */
-                               /* actually resumption addresses for HWI's */
-       .long   Xresume0, Xresume1, Xresume2, Xresume3 
-       .long   Xresume4, Xresume5, Xresume6, Xresume7
-       .long   Xresume8, Xresume9, Xresume10, Xresume11
-       .long   Xresume12, Xresume13, Xresume14, Xresume15 
-       .long   _swi_null, swi_net, _swi_null, _swi_null
-       .long   _swi_vm, _swi_null, _softclock
-
-imasks:                                /* masks for interrupt handlers */
-       .space  NHWI*4          /* padding; HWI masks are elsewhere */
-
-       .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
-       .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
 
        .text
index 2b760ba..92bcee6 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/asnames.h,v 1.44.2.8 2003/01/22 20:14:53 jhb Exp $
- * $DragonFly: src/sys/i386/include/Attic/asnames.h,v 1.10 2003/06/28 04:16:03 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/asnames.h,v 1.11 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_ASNAMES_H_
 #define _Xdna                          Xdna
 #define _Xfastintr0                    Xfastintr0
 #define _Xfastintr1                    Xfastintr1
+#define _Xfastintr2                    Xfastintr2
+#define _Xfastintr3                    Xfastintr3
+#define _Xfastintr4                    Xfastintr4
+#define _Xfastintr5                    Xfastintr5
+#define _Xfastintr6                    Xfastintr6
+#define _Xfastintr7                    Xfastintr7
+#define _Xfastintr8                    Xfastintr8
+#define _Xfastintr9                    Xfastintr9
 #define _Xfastintr10                   Xfastintr10
 #define _Xfastintr11                   Xfastintr11
 #define _Xfastintr12                   Xfastintr12
 #define _Xfastintr17                   Xfastintr17
 #define _Xfastintr18                   Xfastintr18
 #define _Xfastintr19                   Xfastintr19
-#define _Xfastintr2                    Xfastintr2
 #define _Xfastintr20                   Xfastintr20
 #define _Xfastintr21                   Xfastintr21
 #define _Xfastintr22                   Xfastintr22
 #define _Xfastintr23                   Xfastintr23
-#define _Xfastintr3                    Xfastintr3
-#define _Xfastintr4                    Xfastintr4
-#define _Xfastintr5                    Xfastintr5
-#define _Xfastintr6                    Xfastintr6
-#define _Xfastintr7                    Xfastintr7
-#define _Xfastintr8                    Xfastintr8
-#define _Xfastintr9                    Xfastintr9
+#define _Xfastunpend0                  Xfastunpend0
+#define _Xfastunpend1                  Xfastunpend1
+#define _Xfastunpend2                  Xfastunpend2
+#define _Xfastunpend3                  Xfastunpend3
+#define _Xfastunpend4                  Xfastunpend4
+#define _Xfastunpend5                  Xfastunpend5
+#define _Xfastunpend6                  Xfastunpend6
+#define _Xfastunpend7                  Xfastunpend7
+#define _Xfastunpend8                  Xfastunpend8
+#define _Xfastunpend9                  Xfastunpend9
+#define _Xfastunpend10                 Xfastunpend10
+#define _Xfastunpend11                 Xfastunpend11
+#define _Xfastunpend12                 Xfastunpend12
+#define _Xfastunpend13                 Xfastunpend13
+#define _Xfastunpend14                 Xfastunpend14
+#define _Xfastunpend15                 Xfastunpend15
+#define _Xfastunpend16                 Xfastunpend16
+#define _Xfastunpend17                 Xfastunpend17
+#define _Xfastunpend18                 Xfastunpend18
+#define _Xfastunpend19                 Xfastunpend19
+#define _Xfastunpend20                 Xfastunpend20
+#define _Xfastunpend21                 Xfastunpend21
+#define _Xfastunpend22                 Xfastunpend22
+#define _Xfastunpend23                 Xfastunpend23
 #define _Xforward_irq                  Xforward_irq
 #define _Xfpu                          Xfpu
 #define _Xfpusegm                      Xfpusegm
 #define _etext                         etext
 #define _exception                     exception
 #define _fast_intr_lock                        fast_intr_lock
+#define _fastunpend                    fastunpend
 #define _fastmove                      fastmove
 #define _gdt                           gdt
 #define _generic_bcopy                 generic_bcopy
 #define _intr_countp                   intr_countp
 #define _intr_handler                  intr_handler
 #define _intr_mask                     intr_mask
-#define _intr_nesting_level            intr_nesting_level
 #define _intr_unit                     intr_unit
 #define _intrcnt                       intrcnt
 #define _intrnames                     intrnames
 #define _invltlb_ok                    invltlb_ok
 #define _ioapic                                ioapic
-#define _ipending                      ipending
 #define _isr_lock                      isr_lock
 #define _kernbase                      kernbase
 #define _kernelname                    kernelname
 #define _mul64                         mul64
 #define _net_imask                     net_imask
 #define _netisr                                netisr
-#define _netisrs                       netisrs
 #define _nfs_diskless                  nfs_diskless
 #define _nfs_diskless_valid            nfs_diskless_valid
 #define _normalize                     normalize
 #define _round_reg                     round_reg
 #define _s_lock                                s_lock
 #define _s_unlock                      s_unlock
+#define _sched_ithd                    sched_ithd
 #define _set_precision_flag_down       set_precision_flag_down
 #define _set_precision_flag_up         set_precision_flag_up
 #define _set_user_ldt                  set_user_ldt
 
 #define        FS(x)   %fs:gd_ ## x
 
+#define _fpending                      FS(fpending)
+#define _ipending                      FS(ipending)
+#define _irunning                      FS(irunning)
 #define _common_tss                    FS(common_tss)
 #define _common_tssd                   FS(common_tssd)
 #define _cpuid                         FS(cpuid)
 #define _idlethread                    FS(idlethread)
 #define _astpending                    FS(astpending)
 #define _currentldt                    FS(currentldt)
-#define _inside_intr                   FS(inside_intr)
+#define _intr_nesting_level            FS(intr_nesting_level)
 #define _npxthread                     FS(npxthread)
 #define _other_cpus                    FS(other_cpus)
 #define _CADDR1                                FS(CADDR1)
index 1de6444..700e916 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/include/cpu.h,v 1.43.2.2 2001/06/15 09:37:57 scottl Exp $
- * $DragonFly: src/sys/i386/include/Attic/cpu.h,v 1.4 2003/06/20 02:09:54 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/cpu.h,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_CPU_H_
@@ -61,7 +61,7 @@
 #define        CLKF_USERMODE(framep) \
        ((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))
 
-#define CLKF_INTR(framep)      (intr_nesting_level >= 2)
+#define CLKF_INTR(framep)      (mycpu->gd_intr_nesting_level >= 2)
 #if 0
 /*
  * XXX splsoftclock() is very broken and barely worth fixing.  It doesn't
 #ifdef _KERNEL
 extern char    btext[];
 extern char    etext[];
-extern u_char  intr_nesting_level;
 
 void   fork_trampoline __P((void));
 void   fork_return __P((struct proc *, struct trapframe));
index 868819b..b28d441 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
- * $DragonFly: src/sys/i386/include/Attic/cpufunc.h,v 1.3 2003/06/28 02:09:49 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/cpufunc.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
@@ -68,6 +68,10 @@ breakpoint(void)
        __asm __volatile("int $3");
 }
 
+/*
+ * Find the first 1 in mask, starting with bit 0 and return the
+ * bit number.  If mask is 0 the result is undefined.
+ */
 static __inline u_int
 bsfl(u_int mask)
 {
@@ -77,6 +81,10 @@ bsfl(u_int mask)
        return (result);
 }
 
+/*
+ * Find the last 1 in mask, starting with bit 31 and return the
+ * bit number.  If mask is 0 the result is undefined.
+ */
 static __inline u_int
 bsrl(u_int mask)
 {
@@ -86,6 +94,34 @@ bsrl(u_int mask)
        return (result);
 }
 
+/*
+ * Test and set the specified bit (1 << bit) in the integer.  The
+ * previous value of the bit is returned (0 or 1).
+ */
+static __inline int
+btsl(u_int *mask, int bit)
+{
+       int result;
+
+       __asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
+                   "=r"(result), "=m"(*mask) : "r" (bit));
+       return(result);
+}
+
+/*
+ * Test and clear the specified bit (1 << bit) in the integer.  The
+ * previous value of the bit is returned (0 or 1).
+ */
+static __inline int
+btrl(u_int *mask, int bit)
+{
+       int result;
+
+       __asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
+                   "=r"(result), "=m"(*mask) : "r" (bit));
+       return(result);
+}
+
 static __inline void
 disable_intr(void)
 {
index ab06d7b..7415c2c 100644 (file)
@@ -28,7 +28,7 @@
  *     should not include this file.
  *
  * $FreeBSD: src/sys/i386/include/globaldata.h,v 1.11.2.1 2000/05/16 06:58:10 dillon Exp $
- * $DragonFly: src/sys/i386/include/Attic/globaldata.h,v 1.13 2003/06/28 04:16:03 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/globaldata.h,v 1.14 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_GLOBALDATA_H_
 #include <machine/tss.h>       /* struct i386tss */
 #endif
 
-
+/*
+ * Note on interrupt control.  Pending interrupts not yet dispatched are
+ * marked in gd_fpending or gd_ipending.  Once dispatched an interrupt
+ * is marked in irunning and the fpending bit is cleared.  For edge triggered
+ * interrupts interrupts may be enabled again at this point and if they
+ * occur before the interrupt service routine is complete the ipending bit
+ * will be set again and cause the interrupt service to loop.  The current
+ * thread's cpl is stored in the thread structure.
+ */
 struct mdglobaldata {
        struct globaldata mi;
        struct thread   gd_idlethread;
@@ -55,6 +63,9 @@ struct mdglobaldata {
        struct segment_descriptor *gd_tss_gdt;
        struct thread   *gd_npxthread;
        struct i386tss  gd_common_tss;
+       int             gd_fpending;    /* fast interrupt pending */
+       int             gd_ipending;    /* normal interrupt pending */
+       int             gd_irunning;    /* normal interrupt in progress */
        int             gd_currentldt;  /* USER_LDT */
        u_int           gd_cpu_lockid;
        u_int           gd_other_cpus;
index 098cc4e..beb1e53 100644 (file)
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/md_var.h,v 1.35.2.4 2003/01/22 20:14:53 jhb Exp $
- * $DragonFly: src/sys/i386/include/Attic/md_var.h,v 1.7 2003/06/28 04:16:03 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/md_var.h,v 1.8 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_MD_VAR_H_
@@ -58,7 +58,6 @@ extern        char    kstack[];
 extern int     need_pre_dma_flush;
 extern int     need_post_dma_flush;
 #endif
-extern void    (*netisrs[32]) __P((void));
 extern int     nfs_diskless_valid;
 extern void    (*ovbcopy_vector) __P((const void *from, void *to, size_t len));
 extern char    sigcode[];
@@ -110,7 +109,6 @@ int is_physical_memory __P((vm_offset_t addr));
 u_long kvtop __P((void *addr));
 void   setidt __P((int idx, alias_for_inthand_t *func, int typ, int dpl,
                    int selec));
-void   swi_vm __P((void));
 void   userconfig __P((void));
 int     user_dbreg_trap __P((void));
 int    vm_page_zero_idle __P((void));
index 903a5ee..dcfd6c9 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)types.h     8.3 (Berkeley) 1/5/94
  * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
- * $DragonFly: src/sys/i386/include/Attic/types.h,v 1.3 2003/06/28 02:09:49 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/types.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_TYPES_H_
@@ -67,8 +67,4 @@ typedef unsigned int *pt_entry_t;
 /* Interrupt mask (spl, xxx_imask, etc) */
 typedef __uint32_t             intrmask_t;
 
-/* Interrupt handler function type. */
-typedef        void                    inthand2_t __P((void *_cookie));
-typedef        void                    ointhand2_t __P((int _device_id));
-
 #endif /* !_MACHINE_TYPES_H_ */
index a8e41d5..a84e902 100644 (file)
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
- * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
+#if 0
 
        .data
        ALIGN_DATA
@@ -101,7 +102,7 @@ ENTRY(splz)
         */
        pushl   %ebx
        movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
+       movl    TD_CPL(%ebx),%eax
 splz_next:
        /*
         * We don't need any locking here.  (ipending & ~cpl) cannot grow 
@@ -141,10 +142,10 @@ splz_unpend:
 splz_swi:
        pushl   %eax                    /* save cpl across call */
        orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* set cpl for SWI */
+       movl    %eax,TD_CPL(%ebx) /* set cpl for SWI */
        call    *_ihandlers(,%ecx,4)
        popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* restore cpl and loop */
+       movl    %eax,TD_CPL(%ebx) /* restore cpl and loop */
        jmp     splz_next
 
 /*
@@ -463,3 +464,5 @@ ENTRY(io_apic_write)
 ENTRY(apic_eoi)
        movl    $0, _lapic+0xb0
        ret
+
+#endif
index aa2267d..e98beb5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.5 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.6 2003/06/29 03:28:43 dillon Exp $
  */
 
 
@@ -29,14 +29,14 @@ IDTVEC(vec_name) ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
+       pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
+       movl    %ax,%es ;                                               \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
-       FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
+       FAKE_MCOUNT(6*4(%esp)) ;                                        \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
@@ -48,7 +48,7 @@ IDTVEC(vec_name) ;                                                    \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
-       MAYBE_POPL_ES ;                                                 \
+       popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
@@ -259,6 +259,7 @@ __CONCAT(Xresume,irq_num): ;                                                \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
+       addl    $4,%esp ;                                               \
 ;                                                                      \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
@@ -456,8 +457,6 @@ _Xcpuast:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-       
        movl    _cpuid, %eax
        lock    
        btrl    %eax, _checkstate_pending_ast
@@ -512,8 +511,6 @@ _Xforward_irq:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-
        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
 1:
@@ -722,6 +719,8 @@ _Xrendezvous:
        
        
        .data
+
+#if 0
 /*
  * Addresses of interrupt handlers.
  *  XresumeNN: Resumption addresses for HWIs.
@@ -751,6 +750,7 @@ imasks:                             /* masks for interrupt handlers */
 
        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
+#endif
 
 /* active flag for lazy masking */
 iactive:
index 26b295d..ca54dc7 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)icu.h 5.6 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/isa/icu.h,v 1.18 1999/12/26 12:43:47 bde Exp $
- * $DragonFly: src/sys/i386/isa/Attic/icu.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/icu.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
 
 #ifndef        LOCORE
 
-#ifdef APIC_IO
-
-/*
-#define MP_SAFE
- * Note:
- *     Most of the SMP equivilants of the icu macros are coded
- *     elsewhere in an MP-safe fashion.
- *     In particular note that the 'imen' variable is opaque.
- *     DO NOT access imen directly, use INTREN()/INTRDIS().
- */
-
 void   INTREN                  __P((u_int));
 void   INTRDIS                 __P((u_int));
 
-#else /* APIC_IO */
-
-/*
- * Interrupt "level" mechanism variables, masks, and macros
- */
-extern unsigned imen;          /* interrupt mask enable */
-
-#define        INTREN(s)               (imen &= ~(s), SET_ICUS())
-#define        INTRDIS(s)              (imen |= (s), SET_ICUS())
-
-#if 0
-#ifdef PC98
-#define        SET_ICUS()      (outb(IO_ICU1 + 2, imen), outb(IU_ICU2 + 2, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#else  /* IBM-PC */
-#define        SET_ICUS()      (outb(IO_ICU1 + 1, imen), outb(IU_ICU2 + 1, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#endif /* PC98 */
-#else
-/*
- * XXX - IO_ICU* are defined in isa.h, not icu.h, and nothing much bothers to
- * include isa.h, while too many things include icu.h.
- */
-#ifdef PC98
-#define        SET_ICUS()      (outb(0x02, imen), outb(0x0a, imen >> 8))
-/* XXX is this correct? */
-#define INTRGET()      ((inb(0x0a) << 8 | inb(0x02)) & 0xffff)
-#else
-#define        SET_ICUS()      (outb(0x21, imen), outb(0xa1, imen >> 8))
-#define INTRGET()      ((inb(0xa1) << 8 | inb(0x21)) & 0xffff)
-#endif
-#endif
-
-#endif /* APIC_IO */
-
 #endif /* LOCORE */
 
-
 #ifdef APIC_IO
 /*
  * Note: The APIC uses different values for IRQxxx.
index c315f5c..83baa1a 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (c) 1989, 1990 William F. Jolitz.
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
+ * Copyright (c) 2003 Matthew Dillon
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $
- * $DragonFly: src/sys/i386/isa/Attic/icu_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/icu_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
        .data
        ALIGN_DATA
-vec:
-       .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
-       .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
 
-/* interrupt mask enable (all h/w off) */
+       /*
+        * Interrupt mask for ICU interrupts, defaults to all hardware
+        * interrupts turned off.
+        */
        .globl  _imen
 _imen: .long   HWI_MASK
 
-
-/*
- * 
- */
        .text
        SUPERALIGN_TEXT
 
-/*
- * Interrupt priority mechanism
- *     -- soft splXX masks with group mechanism (cpl)
- *     -- h/w masks for currently active or unused interrupts (imen)
- *     -- ipending = active interrupts currently masked by cpl
- *     -- splz handles pending interrupts regardless of the critical
- *        nesting state, it is only called synchronously.
- */
-
-ENTRY(splz)
-       /*
-        * The caller has restored cpl and checked that (ipending & ~cpl)
-        * is nonzero.  We have to repeat the check since if there is an
-        * interrupt while we're looking, _doreti processing for the
-        * interrupt will handle all the unmasked pending interrupts
-        * because we restored early.  We're repeating the calculation
-        * of (ipending & ~cpl) anyway so that the caller doesn't have
-        * to pass it, so this only costs one "jne".  "bsfl %ecx,%ecx"
-        * is undefined when %ecx is 0 so we can't rely on the secondary
-        * btrl tests.
-        */
-       pushl   %ebx
-       movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
-splz_next:
-       /*
-        * We don't need any locking here.  (ipending & ~cpl) cannot grow 
-        * while we're looking at it - any interrupt will shrink it to 0.
-        */
-       movl    $0,_reqpri
-       movl    %eax,%ecx
-       notl    %ecx
-       andl    _ipending,%ecx
-       jne     splz_unpend
-       popl    %ebx
-       ret
-
-       ALIGN_TEXT
-splz_unpend:
-       bsfl    %ecx,%ecx
-       btrl    %ecx,_ipending
-       jnc     splz_next
-       cmpl    $NHWI,%ecx
-       jae     splz_swi
        /*
-        * We would prefer to call the intr handler directly here but that
-        * doesn't work for badly behaved handlers that want the interrupt
-        * frame.  Also, there's a problem determining the unit number.
-        * We should change the interface so that the unit number is not
-        * determined at config time.
+        * Functions to enable and disable a hardware interrupt.  Only
+        * 16 ICU interrupts exist.
+        *
+        * INTREN(1 << irq)     (one interrupt only)
+        * INTRDIS(1 << irq)    (one interrupt only)
         */
-       popl    %ebx
-       jmp     *vec(,%ecx,4)
-
-       ALIGN_TEXT
-splz_swi:
-       pushl   %eax
-       orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       call    *_ihandlers(,%ecx,4)
-       popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       jmp     splz_next
-
-/*
- * Fake clock interrupt(s) so that they appear to come from our caller instead
- * of from here, so that system profiling works.
- * XXX do this more generally (for all vectors; look up the C entry point).
- * XXX frame bogusness stops us from just jumping to the C entry point.
- */
-       ALIGN_TEXT
-vec0:
-       popl    %eax                    /* return address */
+ENTRY(INTRDIS)
+       movl    4(%esp),%eax
+       orl     %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr0                 /* XXX might need _Xfastintr0 */
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
+       ret
 
-#ifndef PC98
-       ALIGN_TEXT
-vec8:
-       popl    %eax    
+ENTRY(INTREN)
+       movl    4(%esp),%eax
+       notl    %eax
+       andl    %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr8                 /* XXX might need _Xfastintr8 */
-#endif /* PC98 */
-
-/*
- * The 'generic' vector stubs.
- */
-
-#define BUILD_VEC(irq_num)                     \
-       ALIGN_TEXT ;                            \
-__CONCAT(vec,irq_num): ;                       \
-       int     $ICU_OFFSET + (irq_num) ;       \
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
        ret
 
-       BUILD_VEC(1)
-       BUILD_VEC(2)
-       BUILD_VEC(3)
-       BUILD_VEC(4)
-       BUILD_VEC(5)
-       BUILD_VEC(6)
-       BUILD_VEC(7)
-#ifdef PC98
-       BUILD_VEC(8)
-#endif
-       BUILD_VEC(9)
-       BUILD_VEC(10)
-       BUILD_VEC(11)
-       BUILD_VEC(12)
-       BUILD_VEC(13)
-       BUILD_VEC(14)
-       BUILD_VEC(15)
+
index 8667a5e..b21a70a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
- * $DragonFly: src/sys/i386/isa/Attic/icu_vector.s,v 1.6 2003/06/28 07:00:58 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/icu_vector.s,v 1.7 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
@@ -16,6 +16,7 @@
 
 #define        ICU_EOI                 0x20    /* XXX - define elsewhere */
 
+#define        IRQ_LBIT(irq_num)       (1 << (irq_num))
 #define        IRQ_BIT(irq_num)        (1 << ((irq_num) % 8))
 #define        IRQ_BYTE(irq_num)       ((irq_num) >> 3)
 
 #define        ENABLE_ICU1             /* use auto-EOI to reduce i/o */
 #define        OUTB_ICU1
 #else
-#define        ENABLE_ICU1 \
-       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */ \
-       OUTB_ICU1               /* ... to clear in service bit */
-#define        OUTB_ICU1 \
-       outb    %al,$IO_ICU1
+#define        ENABLE_ICU1                                                     \
+       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */  \
+       OUTB_ICU1 ;             /* ... to clear in service bit */       \
+
+#define        OUTB_ICU1                                                       \
+       outb    %al,$IO_ICU1 ;                                          \
+
 #endif
 
 #ifdef AUTO_EOI_2
  */
 #define        ENABLE_ICU1_AND_2       ENABLE_ICU1
 #else
-#define        ENABLE_ICU1_AND_2 \
-       movb    $ICU_EOI,%al ;  /* as above */ \
-       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */ \
-       OUTB_ICU1               /* ... then first icu (if !AUTO_EOI_1) */
+#define        ENABLE_ICU1_AND_2                                               \
+       movb    $ICU_EOI,%al ;  /* as above */                          \
+       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */       \
+       OUTB_ICU1 ;     /* ... then first icu (if !AUTO_EOI_1) */       \
+
 #endif
 
 /*
- * Macros for interrupt interrupt entry, call to handler, and exit.
+ * Macro helpers
  */
+#define PUSH_FRAME                                                     \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       pushal ;                /* 8 registers */                       \
+       pushl   %ds ;                                                   \
+       pushl   %es ;                                                   \
+       pushl   %fs ;                                                   \
+       mov     $KDSEL,%ax ;                                            \
+       mov     %ax,%ds ;                                               \
+       mov     %ax,%es ;                                               \
+       mov     $KPSEL,%ax ;                                            \
+       mov     %ax,%fs ;                                               \
+
+#define PUSH_DUMMY                                                     \
+       pushfl ;                /* phys int frame / flags */            \
+       pushl %cs ;             /* phys int frame / cs */               \
+       pushl   12(%esp) ;      /* original caller eip */               \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       subl    $11*4,%esp ;    /* pushal + 3 seg regs (dummy) */       \
 
-#define        FAST_INTR(irq_num, vec_name, enable_icus)                       \
+/*
+ * Warning: POP_FRAME can only be used if there is no chance of a
+ * segment register being changed (e.g. by procfs), which is why syscalls
+ * have to use doreti.
+ */
+#define POP_FRAME                                                      \
+       popl    %fs ;                                                   \
+       popl    %es ;                                                   \
+       popl    %ds ;                                                   \
+       popal ;                                                         \
+       addl    $2*4,%esp ;     /* dummy trap & error codes */          \
+
+#define POP_DUMMY                                                      \
+       addl    $16*4,%esp ;                                            \
+
+#define MASK_IRQ(icu, irq_num)                                         \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       orb     $IRQ_BIT(irq_num),%al ;                                 \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+
+#define UNMASK_IRQ(icu, irq_num)                                       \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       andb    $~IRQ_BIT(irq_num),%al ;                                \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+       
+/*
+ * Fast interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its fpending bit and
+ *       doreti.
+ *     - If we can take the interrupt clear its fpending bit,
+ *       call the handler, then unmask the interrupt and doreti.
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define        FAST_INTR(irq_num, vec_name, icu, enable_icus)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
-       FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
-       enable_icus ;           /* (re)enable ASAP (helps edge trigger?) */ \
-       addl    $4,%esp ;                                               \
-       incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    _curthread, %ecx ; /* are we in a critical section? */  \
-       cmpl    $TDPRI_CRIT,TD_PRI(%ecx) ;                              \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx),%eax ;     /* save the cpl for doreti */   \
+       pushl   %eax ;                                                  \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ; /* unmasking pending ints? */ \
-       notl    %eax ;                                                  \
-       andl    _ipending,%eax ;                                        \
-       jne     2f ;            /* yes, maybe handle them */            \
-1: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       MAYBE_POPL_ES ;                                                 \
-       popl    %ds ;                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax ;                                                  \
-       iret ;                                                          \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       cmpb    $3,_intr_nesting_level ;        /* is there enough stack? */ \
-       jae     1b ;            /* no, return */                        \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ;                            \
-       /* XXX next line is probably unnecessary now. */                \
-       movl    $HWI_MASK|SWI_MASK,TD_MACH+MTD_CPL(%ecx) ; /* limit nesting ... */ \
-       incb    _intr_nesting_level ;   /* ... really limit it ... */   \
-       sti ;                   /* ... to do this as early as possible */ \
-       MAYBE_POPL_ES ;         /* discard most of thin frame ... */    \
-       popl    %ecx ;          /* ... original %ds ... */              \
-       popl    %edx ;                                                  \
-       xchgl   %eax,4(%esp) ;  /* orig %eax; save cpl */               \
-       pushal ;                /* build fat frame (grrr) ... */        \
-       pushl   %ecx ;          /* ... actually %ds ... */              \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
-       movl    (3+8+0)*4(%esp),%ecx ;  /* ... %ecx from thin frame ... */ \
-       movl    %ecx,(3+6)*4(%esp) ;    /* ... to fat frame ... */      \
-       movl    (3+8+1)*4(%esp),%eax ;  /* ... cpl from thin frame */   \
-       pushl   %eax ;                                                  \
-       subl    $4,%esp ;       /* junk for unit number */              \
-       MEXITCOUNT ;                                                    \
-       jmp     _doreti
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set pending bit and return, leave interrupt masked */        \
+       orl     $IRQ_LBIT(irq_num),_fpending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* clear pending bit, run handler */                            \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Restart fast interrupt held up by critical section or cpl.
+ *
+ *     - Push a dummy trap frame as required by doreti.
+ *     - The interrupt source is already masked.
+ *     - Clear the fpending bit
+ *     - Run the handler
+ *     - Unmask the interrupt
+ *     - Pop the dummy frame and do a normal return
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       incl    _intr_nesting_level ;                                   \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       PUSH_DUMMY ;                                                    \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4, %esp ;                                              \
+       incl    _cnt+V_INTR ;                                           \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       POP_DUMMY ;                                                     \
+       popl %ebp ;                                                     \
+       decl    _intr_nesting_level ;                                   \
+       ret ;                                                           \
+
+/*
+ * Slow interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its ipending bit and
+ *       doreti.  In addition to checking for a critical section
+ *       and cpl mask we also check to see if the thread is still
+ *       running.
+ *     - If we can take the interrupt clear its ipending bit,
+ *       set its irunning bit, and schedule its thread.  Leave
+ *       interrupts masked and doreti.
+ *
+ *     The interrupt thread will run its handlers and loop if 
+ *     ipending is found to be set.  ipending/irunning interlock
+ *     the interrupt thread with the interrupt.  The handler calls
+ *     UNPEND when it is through.
+ *
+ *     Note that we do not enable interrupts when calling sched_ithd.
+ *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
 
 #define        INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   $0 ;            /* dummy error code */                  \
-       pushl   $0 ;            /* dummy trap type */                   \
-       pushal ;                                                        \
-       pushl   %ds ;           /* save our data and extra segments ... */ \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;    /* ... and reload with kernel's own ... */ \
-       mov     %ax,%ds ;       /* ... early for obsolete reasons */    \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
        maybe_extra_ipending ;                                          \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       orb     $IRQ_BIT(irq_num),%al ;                                 \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       enable_icus ;                                                   \
-       movl    _curthread, %ebx ; /* are we in a critical section? */  \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx), %eax ;                                    \
+       pushl   %eax ;          /* push CPL for doreti */               \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jge     2f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ; /* is this interrupt masked by the cpl? */ \
-       testb   $IRQ_BIT(irq_num),%reg ;                                \
-       jne     2f ;                                                    \
-       incb    _intr_nesting_level ;                                   \
-__CONCAT(Xresume,irq_num): ;                                           \
-       FAKE_MCOUNT(13*4(%esp)) ;       /* XXX late to avoid double count */ \
-       incl    _cnt+V_INTR ;   /* tally interrupts */                  \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ;                            \
-       pushl   %eax ;                                                  \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       orl     _intr_mask + (irq_num) * 4,%eax ;                       \
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) ;                            \
-       sti ;                                                           \
-       call    *_intr_handler + (irq_num) * 4 ;                        \
-       cli ;                   /* must unmask _imen and icu atomically */ \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       andb    $~IRQ_BIT(irq_num),%al ;                                \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       sti ;                   /* XXX _doreti repeats the cli/sti */   \
-       MEXITCOUNT ;                                                    \
-       /* We could usually avoid the following jmp by inlining some of */ \
-       /* _doreti, but it's probably better to use less cache. */      \
-       jmp     _doreti ;                                               \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       /* XXX skip mcounting here to avoid double count */             \
-       orb     $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ;       \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       popl    %fs ;                                                   \
-       popl    %es ;                                                   \
-       popl    %ds ;                                                   \
-       popal ;                                                         \
-       addl    $4+4,%esp ;                                             \
-       iret
+       jge     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num),_irunning ;                          \
+       jnz     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave interrupt masked */    \
+       orl     $IRQ_LBIT(irq_num),_ipending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* set running bit, clear pending bit, run handler */           \
+       orl     $IRQ_LBIT(irq_num),_irunning ;                          \
+       andl    $~IRQ_LBIT(irq_num),_ipending ;                         \
+       pushl   $irq_num ;                                              \
+       call    _sched_ithd ;                                           \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Unmask a slow interrupt.  This function is used by interrupt threads
+ * after they have descheduled themselves to reenable interrupts and
+ * possibly cause a reschedule to occur.  The interrupt's irunning bit
+ * is cleared prior to unmasking.
+ */
+
+#define INTR_UNMASK(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       andl    $~IRQ_LBIT(irq_num),_irunning ;                         \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
 
 MCOUNT_LABEL(bintr)
-       FAST_INTR(0,fastintr0, ENABLE_ICU1)
-       FAST_INTR(1,fastintr1, ENABLE_ICU1)
-       FAST_INTR(2,fastintr2, ENABLE_ICU1)
-       FAST_INTR(3,fastintr3, ENABLE_ICU1)
-       FAST_INTR(4,fastintr4, ENABLE_ICU1)
-       FAST_INTR(5,fastintr5, ENABLE_ICU1)
-       FAST_INTR(6,fastintr6, ENABLE_ICU1)
-       FAST_INTR(7,fastintr7, ENABLE_ICU1)
-       FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
-       FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
-       FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
-       FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
-       FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
-       FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
-       FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
-       FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+       FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
+
 #define        CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
        INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
        INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
@@ -208,23 +296,25 @@ MCOUNT_LABEL(bintr)
        INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+
+       FAST_UNPEND(0,fastunpend0, IO_ICU1)
+       FAST_UNPEND(1,fastunpend1, IO_ICU1)
+       FAST_UNPEND(2,fastunpend2, IO_ICU1)
+       FAST_UNPEND(3,fastunpend3, IO_ICU1)
+       FAST_UNPEND(4,fastunpend4, IO_ICU1)
+       FAST_UNPEND(5,fastunpend5, IO_ICU1)
+       FAST_UNPEND(6,fastunpend6, IO_ICU1)
+       FAST_UNPEND(7,fastunpend7, IO_ICU1)
+       FAST_UNPEND(8,fastunpend8, IO_ICU2)
+       FAST_UNPEND(9,fastunpend9, IO_ICU2)
+       FAST_UNPEND(10,fastunpend10, IO_ICU2)
+       FAST_UNPEND(11,fastunpend11, IO_ICU2)
+       FAST_UNPEND(12,fastunpend12, IO_ICU2)
+       FAST_UNPEND(13,fastunpend13, IO_ICU2)
+       FAST_UNPEND(14,fastunpend14, IO_ICU2)
+       FAST_UNPEND(15,fastunpend15, IO_ICU2)
 MCOUNT_LABEL(eintr)
 
        .data
-       .globl  _ihandlers
-_ihandlers:                    /* addresses of interrupt handlers */
-                               /* actually resumption addresses for HWI's */
-       .long   Xresume0, Xresume1, Xresume2, Xresume3 
-       .long   Xresume4, Xresume5, Xresume6, Xresume7
-       .long   Xresume8, Xresume9, Xresume10, Xresume11
-       .long   Xresume12, Xresume13, Xresume14, Xresume15 
-       .long   _swi_null, swi_net, _swi_null, _swi_null
-       .long   _swi_vm, _swi_null, _softclock
-
-imasks:                                /* masks for interrupt handlers */
-       .space  NHWI*4          /* padding; HWI masks are elsewhere */
-
-       .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
-       .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
 
        .text
index eaf82b4..c972594 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)isa.c 7.2 (Berkeley) 5/13/91
  * $FreeBSD: src/sys/i386/isa/intr_machdep.c,v 1.29.2.5 2001/10/14 06:54:27 luigi Exp $
- * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.c,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.c,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 /*
  * This file contains an aggregated module marked:
@@ -61,6 +61,9 @@
 #include <machine/md_var.h>
 #include <machine/segments.h>
 #include <sys/bus.h> 
+#include <machine/globaldata.h>
+#include <sys/proc.h>
+#include <sys/thread2.h>
 
 #if defined(APIC_IO)
 #include <machine/smp.h>
 u_long *intr_countp[ICU_LEN];
 inthand2_t *intr_handler[ICU_LEN];
 u_int  intr_mask[ICU_LEN];
+int    intr_mihandler_installed[ICU_LEN];
 static u_int*  intr_mptr[ICU_LEN];
 void   *intr_unit[ICU_LEN];
 
@@ -131,6 +135,27 @@ static inthand_t *fastintr[ICU_LEN] = {
 #endif /* APIC_IO */
 };
 
+unpendhand_t *fastunpend[ICU_LEN] = {
+       IDTVEC(fastunpend0), IDTVEC(fastunpend1),
+       IDTVEC(fastunpend2), IDTVEC(fastunpend3),
+       IDTVEC(fastunpend4), IDTVEC(fastunpend5),
+       IDTVEC(fastunpend6), IDTVEC(fastunpend7),
+       IDTVEC(fastunpend8), IDTVEC(fastunpend9),
+       IDTVEC(fastunpend10), IDTVEC(fastunpend11),
+       IDTVEC(fastunpend12), IDTVEC(fastunpend13),
+       IDTVEC(fastunpend14), IDTVEC(fastunpend15),
+#if defined(APIC_IO)
+       IDTVEC(fastunpend16), IDTVEC(fastunpend17),
+       IDTVEC(fastunpend18), IDTVEC(fastunpend19),
+       IDTVEC(fastunpend20), IDTVEC(fastunpend21),
+       IDTVEC(fastunpend22), IDTVEC(fastunpend23),
+       IDTVEC(fastunpend24), IDTVEC(fastunpend25),
+       IDTVEC(fastunpend26), IDTVEC(fastunpend27),
+       IDTVEC(fastunpend28), IDTVEC(fastunpend29),
+       IDTVEC(fastunpend30), IDTVEC(fastunpend31),
+#endif
+};
+
 static inthand_t *slowintr[ICU_LEN] = {
        &IDTVEC(intr0), &IDTVEC(intr1), &IDTVEC(intr2), &IDTVEC(intr3),
        &IDTVEC(intr4), &IDTVEC(intr5), &IDTVEC(intr6), &IDTVEC(intr7),
@@ -418,6 +443,10 @@ found:
        intr_countp[intr] = &intrcnt[name_index];
 }
 
+/*
+ * NOTE!  intr_handler[] is only used for FAST interrupts, the *vector.s
+ * code ignores it for normal interrupts.
+ */
 int
 icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
 {
@@ -448,8 +477,7 @@ icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
                vector = TPR_FAST_INTS + intr;
                setidt(vector, fastintr[intr],
                       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
-       }
-       else {
+       } else {
                vector = TPR_SLOW_INTS + intr;
 #ifdef APIC_INTR_REORDER
 #ifdef APIC_INTR_HIGHPRI_CLOCK
@@ -520,6 +548,7 @@ icu_unset(intr, handler)
        return (0);
 }
 
+
 /* The following notice applies beyond this point in the file */
 
 /*
@@ -552,14 +581,14 @@ icu_unset(intr, handler)
  */
 
 typedef struct intrec {
-       intrmask_t      mask;
-       inthand2_t      *handler;
-       void            *argument;
-       struct intrec   *next;
-       char            *name;
-       int             intr;
-       intrmask_t      *maskptr;
-       int             flags;
+       intrmask_t      mask;
+       inthand2_t      *handler;
+       void            *argument;
+       struct intrec   *next;
+       char            *name;
+       int             intr;
+       intrmask_t      *maskptr;
+       int             flags;
 } intrec;
 
 static intrec *intreclist_head[ICU_LEN];
@@ -575,10 +604,11 @@ static intrec *intreclist_head[ICU_LEN];
 static void
 intr_mux(void *arg)
 {
+       intrec **pp;
        intrec *p;
        intrmask_t oldspl;
 
-       for (p = arg; p != NULL; p = p->next) {
+       for (pp = arg; (p = *pp) != NULL; pp = &p->next) {
                oldspl = splq(p->mask);
                p->handler(p->argument);
                splx(oldspl);
@@ -672,8 +702,24 @@ static int
 add_intrdesc(intrec *idesc)
 {
        int irq = idesc->intr;
+       intrec *head;
 
-       intrec *head = intreclist_head[irq];
+       /*
+        * YYY This is a hack.   The MI interrupt code in kern/kern_intr.c
+        * handles interrupt thread scheduling for NORMAL interrupts.  It 
+        * will never get called for fast interrupts.  On the other hand,
+        * the handler this code installs in intr_handler[] for a NORMAL
+        * interrupt is not used by the *vector.s code, so we need this
+        * temporary hack to run normal interrupts as interrupt threads.
+        * YYY FIXME!
+        */
+       if (intr_mihandler_installed[irq] == 0) {
+               intr_mihandler_installed[irq] = 1;
+               register_int(irq, intr_mux, &intreclist_head[irq], idesc->name);
+               printf("installing MI handler for int %d\n", irq);
+       }
+
+       head = intreclist_head[irq];
 
        if (head == NULL) {
                /* first handler for this irq, just install it */
@@ -702,7 +748,7 @@ add_intrdesc(intrec *idesc)
                         * handler by shared interrupt multiplexer function
                         */
                        icu_unset(irq, head->handler);
-                       if (icu_setup(irq, intr_mux, head, 0, 0) != 0)
+                       if (icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0) != 0)
                                return (-1);
                        if (bootverbose)
                                printf("\tusing shared irq%d.\n", irq);
@@ -838,7 +884,7 @@ inthand_remove(intrec *idesc)
                        icu_unset(irq, intr_mux);
                        if (head->next != NULL) {
                                /* install the multiplex handler with new list head as argument */
-                               errcode = icu_setup(irq, intr_mux, head, 0, 0);
+                               errcode = icu_setup(irq, intr_mux, &intreclist_head[irq], 0, 0);
                                if (errcode == 0)
                                        update_intrname(irq, NULL);
                        } else {
@@ -859,3 +905,43 @@ inthand_remove(intrec *idesc)
        free(idesc, M_DEVBUF);
        return (0);
 }
+
+void
+call_fast_unpend(int irq)
+{
+       fastunpend[irq]();
+}
+
+/*
+ * ithread_done()
+ *
+ *     This function is called by an interrupt thread when it has completed
+ *     processing a loop.  We interlock with ipending and irunning.  If
+ *     a new interrupt is pending for the thread the function clears the
+ *     pending bit and returns.  If no new interrupt is pending we 
+ *     deschedule and sleep.
+ */
+void
+ithread_done(int irq)
+{
+    struct mdglobaldata *gd = mdcpu;
+    int mask = 1 << irq;
+
+    crit_enter();
+    INTREN(mask);
+    if (gd->gd_ipending & mask) {
+       atomic_clear_int(&gd->gd_ipending, mask);
+       lwkt_schedule_self();
+    } else {
+       lwkt_deschedule_self();
+       if (gd->gd_ipending & mask) {   /* race */
+           atomic_clear_int(&gd->gd_ipending, mask);
+           lwkt_schedule_self();
+       } else {
+           atomic_clear_int(&gd->gd_irunning, mask);
+           lwkt_switch();
+       }
+    }
+    crit_exit();
+}
+
index 6d06e7e..e7d9720 100644 (file)
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/intr_machdep.h,v 1.19.2.2 2001/10/14 20:05:50 luigi Exp $
- * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/intr_machdep.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _I386_ISA_INTR_MACHDEP_H_
 #define        _I386_ISA_INTR_MACHDEP_H_
 
+#ifndef _SYS_INTERRUPT_H_
+#include <sys/interrupt.h>
+#endif
+
 /*
  * Low level interrupt code.
  */ 
 /*
  * Type of the first (asm) part of an interrupt handler.
  */
-typedef void inthand_t __P((u_int cs, u_int ef, u_int esp, u_int ss));
+typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);
+typedef void unpendhand_t(void);
 
 #define        IDTVEC(name)    __CONCAT(X,name)
 
 extern u_long *intr_countp[];  /* pointers into intrcnt[] */
-extern inthand2_t *intr_handler[];     /* C entry points of intr handlers */
+extern inthand2_t *intr_handler[];     /* C entry points for FAST ints */
 extern u_int intr_mask[];      /* sets of intrs masked during handling of 1 */
 extern void *intr_unit[];      /* cookies to pass to intr handlers */
 
@@ -161,7 +166,17 @@ inthand_t
        IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
        IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
 
-#if defined(SMP) || defined(APIC_IO)
+unpendhand_t
+       IDTVEC(fastunpend0), IDTVEC(fastunpend1),
+       IDTVEC(fastunpend2), IDTVEC(fastunpend3),
+       IDTVEC(fastunpend4), IDTVEC(fastunpend5),
+       IDTVEC(fastunpend6), IDTVEC(fastunpend7),
+       IDTVEC(fastunpend8), IDTVEC(fastunpend9),
+       IDTVEC(fastunpend10), IDTVEC(fastunpend11),
+       IDTVEC(fastunpend12), IDTVEC(fastunpend13),
+       IDTVEC(fastunpend14), IDTVEC(fastunpend15);
+
+#if defined(APIC_IO)
 inthand_t
        IDTVEC(fastintr16), IDTVEC(fastintr17),
        IDTVEC(fastintr18), IDTVEC(fastintr19),
@@ -170,7 +185,14 @@ inthand_t
 inthand_t
        IDTVEC(intr16), IDTVEC(intr17), IDTVEC(intr18), IDTVEC(intr19),
        IDTVEC(intr20), IDTVEC(intr21), IDTVEC(intr22), IDTVEC(intr23);
+unpendhand_t
+       IDTVEC(fastunpend16), IDTVEC(fastunpend17),
+       IDTVEC(fastunpend18), IDTVEC(fastunpend19),
+       IDTVEC(fastunpend20), IDTVEC(fastunpend21),
+       IDTVEC(fastunpend22), IDTVEC(fastunpend23);
+#endif
 
+#if defined(SMP)
 inthand_t
        Xinvltlb,       /* TLB shootdowns */
 #ifdef BETTER_CLOCK
@@ -186,8 +208,9 @@ inthand_t
 inthand_t
        Xtest1;         /* 'fake' HWI at top of APIC prio 0x3x, 32+31 = 0x3f */
 #endif /** TEST_TEST1 */
-#endif /* SMP || APIC_IO */
+#endif /* SMP */
 
+void   call_fast_unpend(int irq);
 void   isa_defaultirq __P((void));
 int    isa_nmi __P((int cd));
 int    icu_setup __P((int intr, inthand2_t *func, void *arg, 
index 44b5c53..c79dbdf 100644 (file)
@@ -37,7 +37,7 @@
  *     @(#)ipl.s
  *
  * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
- * $DragonFly: src/sys/i386/isa/Attic/ipl.s,v 1.3 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/ipl.s,v 1.4 2003/06/29 03:28:43 dillon Exp $
  */
 
 
@@ -69,106 +69,71 @@ _softnet_imask:    .long   SWI_NET_MASK
        .globl  _softtty_imask
 _softtty_imask:        .long   SWI_TTY_MASK
 
-/* pending interrupts blocked by splxxx() */
-       .globl  _ipending
-_ipending:     .long   0
-
-/* set with bits for which queue to service */
-       .globl  _netisr
-_netisr:       .long   0
-
-       .globl _netisrs
-_netisrs:
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-       .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
-
        .text
 
-/*
- * Handle return from interrupts, traps and syscalls.
- */
+       /*
+        * DORETI
+        *
+        * Handle return from interrupts, traps and syscalls.  This function
+        * checks the cpl for unmasked pending interrupts (fast, normal, or
+        * soft) and schedules them if appropriate, then irets.
+        */
        SUPERALIGN_TEXT
        .type   _doreti,@function
 _doreti:
        FAKE_MCOUNT(_bintr)             /* init "from" _bintr -> _doreti */
-       addl    $4,%esp                 /* discard unit number */
-       popl    %eax                    /* cpl or cml to restore */
-       movl    _curthread,%ebx 
+       popl    %eax                    /* cpl to restore */
+       movl    _curthread,%ebx
+       cli                             /* interlock with TDPRI_CRIT */
+       movl    %eax,TD_CPL(%ebx)       /* save cpl being restored */
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) /* can't unpend if in critical sec */
+       jge     5f
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) /* force all ints to pending */
 doreti_next:
-       /*
-        * Check for pending HWIs and SWIs atomically with restoring cpl
-        * and exiting.  The check has to be atomic with exiting to stop
-        * (ipending & ~cpl) changing from zero to nonzero while we're
-        * looking at it (this wouldn't be fatal but it would increase
-        * interrupt latency).  Restoring cpl has to be atomic with exiting
-        * so that the stack cannot pile up (the nesting level of interrupt
-        * handlers is limited by the number of bits in cpl).
-        */
-#ifdef SMP
-       cli                             /* early to prevent INT deadlock */
-doreti_next2:
-#endif
-       movl    %eax,%ecx
-       notl    %ecx                    /* set bit = unmasked level */
-#ifndef SMP
-       cli
-#endif
-       andl    _ipending,%ecx          /* set bit = unmasked pending INT */
-       jne     doreti_unpend
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       decb    _intr_nesting_level
+       sti                             /* allow new interrupts */
+       movl    %eax,%ecx               /* cpl being restored */
+       notl    %ecx
+       cli                             /* disallow YYY remove */
+       testl   _fpending,%ecx          /* check for an unmasked fast int */
+       jne     doreti_fast
 
-       /* Check for ASTs that can be handled now. */
-       testl   $AST_PENDING,_astpending
-       je      doreti_exit
+       movl    _irunning,%edx          /* check for an unmasked normal int */
+       notl    %edx                    /* that isn't already running */
+       andl    %edx, %ecx
+       testl   _ipending,%ecx
+       jne     doreti_intr
+       testl   $AST_PENDING,_astpending /* any pending ASTs? */
+       je      2f
        testl   $PSL_VM,TF_EFLAGS(%esp)
-       jz      doreti_notvm86
+       jz      1f
        cmpl    $1,_in_vm86call
-       jne     doreti_ast
-       jmp     doreti_exit     
-
-doreti_notvm86:
+       jnz     doreti_ast
+1:
        testb   $SEL_RPL_MASK,TF_CS(%esp)
        jnz     doreti_ast
 
        /*
-        * doreti_exit -        release MP lock, pop registers, iret.
-        *
-        *      Note that the syscall trap shotcuts to doreti_syscall_ret.
-        *      The segment register pop is a special case, since it may
-        *      fault if (for example) a sigreturn specifies bad segment
-        *      registers.  The fault is handled in trap.c
+        * Nothing left to do, finish up.  Interrupts are still disabled.
         */
-
-doreti_exit:
+2:
+       subl    $TDPRI_CRIT,TD_PRI(%ebx)        /* interlocked with cli */
+5:
+       decl    _intr_nesting_level
        MEXITCOUNT
-
-#ifdef SMP
-       /* release the kernel lock */
-       movl    $_mp_lock, %edx         /* GIANT_LOCK */
-       call    _MPrellock_edx
-#endif /* SMP */
-
        .globl  doreti_popl_fs
+       .globl  doreti_popl_es
+       .globl  doreti_popl_ds
+       .globl  doreti_iret
        .globl  doreti_syscall_ret
 doreti_syscall_ret:
 doreti_popl_fs:
        popl    %fs
-       .globl  doreti_popl_es
 doreti_popl_es:
        popl    %es
-       .globl  doreti_popl_ds
 doreti_popl_ds:
        popl    %ds
        popal
        addl    $8,%esp
-       .globl  doreti_iret
 doreti_iret:
        iret
 
@@ -190,125 +155,111 @@ doreti_popl_fs_fault:
        movl    $T_PROTFLT,TF_TRAPNO(%esp)
        jmp     alltraps_with_regs_pushed
 
-       ALIGN_TEXT
-doreti_unpend:
        /*
-        * Enabling interrupts is safe because we haven't restored cpl yet.
-        * %ecx contains the next probable ready interrupt (~cpl & ipending)
+        * FAST interrupt pending
         */
-#ifdef SMP
+       ALIGN_TEXT
+doreti_fast:
+       andl    _fpending,%ecx          /* only check fast ints */
        bsfl    %ecx, %ecx              /* locate the next dispatchable int */
-       lock
-       btrl    %ecx, _ipending         /* is it really still pending? */
-       jnc     doreti_next2            /* some intr cleared memory copy */
-       sti                             /* late to prevent INT deadlock */
-#else
-       sti
-       bsfl    %ecx,%ecx               /* slow, but not worth optimizing */
-       btrl    %ecx,_ipending
-       jnc     doreti_next             /* some intr cleared memory copy */
-#endif /* SMP */
+       btrl    %ecx, _fpending         /* is it really still pending? */
+       jnc     doreti_next
+       pushl   %eax                    /* YYY cpl */
+       call    *_fastunpend(,%ecx,4)
+       popl    %eax
+       jmp     doreti_next
+
        /*
-        * Execute handleable interrupt
-        *
-        * Set up JUMP to _ihandlers[%ecx] for HWIs.
-        * Set up CALL of _ihandlers[%ecx] for SWIs.
-        * This is a bit early for the SMP case - we have to push %ecx and
-        * %edx, but could push only %ecx and load %edx later.
+        *  INTR interrupt pending
         */
-       movl    _ihandlers(,%ecx,4),%edx
-       cmpl    $NHWI,%ecx
-       jae     doreti_swi              /* software interrupt handling */
-       cli                             /* else hardware int handling */
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* same as non-smp case right now */
-       MEXITCOUNT
-#ifdef APIC_INTR_DIAGNOSTIC
-       lock
-       incl    CNAME(apic_itrace_doreti)(,%ecx,4)
-#ifdef APIC_INTR_DIAGNOSTIC_IRQ        
-       cmpl    $APIC_INTR_DIAGNOSTIC_IRQ,%ecx
-       jne     9f
+       ALIGN_TEXT
+doreti_intr:
+       andl    _ipending,%ecx          /* only check normal ints */
+       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
+       btrl    %ecx, _ipending         /* is it really still pending? */
+       jnc     doreti_next
        pushl   %eax
        pushl   %ecx
-       pushl   %edx
-       pushl   $APIC_ITRACE_DORETI
-       call    log_intr_event
+       call    _sched_ithd             /* YYY must pull in imasks */
        addl    $4,%esp
-       popl    %edx
-       popl    %ecx
        popl    %eax
-9:     
-#endif
-#endif
-       jmp     *%edx
+       jmp     doreti_next
 
-       ALIGN_TEXT
-doreti_swi:
-       pushl   %eax
        /*
-        * At least the SWI_CLOCK handler has to run at a possibly strictly
-        * lower cpl, so we have to restore
-        * all the h/w bits in cpl now and have to worry about stack growth.
-        * The worst case is currently (30 Jan 1994) 2 SWI handlers nested
-        * in dying interrupt frames and about 12 HWIs nested in active
-        * interrupt frames.  There are only 4 different SWIs and the HWI
-        * and SWI masks limit the nesting further.
-        *
-        * The SMP case is currently the same as the non-SMP case.
+        * AST pending
         */
-       orl     imasks(,%ecx,4), %eax   /* or in imasks */
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)      /* set cpl for call */
-
-       call    *%edx
-       popl    %eax                    /* cpl to restore */
-       jmp     doreti_next
-
-       ALIGN_TEXT
 doreti_ast:
        andl    $~AST_PENDING,_astpending
        sti
        movl    $T_ASTFLT,TF_TRAPNO(%esp)
+       decl    _intr_nesting_level
        call    _trap
-       subl    %eax,%eax               /* recover cpl|cml */
-       movb    $1,_intr_nesting_level  /* for doreti_next to decrement */
+       incl    _intr_nesting_level
+       movl    TD_CPL(%ebx),%eax       /* retrieve cpl again for loop */
        jmp     doreti_next
 
-       ALIGN_TEXT
-swi_net:
-       MCOUNT
-       bsfl    _netisr,%eax
-       je      swi_net_done
-swi_net_more:
-       btrl    %eax,_netisr
-       jnc     swi_net_next
-       call    *_netisrs(,%eax,4)
-swi_net_next:
-       bsfl    _netisr,%eax
-       jne     swi_net_more
-swi_net_done:
+
+       /*
+        * SPLZ() a C callable procedure to dispatch any unmasked pending
+        *        interrupts regardless of critical section nesting.  ASTs
+        *        are not dispatched.
+        */
+       SUPERALIGN_TEXT
+
+ENTRY(splz)
+       pushl   %ebx
+       movl    _curthread,%ebx
+       movl    TD_CPL(%ebx),%eax
+
+splz_next:
+       movl    %eax,%ecx               /* ecx = ~CPL */
+       notl    %ecx
+       testl   _fpending,%ecx          /* check for an unmasked fast int */
+       jne     splz_fast
+
+       movl    _irunning,%edx          /* check for an unmasked normal int */
+       notl    %edx                    /* that isn't already running */
+       andl    %edx, %ecx
+       testl   _ipending,%ecx
+       jne     splz_intr
+
+       popl    %ebx
        ret
 
+       /*
+        * FAST interrupt pending
+        */
        ALIGN_TEXT
-dummynetisr:
-       MCOUNT
-       ret     
+splz_fast:
+       andl    _fpending,%ecx          /* only check fast ints */
+       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
+       btrl    %ecx, _fpending         /* is it really still pending? */
+       jnc     splz_next
+       pushl   %eax
+       call    *_fastunpend(,%ecx,4)
+       popl    %eax
+       jmp     splz_next
 
-/*
- * The arg is in a nonstandard place, so swi_dispatcher() can't be called
- * directly and swi_generic() can't use ENTRY() or MCOUNT.
- */
+       /*
+        *  INTR interrupt pending
+        */
        ALIGN_TEXT
-       .globl  _swi_generic
-       .type   _swi_generic,@function
-_swi_generic:
+splz_intr:
+       andl    _ipending,%ecx          /* only check normal ints */
+       bsfl    %ecx, %ecx              /* locate the next dispatchable int */
+       btrl    %ecx, _ipending         /* is it really still pending? */
+       jnc     splz_next
+       pushl   %eax
        pushl   %ecx
-       FAKE_MCOUNT(4(%esp))
-       call    _swi_dispatcher
-       popl    %ecx
-       ret
+       call    _sched_ithd             /* YYY must pull in imasks */
+       addl    $4,%esp
+       popl    %eax
+       jmp     splz_next
 
-ENTRY(swi_null)
-       ret
+       /*
+        * APIC/ICU specific ipl functions provide masking and unmasking
+        * calls for userland.
+        */
 
 #ifdef APIC_IO
 #include "i386/isa/apic_ipl.s"
index dedb7a3..3c7268d 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/ipl_funcs.c,v 1.32.2.5 2002/12/17 18:04:02 sam Exp $
- * $DragonFly: src/sys/i386/isa/Attic/ipl_funcs.c,v 1.5 2003/06/28 04:16:04 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/ipl_funcs.c,v 1.6 2003/06/29 03:28:43 dillon Exp $
  */
 
 #include <sys/param.h>
 void name(void)                                        \
 {                                              \
        atomic_set_int(var, bits);              \
-       mycpu->gd_reqpri = TDPRI_CRIT;          \
+       mdcpu->mi.gd_reqpri = TDPRI_CRIT;       \
 }
 
-DO_SETBITS(setdelayed,   &ipending, loadandclear(&idelayed))
+DO_SETBITS(setdelayed,   &mdcpu->gd_ipending, loadandclear(&idelayed))
 
-DO_SETBITS(setsoftcamnet,&ipending, SWI_CAMNET_PENDING)
-DO_SETBITS(setsoftcambio,&ipending, SWI_CAMBIO_PENDING)
-DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
-DO_SETBITS(setsoftnet,   &ipending, SWI_NET_PENDING)
-DO_SETBITS(setsofttty,   &ipending, SWI_TTY_PENDING)
-DO_SETBITS(setsoftvm,   &ipending, SWI_VM_PENDING)
-DO_SETBITS(setsofttq,   &ipending, SWI_TQ_PENDING)
-DO_SETBITS(setsoftcrypto,&ipending, SWI_CRYPTO_PENDING)
+DO_SETBITS(setsoftcamnet,&mdcpu->gd_ipending, SWI_CAMNET_PENDING)
+DO_SETBITS(setsoftcambio,&mdcpu->gd_ipending, SWI_CAMBIO_PENDING)
+DO_SETBITS(setsoftclock, &mdcpu->gd_ipending, SWI_CLOCK_PENDING)
+DO_SETBITS(setsoftnet,   &mdcpu->gd_ipending, SWI_NET_PENDING)
+DO_SETBITS(setsofttty,   &mdcpu->gd_ipending, SWI_TTY_PENDING)
+DO_SETBITS(setsoftvm,   &mdcpu->gd_ipending, SWI_VM_PENDING)
+DO_SETBITS(setsofttq,   &mdcpu->gd_ipending, SWI_TQ_PENDING)
+DO_SETBITS(setsoftcrypto,&mdcpu->gd_ipending, SWI_CRYPTO_PENDING)
 
 DO_SETBITS(schedsoftcamnet, &idelayed, SWI_CAMNET_PENDING)
 DO_SETBITS(schedsoftcambio, &idelayed, SWI_CAMBIO_PENDING)
@@ -68,11 +68,12 @@ DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
 DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
 DO_SETBITS(schedsoftvm,        &idelayed, SWI_VM_PENDING)
 DO_SETBITS(schedsofttq,        &idelayed, SWI_TQ_PENDING)
+/* YYY schedsoft what? */
 
 unsigned
 softclockpending(void)
 {
-       return (ipending & SWI_CLOCK_PENDING);
+       return ((mdcpu->gd_ipending | mdcpu->gd_fpending) & SWI_CLOCK_PENDING);
 }
 
 /*
@@ -132,8 +133,8 @@ NAME##assert(const char *msg)                               \
  *  The SPL routines mess around with the 'cpl' global, which masks 
  *  interrupts.  Interrupts are not *actually* masked.  What happens is 
  *  that if an interrupt masked by the cpl occurs, the appropriate bit
- *  in 'ipending' is set and the interrupt is defered.  When we clear
- *  bits in the cpl we must check to see if any ipending interrupts have
+ *  in '*pending' is set and the interrupt is deferred.  When we clear
+ *  bits in the cpl we must check to see if any *pending interrupts have
  *  been unmasked and issue the synchronously, which is what the splz()
  *  call does.
  *
@@ -152,113 +153,53 @@ NAME##assert(const char *msg)                            \
  *  NOT need to use locked instructions to modify it.
  */
 
-#ifndef SMP
-
 #define        GENSPL(NAME, OP, MODIFIER, PC)          \
 GENSPLASSERT(NAME, MODIFIER)                   \
 unsigned NAME(void)                            \
 {                                              \
        unsigned x;                             \
+       struct thread *td = curthread;          \
                                                \
-       x = curthread->td_cpl;                  \
-       curthread->td_cpl OP MODIFIER;          \
+       x = td->td_cpl;                         \
+       td->td_cpl OP MODIFIER;                 \
        return (x);                             \
 }
 
 void
 spl0(void)
 {
-       curthread->td_cpl = 0;
-       if (ipending && curthread->td_pri < TDPRI_CRIT)
-               splz();
-}
-
-void
-splx(unsigned ipl)
-{
-       curthread->td_cpl = ipl;
-       if ((ipending & ~ipl) && curthread->td_pri < TDPRI_CRIT)
-               splz();
-}
-
-intrmask_t
-splq(intrmask_t mask)
-{ 
-       intrmask_t tmp = curthread->td_cpl;
-       curthread->td_cpl |= mask;
-       return (tmp);
-}       
-
-#else /* !SMP */
-
-#include <machine/smp.h>
-#include <machine/smptests.h>
-
-/*
- *     SMP CASE
- *
- *     Mostly the same as the non-SMP case now, but it didn't used to be
- *     this clean.
- */
-
-#define        GENSPL(NAME, OP, MODIFIER, PC)          \
-GENSPLASSERT(NAME, MODIFIER)                   \
-unsigned NAME(void)                            \
-{                                              \
-       unsigned x;                             \
-                                               \
-       x = curthread->td_cpl;                  \
-       curthread->td_cpl OP MODIFIER;          \
-                                               \
-       return (x);                             \
-}
+       struct mdglobaldata *gd = mdcpu;
+       struct thread *td = gd->mi.gd_curthread;
 
-/*
- * spl0() -    unmask all interrupts
- *
- *     The MP lock must be held on entry
- *     This routine may only be called from mainline code.
- */
-void
-spl0(void)
-{
-       KASSERT(inside_intr == 0, ("spl0: called from interrupt"));
-       curthread->td_cpl = 0;
-       if (ipending && curthread->td_pri < TDPRI_CRIT)
+       td->td_cpl = 0;
+       if ((gd->gd_ipending || gd->gd_fpending) && td->td_pri < TDPRI_CRIT)
                splz();
 }
 
-/*
- * splx() -    restore previous interrupt mask
- *
- *     The MP lock must be held on entry
- */
-
 void
 splx(unsigned ipl)
 {
-       curthread->td_cpl = ipl;
-       if (inside_intr == 0 && (ipending & ~curthread->td_cpl) != 0 &&
-           curthread->td_pri < TDPRI_CRIT) {
+       struct mdglobaldata *gd = mdcpu;
+       struct thread *td = gd->mi.gd_curthread;
+
+       td->td_cpl = ipl;
+       if (((gd->gd_ipending | gd->gd_fpending) & ~ipl) &&
+           td->td_pri < TDPRI_CRIT) {
                splz();
        }
 }
 
-
-/*
- * splq() -    blocks specified interrupts
- *
- *     The MP lock must be held on entry
- */
 intrmask_t
 splq(intrmask_t mask)
-{
-       intrmask_t tmp = curthread->td_cpl;
-       curthread->td_cpl |= mask;
-       return (tmp);
-}
+{ 
+       struct mdglobaldata *gd = mdcpu;
+       struct thread *td = gd->mi.gd_curthread;
+       intrmask_t tmp;
 
-#endif /* !SMP */
+       tmp = td->td_cpl;
+       td->td_cpl |= mask;
+       return (tmp);
+}       
 
 /* Finally, generate the actual spl*() functions */
 
index 24a5970..7b02637 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/vector.s,v 1.32 1999/08/28 00:45:04 peter Exp $
- * $DragonFly: src/sys/i386/isa/Attic/vector.s,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/vector.s,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
 #include <pc98/pc98/pc98.h>
 #else
 #include <i386/isa/isa.h>
-#endif
-
-#ifdef FAST_INTR_HANDLER_USES_ES
-#define        ACTUALLY_PUSHED         1
-#define        MAYBE_MOVW_AX_ES        movl    %ax,%es
-#define        MAYBE_POPL_ES           popl    %es
-#define        MAYBE_PUSHL_ES          pushl   %es
-#else
-/*
- * We can usually skip loading %es for fastintr handlers.  %es should
- * only be used for string instructions, and fastintr handlers shouldn't
- * do anything slow enough to justify using a string instruction.
- */
-#define        ACTUALLY_PUSHED         0
-#define        MAYBE_MOVW_AX_ES
-#define        MAYBE_POPL_ES
-#define        MAYBE_PUSHL_ES
 #endif
 
        .data
        ALIGN_DATA
 
-       .globl  _intr_nesting_level
-_intr_nesting_level:
-       .byte   0
-       .space  3
-
 /*
  * Interrupt counters and names for export to vmstat(8) and friends.
  *
index f9f399c..3db8f47 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
- * All rights reserved.
+ * Copyright (c) 2003, Matthew Dillon <dillon@backplane.com> All rights reserved.
+ * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
- * $DragonFly: src/sys/kern/kern_intr.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_intr.c,v 1.3 2003/06/29 03:28:44 dillon Exp $
  *
  */
 
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/sysctl.h>
+#include <sys/thread.h>
+#include <sys/proc.h>
+#include <sys/thread2.h>
 
 #include <machine/ipl.h>
 
 #include <sys/interrupt.h>
 
-struct swilist {
-       swihand_t       *sl_handler;
-       struct swilist  *sl_next;
-};
+typedef struct intrec {
+    struct intrec *next;
+    inthand2_t *handler;
+    void       *argument;
+    const char *name;
+    int                intr;
+} intrec_t;
+
+static intrec_t        *intlists[NHWI+NSWI];
+static thread_t ithreads[NHWI+NSWI];
+static struct thread ithread_ary[NHWI+NSWI];
 
-static struct swilist swilists[NSWI];
+static void ithread_handler(void *arg);
 
 void
-register_swi(intr, handler)
-       int intr;
-       swihand_t *handler;
+register_swi(int intr, inthand2_t *handler, void *arg, const char *name)
 {
-       struct swilist *slp, *slq;
-       int s;
-
-       if (intr < NHWI || intr >= NHWI + NSWI)
-               panic("register_swi: bad intr %d", intr);
-       if (handler == swi_generic || handler == swi_null)
-               panic("register_swi: bad handler %p", (void *)handler);
-       slp = &swilists[intr - NHWI];
-       s = splhigh();
-       if (ihandlers[intr] == swi_null)
-               ihandlers[intr] = handler;
-       else {
-               if (slp->sl_next == NULL) {
-                       slp->sl_handler = ihandlers[intr];
-                       ihandlers[intr] = swi_generic;
-               }
-               slq = malloc(sizeof(*slq), M_DEVBUF, M_NOWAIT);
-               if (slq == NULL)
-                       panic("register_swi: malloc failed");
-               slq->sl_handler = handler;
-               slq->sl_next = NULL;
-               while (slp->sl_next != NULL)
-                       slp = slp->sl_next;
-               slp->sl_next = slq;
-       }
-       splx(s);
+    if (intr < NHWI || intr >= NHWI + NSWI)
+       panic("register_swi: bad intr %d", intr);
+    register_int(intr, handler, arg, name);
 }
 
 void
-swi_dispatcher(intr)
-       int intr;
+register_int(int intr, inthand2_t *handler, void *arg, const char *name)
 {
-       struct swilist *slp;
+    intrec_t **list;
+    intrec_t *rec;
+    thread_t td;
+
+    if (intr < 0 || intr > NHWI + NSWI)
+       panic("register_int: bad intr %d", intr);
+
+    rec = malloc(sizeof(intrec_t), M_DEVBUF, M_NOWAIT);
+    if (rec == NULL)
+       panic("register_swi: malloc failed");
+    rec->handler = handler;
+    rec->argument = arg;
+    rec->name = name;
+    rec->intr = intr;
+    rec->next = NULL;
+
+    list = &intlists[intr];
+
+    /*
+     * Create an interrupt thread if necessary, leave it in an unscheduled
+     * state.
+     */
+    if ((td = ithreads[intr]) == NULL) {
+       lwkt_create((void *)ithread_handler, (void *)intr, &ithreads[intr],
+           &ithread_ary[intr], TDF_STOPREQ, "ithread %d", intr);
+       td = ithreads[intr];
+    }
+
+    /*
+     * Add the record to the interrupt list
+     */
+    crit_enter();      /* token */
+    while (*list != NULL)
+       list = &(*list)->next;
+    *list = rec;
+    crit_exit();
+}
 
-       slp = &swilists[intr - NHWI];
-       do {
-               (*slp->sl_handler)();
-               slp = slp->sl_next;
-       } while (slp != NULL);
+void
+unregister_swi(int intr, inthand2_t *handler)
+{
+    if (intr < NHWI || intr >= NHWI + NSWI)
+       panic("register_swi: bad intr %d", intr);
+    unregister_int(intr, handler);
 }
 
 void
-unregister_swi(intr, handler)
-       int intr;
-       swihand_t *handler;
+unregister_int(int intr, inthand2_t handler)
 {
-       struct swilist *slfoundpred, *slp, *slq;
-       int s;
-
-       if (intr < NHWI || intr >= NHWI + NSWI)
-               panic("unregister_swi: bad intr %d", intr);
-       if (handler == swi_generic || handler == swi_null)
-               panic("unregister_swi: bad handler %p", (void *)handler);
-       slp = &swilists[intr - NHWI];
-       s = splhigh();
-       if (ihandlers[intr] == handler)
-               ihandlers[intr] = swi_null;
-       else if (slp->sl_next != NULL) {
-               slfoundpred = NULL;
-               for (slq = slp->sl_next; slq != NULL;
-                   slp = slq, slq = slp->sl_next)
-                       if (slq->sl_handler == handler)
-                               slfoundpred = slp;
-               slp = &swilists[intr - NHWI];
-               if (slfoundpred != NULL) {
-                       slq = slfoundpred->sl_next;
-                       slfoundpred->sl_next = slq->sl_next;
-                       free(slq, M_DEVBUF);
-               } else if (slp->sl_handler == handler) {
-                       slq = slp->sl_next;
-                       slp->sl_next = slq->sl_next;
-                       slp->sl_handler = slq->sl_handler;
-                       free(slq, M_DEVBUF);
-               }
-               if (slp->sl_next == NULL)
-                       ihandlers[intr] = slp->sl_handler;
+    intrec_t **list;
+    intrec_t *rec;
+
+    if (intr < 0 || intr > NHWI + NSWI)
+       panic("register_int: bad intr %d", intr);
+    list = &intlists[intr];
+    crit_enter();
+    while ((rec = *list) != NULL) {
+       if (rec->handler == (void *)handler) {
+           *list = rec->next;
+           break;
        }
-       splx(s);
+       list = &rec->next;
+    }
+    crit_exit();
+    if (rec != NULL) {
+       free(rec, M_DEVBUF);
+    } else {
+       printf("warning: unregister_int: int %d handler %p not found\n",
+           intr, handler);
+    }
 }
 
+/*
+ * Dispatch an interrupt.
+ */
+void
+sched_ithd(int intr)
+{
+    thread_t td;
+
+    if ((td = ithreads[intr]) != NULL) {
+       if (intlists[intr] == NULL)
+           printf("sched_ithd: stray interrupt %d\n", intr);
+       else
+           lwkt_schedule(td);
+    } else {
+       printf("sched_ithd: stray interrupt %d\n", intr);
+    }
+}
+
+static void
+ithread_handler(void *arg)
+{
+    int intr = (int)arg;
+    intrec_t **list = &intlists[intr];
+    intrec_t *rec;
+    intrec_t *nrec;
+
+    crit_enter();      /* replaces SPLs */
+    for (;;) {
+       for (rec = *list; rec; rec = nrec) {
+           nrec = rec->next;
+           rec->handler(rec->argument);
+       }
+       ithread_done(intr);
+       KKASSERT(curthread->td_pri == TDPRI_CRIT);
+    }
+    crit_exit();       /* not reached */
+}
+
+
 /* 
  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
  * The data for this machine dependent, and the declarations are in machine
index e59ae98..3e1b06b 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)kern_malloc.c       8.3 (Berkeley) 1/4/94
  * $FreeBSD: src/sys/kern/kern_malloc.c,v 1.64.2.5 2002/03/16 02:19:51 archie Exp $
- * $DragonFly: src/sys/kern/Attic/kern_malloc.c,v 1.3 2003/06/21 07:54:57 dillon Exp $
+ * $DragonFly: src/sys/kern/Attic/kern_malloc.c,v 1.4 2003/06/29 03:28:44 dillon Exp $
  */
 
 #include "opt_vm.h"
@@ -44,6 +44,7 @@
 #include <sys/mbuf.h>
 #include <sys/vmmeter.h>
 #include <sys/lock.h>
+#include <sys/thread.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
@@ -151,7 +152,7 @@ malloc(size, type, flags)
 
 #if defined(INVARIANTS) && defined(__i386__)
        if (flags == M_WAITOK)
-               KASSERT(intr_nesting_level == 0,
+               KASSERT(mycpu->gd_intr_nesting_level == 0,
                   ("malloc(M_WAITOK) in interrupt context"));
 #endif
        /*
index ccab213..858c61b 100644 (file)
  *
  *     From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
  * $FreeBSD: src/sys/kern/kern_timeout.c,v 1.59.2.1 2001/11/13 18:24:52 archie Exp $
- * $DragonFly: src/sys/kern/kern_timeout.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_timeout.c,v 1.3 2003/06/29 03:28:44 dillon Exp $
  */
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/kernel.h>
+#include <sys/interrupt.h>
+#include <machine/ipl.h>
 
 /*
  * TODO:
@@ -75,8 +77,8 @@ static struct callout *nextsoftcheck; /* Next callout to be checked. */
  * Software (low priority) clock interrupt.
  * Run periodic events from timeout queue.
  */
-void
-softclock()
+static void
+swi_softclock(void *dummy)
 {
        register struct callout *c;
        register struct callout_tailq *bucket;
@@ -292,6 +294,14 @@ callout_init(c)
        bzero(c, sizeof *c);
 }
 
+static void
+swi_softclock_setup(void *arg)
+{
+       register_swi(SWI_CLOCK, swi_softclock, NULL, "swi_sftclk");
+}
+
+SYSINIT(softclock_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_softclock_setup, NULL);
+
 #ifdef APM_FIXUP_CALLTODO
 /* 
  * Adjust the kernel calltodo timeout list.  This routine is used after 
index 0198cdd..ba645e7 100644 (file)
@@ -27,7 +27,7 @@
  *     thread scheduler, which means that generally speaking we only need
  *     to use a critical section to prevent hicups.
  *
- * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.8 2003/06/28 04:16:04 dillon Exp $
+ * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.9 2003/06/29 03:28:44 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -108,26 +108,34 @@ lwkt_init_wait(lwkt_wait_t w)
  * does everything except load the startup and switcher function.
  */
 thread_t
-lwkt_alloc_thread(void)
+lwkt_alloc_thread(struct thread *td)
 {
-    struct thread *td;
     void *stack;
+    int flags = 0;
 
     crit_enter();
-    if (mycpu->gd_tdfreecount > 0) {
-       --mycpu->gd_tdfreecount;
-       td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
-       KASSERT(td != NULL && (td->td_flags & TDF_EXITED),
-           ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
-       TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
-       crit_exit();
-       stack = td->td_kstack;
-    } else {
-       crit_exit();
-       td = zalloc(thread_zone);
+    if (td == NULL) {
+       if (mycpu->gd_tdfreecount > 0) {
+           --mycpu->gd_tdfreecount;
+           td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
+           KASSERT(td != NULL && (td->td_flags & TDF_EXITED),
+               ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
+           TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
+           crit_exit();
+           stack = td->td_kstack;
+           flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
+       } else {
+           crit_exit();
+           td = zalloc(thread_zone);
+           td->td_kstack = NULL;
+           flags |= TDF_ALLOCATED_THREAD;
+       }
+    }
+    if ((stack = td->td_kstack) == NULL) {
        stack = (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
+       flags |= TDF_ALLOCATED_STACK;
     }
-    lwkt_init_thread(td, stack, TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
+    lwkt_init_thread(td, stack, flags);
     return(td);
 }
 
@@ -198,6 +206,9 @@ lwkt_switch(void)
     thread_t td = curthread;
     thread_t ntd;
 
+    if (mycpu->gd_intr_nesting_level)
+       panic("lwkt_switch: cannot switch from within an interrupt");
+
     crit_enter();
     if ((ntd = td->td_preempted) != NULL) {
        /*
@@ -227,16 +238,18 @@ lwkt_switch(void)
  * inside the critical section to pervent its own crit_exit() from reentering
  * lwkt_yield_quick().
  *
+ * gd_reqpri indicates that *something* changed, e.g. an interrupt or softint
+ * came along but was blocked and made pending.
+ *
  * (self contained on a per cpu basis)
  */
 void
 lwkt_yield_quick(void)
 {
     thread_t td = curthread;
-    while ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) {
-#if 0
-       cpu_schedule_reqs();    /* resets gd_reqpri */
-#endif
+
+    if ((td->td_pri & TDPRI_MASK) < mycpu->gd_reqpri) {
+       mycpu->gd_reqpri = 0;
        splz();
     }
 
@@ -246,7 +259,7 @@ lwkt_yield_quick(void)
      * preemption and MP without actually doing preemption or MP, because a
      * lot of code assumes that wakeup() does not block.
      */
-    if (untimely_switch && intr_nesting_level == 0) {
+    if (untimely_switch && mycpu->gd_intr_nesting_level == 0) {
        crit_enter();
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
@@ -568,14 +581,15 @@ lwkt_regettoken(lwkt_token_t tok)
  */
 int
 lwkt_create(void (*func)(void *), void *arg,
-    struct thread **tdp, const char *fmt, ...)
+    struct thread **tdp, struct thread *template, int tdflags,
+    const char *fmt, ...)
 {
     struct thread *td;
     va_list ap;
 
-    td = *tdp = lwkt_alloc_thread();
+    td = *tdp = lwkt_alloc_thread(template);
     cpu_set_thread_handler(td, kthread_exit, func, arg);
-    td->td_flags |= TDF_VERBOSE;
+    td->td_flags |= TDF_VERBOSE | tdflags;
 
     /*
      * Set up arg0 for 'ps' etc
@@ -587,7 +601,10 @@ lwkt_create(void (*func)(void *), void *arg,
     /*
      * Schedule the thread to run
      */
-    lwkt_schedule(td);
+    if ((td->td_flags & TDF_STOPREQ) == 0)
+       lwkt_schedule(td);
+    else
+       td->td_flags &= ~TDF_STOPREQ;
     return 0;
 }
 
@@ -612,9 +629,7 @@ lwkt_exit(void)
 
 /*
  * Create a kernel process/thread/whatever.  It shares it's address space
- * with proc0 - ie: kernel only.
- *
- * XXX exact duplicate of lwkt_create().
+ * with proc0 - ie: kernel only.  5.x compatible.
  */
 int
 kthread_create(void (*func)(void *), void *arg,
@@ -623,7 +638,7 @@ kthread_create(void (*func)(void *), void *arg,
     struct thread *td;
     va_list ap;
 
-    td = *tdp = lwkt_alloc_thread();
+    td = *tdp = lwkt_alloc_thread(NULL);
     cpu_set_thread_handler(td, kthread_exit, func, arg);
     td->td_flags |= TDF_VERBOSE;
 
index f92567b..086b4e4 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  *     $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.1.2.2 2001/03/31 03:33:44 hsu Exp $
- *     $DragonFly: src/sys/kern/subr_taskqueue.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
+ *     $DragonFly: src/sys/kern/subr_taskqueue.c,v 1.3 2003/06/29 03:28:44 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -195,10 +195,10 @@ taskqueue_swi_enqueue(void *context)
 }
 
 static void
-taskqueue_swi_run(void)
+taskqueue_swi_run(void *arg)
 {
        taskqueue_run(taskqueue_swi);
 }
 
 TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
-                register_swi(SWI_TQ, taskqueue_swi_run));
+                register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq"));
index f3826a5..f7bc517 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
- * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.3 2003/06/22 17:39:42 dillon Exp $
+ * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.4 2003/06/29 03:28:44 dillon Exp $
  */
 
 #include "opt_param.h"
@@ -45,6 +45,7 @@
 #include <sys/sysctl.h>
 #include <sys/domain.h>
 #include <sys/protosw.h>
+#include <sys/thread.h>
 
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
@@ -400,7 +401,7 @@ m_clalloc_wait(void)
 
 #ifdef __i386__
        /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
-       KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
+       KASSERT(mycpu->gd_intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
 #endif
 
        /* Sleep until something's available or until we expire. */
@@ -442,7 +443,7 @@ m_retry(i, t)
         */
        if (i == M_WAIT) {
 #ifdef __i386__
-               KASSERT(intr_nesting_level == 0,
+               KASSERT(mycpu->gd_intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
 #endif
                m_reclaim();
@@ -487,7 +488,7 @@ m_retryhdr(i, t)
         */
        if (i == M_WAIT) {
 #ifdef __i386__
-               KASSERT(intr_nesting_level == 0,
+               KASSERT(mycpu->gd_intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
 #endif
                m_reclaim();
diff --git a/sys/net/netisr.c b/sys/net/netisr.c
new file mode 100644 (file)
index 0000000..a992e52
--- /dev/null
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2003 Matthew Dillon
+ *
+ * $DragonFly: src/sys/net/netisr.c,v 1.1 2003/06/29 03:28:45 dillon Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/interrupt.h>
+#include <net/netisr.h>
+#include <machine/cpufunc.h>
+#include <machine/ipl.h>
+
+static int isrmask;
+static int isrsoftint_installed;
+static netisr_t *netisrs[NETISR_MAX];
+
+static void
+swi_net(void *arg)
+{
+    int mask;
+    int bit;
+    netisr_t *func;
+       
+    while ((mask = isrmask) != 0) {
+       bit = bsfl(mask);
+       if (btrl(&isrmask, bit)) {
+           if ((func = netisrs[bit]) != NULL)
+               func();
+       }
+    }
+}
+
+int
+register_netisr(int num, netisr_t *handler)
+{
+    if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
+       printf("register_netisr: bad isr number: %d\n", num);
+       return (EINVAL);
+    }
+    if (isrsoftint_installed == 0) {
+       isrsoftint_installed = 1;
+       register_swi(SWI_NET, swi_net, NULL, "swi_net");
+    }
+    netisrs[num] = handler;
+    return (0);
+}
+
+int
+unregister_netisr(int num)
+{
+    if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
+       printf("unregister_netisr: bad isr number: %d\n", num);
+       return (EINVAL);
+    }
+    netisrs[num] = NULL;
+    return (0);
+}
+
+void
+schednetisr(int isrnum) 
+{
+    atomic_set_int(&isrmask, 1 << isrnum);
+    setsoftnet();
+}
+
index c1846a9..a6d4cae 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)netisr.h    8.1 (Berkeley) 6/10/93
  * $FreeBSD: src/sys/net/netisr.h,v 1.21.2.5 2002/02/09 23:02:39 luigi Exp $
- * $DragonFly: src/sys/net/netisr.h,v 1.2 2003/06/17 04:28:48 dillon Exp $
+ * $DragonFly: src/sys/net/netisr.h,v 1.3 2003/06/29 03:28:45 dillon Exp $
  */
 
 #ifndef _NET_NETISR_H_
 #define        NETISR_NETGRAPH 30              /* same as AF_NETGRAPH */
 #define        NETISR_POLLMORE 31              /* check if we need more polling */
 
+#define NETISR_MAX     32
+
 
 #ifndef LOCORE
 #ifdef _KERNEL
 
-extern volatile unsigned int   netisr; /* scheduling bits for network */
-#define        schednetisr(anisr)      { netisr |= 1 << (anisr); setsoftnet(); }
-
 typedef void netisr_t __P((void));
 
 int register_netisr __P((int, netisr_t *));
 int unregister_netisr __P((int));
+void schednetisr(int isrnum);
+
 
 #endif
 #endif
index 0111632..20d1139 100644 (file)
@@ -1,5 +1,5 @@
 /*     $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.7 2003/06/03 00:09:02 sam Exp $   */
-/*     $DragonFly: src/sys/opencrypto/crypto.c,v 1.3 2003/06/22 17:39:44 dillon Exp $  */
+/*     $DragonFly: src/sys/opencrypto/crypto.c,v 1.4 2003/06/29 03:28:45 dillon Exp $  */
 /*     $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $        */
 /*
  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
@@ -112,7 +112,7 @@ MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
  *
  * This scheme is not intended for SMP machines.
  */ 
-static void cryptointr(void);          /* swi thread to dispatch ops */
+static void cryptointr(void *dummy);   /* swi thread to dispatch ops */
 static void cryptoret(void);           /* kernel thread for callbacks*/
 static struct thread *cryptothread;
 static void crypto_destroy(void);
@@ -156,7 +156,7 @@ crypto_init(void)
        TAILQ_INIT(&crp_ret_q);
        TAILQ_INIT(&crp_ret_kq);
 
-       register_swi(SWI_CRYPTO, cryptointr);
+       register_swi(SWI_CRYPTO, cryptointr, NULL, "swi_crypto");
        error = kthread_create((void (*)(void *)) cryptoret, NULL,
                    &cryptothread, "cryptoret");
        if (error) {
@@ -1000,7 +1000,7 @@ out:
  * Software interrupt thread to dispatch crypto requests.
  */
 static void
-cryptointr(void)
+cryptointr(void *dummy)
 {
        struct cryptop *crp, *submit;
        struct cryptkop *krp;
index 55c880f..169bca1 100644 (file)
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
- * $DragonFly: src/sys/platform/pc32/apic/apic_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/apic/apic_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
+#if 0
 
        .data
        ALIGN_DATA
@@ -101,7 +102,7 @@ ENTRY(splz)
         */
        pushl   %ebx
        movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
+       movl    TD_CPL(%ebx),%eax
 splz_next:
        /*
         * We don't need any locking here.  (ipending & ~cpl) cannot grow 
@@ -141,10 +142,10 @@ splz_unpend:
 splz_swi:
        pushl   %eax                    /* save cpl across call */
        orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* set cpl for SWI */
+       movl    %eax,TD_CPL(%ebx) /* set cpl for SWI */
        call    *_ihandlers(,%ecx,4)
        popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) /* restore cpl and loop */
+       movl    %eax,TD_CPL(%ebx) /* restore cpl and loop */
        jmp     splz_next
 
 /*
@@ -463,3 +464,5 @@ ENTRY(io_apic_write)
 ENTRY(apic_eoi)
        movl    $0, _lapic+0xb0
        ret
+
+#endif
index 09fefb9..23d5792 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.5 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.6 2003/06/29 03:28:43 dillon Exp $
  */
 
 
@@ -29,14 +29,14 @@ IDTVEC(vec_name) ;                                                  \
        pushl   %ecx ;                                                  \
        pushl   %edx ;                                                  \
        pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
+       pushl   %es ;                                                   \
        pushl   %fs ;                                                   \
        movl    $KDSEL,%eax ;                                           \
        mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
+       movl    %ax,%es ;                                               \
        movl    $KPSEL,%eax ;                                           \
        mov     %ax,%fs ;                                               \
-       FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ;                      \
+       FAKE_MCOUNT(6*4(%esp)) ;                                        \
        pushl   _intr_unit + (irq_num) * 4 ;                            \
        call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
        addl    $4, %esp ;                                              \
@@ -48,7 +48,7 @@ IDTVEC(vec_name) ;                                                    \
        incl    (%eax) ;                                                \
        MEXITCOUNT ;                                                    \
        popl    %fs ;                                                   \
-       MAYBE_POPL_ES ;                                                 \
+       popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popl    %edx ;                                                  \
        popl    %ecx ;                                                  \
@@ -259,6 +259,7 @@ __CONCAT(Xresume,irq_num): ;                                                \
        call    *_intr_handler + (irq_num) * 4 ;                        \
        cli ;                                                           \
        APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
+       addl    $4,%esp ;                                               \
 ;                                                                      \
        lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
        UNMASK_IRQ(irq_num) ;                                           \
@@ -456,8 +457,6 @@ _Xcpuast:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-       
        movl    _cpuid, %eax
        lock    
        btrl    %eax, _checkstate_pending_ast
@@ -512,8 +511,6 @@ _Xforward_irq:
        incb    _intr_nesting_level
        sti
        
-       pushl   $0
-
        MEXITCOUNT
        jmp     _doreti                 /* Handle forwarded interrupt */
 1:
@@ -722,6 +719,8 @@ _Xrendezvous:
        
        
        .data
+
+#if 0
 /*
  * Addresses of interrupt handlers.
  *  XresumeNN: Resumption addresses for HWIs.
@@ -751,6 +750,7 @@ imasks:                             /* masks for interrupt handlers */
 
        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
+#endif
 
 /* active flag for lazy masking */
 iactive:
index 98053cc..acf2ff8 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/exception.s,v 1.65.2.3 2001/08/15 01:23:49 peter Exp $
- * $DragonFly: src/sys/platform/pc32/i386/exception.s,v 1.6 2003/06/28 02:09:47 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/exception.s,v 1.7 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
  * On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK.  This means
  * that we must be careful in regards to accessing global variables.  We
  * save (push) the current cpl (our software interrupt disable mask), call
- * the trap function, then call _doreti to restore the cpl and deal with
+ * the trap function, then jump to _doreti to restore the cpl and deal with
  * ASTs (software interrupts).  _doreti will determine if the restoration
  * of the cpl unmasked any pending interrupts and will issue those interrupts
  * synchronously prior to doing the iret.
- *
- * At the moment we must own the MP lock to do any cpl manipulation, which
- * means we must own it prior to  calling _doreti.  The syscall case attempts
- * to avoid this by handling a reduced set of cases itself and iret'ing.
  */
 #define        IDTVEC(name)    ALIGN_TEXT; .globl __CONCAT(_X,name); \
                        .type __CONCAT(_X,name),@function; __CONCAT(_X,name):
@@ -171,21 +167,14 @@ IDTVEC(fpu)
        mov     %ax,%fs
        FAKE_MCOUNT(13*4(%esp))
 
-#ifdef SMP
-       MPLOCKED incl _cnt+V_TRAP
-       MP_LOCK
-       movl    _curthread,%eax         /* save original cpl */
-       pushl   TD_MACH+MTD_CPL(%eax)
-       pushl   $0                      /* dummy unit to finish intr frame */
-#else /* SMP */
-       movl    _curthread,%eax         /* save original cpl */
-       pushl   TD_MACH+MTD_CPL(%eax)
-       pushl   $0                      /* dummy unit to finish intr frame */
+       movl    _curthread,%ebx         /* save original cpl */
+       movl    TD_CPL(%ebx), %ebx
+       pushl   %ebx
        incl    _cnt+V_TRAP
-#endif /* SMP */
 
-       call    _npx_intr
+       call    _npx_intr               /* note: call might mess w/ argument */
 
+       movl    %ebx, (%esp)            /* save cpl for doreti */
        incb    _intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
@@ -229,7 +218,7 @@ calltrap:
        MPLOCKED incl _cnt+V_TRAP
        MP_LOCK
        movl    _curthread,%eax         /* keep orig cpl here during call */
-       movl    TD_MACH+MTD_CPL(%eax),%ebx
+       movl    TD_CPL(%eax),%ebx
        call    _trap
 
        /*
@@ -237,7 +226,6 @@ calltrap:
         * to interrupt frame.
         */
        pushl   %ebx                    /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit to finish intr frame */
        incb    _intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
@@ -284,8 +272,7 @@ IDTVEC(syscall)
        MP_LOCK
 #endif
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit for interrupt frame */
-       movb    $1,_intr_nesting_level
+       movl    $1,_intr_nesting_level
        jmp     _doreti
 
 /*
@@ -322,8 +309,7 @@ IDTVEC(int0x80_syscall)
        MP_LOCK
 #endif
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit for interrupt frame */
-       movb    $1,_intr_nesting_level
+       movl    $1,_intr_nesting_level
        jmp     _doreti
 
 ENTRY(fork_trampoline)
@@ -348,7 +334,6 @@ ENTRY(fork_trampoline)
         * Return via _doreti to handle ASTs.
         */
        pushl   $0                      /* cpl to restore */
-       subl    $4,%esp                 /* dummy unit to finish intr frame */
        movb    $1,_intr_nesting_level
        MEXITCOUNT
        jmp     _doreti
index 20bfea1..a27faf1 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)genassym.c    5.11 (Berkeley) 5/10/91
  * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $
- * $DragonFly: src/sys/platform/pc32/i386/genassym.c,v 1.17 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/genassym.c,v 1.18 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "opt_user_ldt.h"
@@ -92,7 +92,7 @@ ASSYM(TDF_EXITED, TDF_EXITED);
 
 ASSYM(RW_OWNER, offsetof(struct lwkt_rwlock, rw_owner));
 
-ASSYM(MTD_CPL, offsetof(struct md_thread, mtd_cpl));
+ASSYM(TD_CPL, offsetof(struct thread, td_mach) + offsetof(struct md_thread, mtd_cpl));
 
 ASSYM(TDPRI_CRIT, TDPRI_CRIT);
 
@@ -185,13 +185,16 @@ ASSYM(BI_KERNEND, offsetof(struct bootinfo, bi_kernend));
 ASSYM(GD_CURTHREAD, offsetof(struct mdglobaldata, mi.gd_curthread));
 ASSYM(GD_REQPRI, offsetof(struct mdglobaldata, mi.gd_reqpri));
 ASSYM(GD_CPUID, offsetof(struct mdglobaldata, mi.gd_cpuid));
-ASSYM(GD_INSIDE_INTR, offsetof(struct mdglobaldata, mi.gd_inside_intr));
+ASSYM(GD_INTR_NESTING_LEVEL, offsetof(struct mdglobaldata, mi.gd_intr_nesting_level));
 ASSYM(GD_ASTPENDING, offsetof(struct mdglobaldata, mi.gd_astpending));
 
 #ifdef USER_LDT
 ASSYM(GD_CURRENTLDT, offsetof(struct mdglobaldata, gd_currentldt));
 #endif
 
+ASSYM(GD_FPENDING, offsetof(struct mdglobaldata, gd_fpending));
+ASSYM(GD_IPENDING, offsetof(struct mdglobaldata, gd_ipending));
+ASSYM(GD_IRUNNING, offsetof(struct mdglobaldata, gd_irunning));
 ASSYM(GD_COMMON_TSS, offsetof(struct mdglobaldata, gd_common_tss));
 ASSYM(GD_COMMON_TSSD, offsetof(struct mdglobaldata, gd_common_tssd));
 ASSYM(GD_TSS_GDT, offsetof(struct mdglobaldata, gd_tss_gdt));
index b31652e..e6e803e 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/globals.s,v 1.13.2.1 2000/05/16 06:58:06 dillon Exp $
- * $DragonFly: src/sys/platform/pc32/i386/globals.s,v 1.11 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/globals.s,v 1.12 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "opt_user_ldt.h"
         * the AP versions are setup in mp_machdep.c.
         */
        .globl  gd_cpuid, gd_cpu_lockid, gd_other_cpus
-       .globl  gd_ss_eflags, gd_inside_intr
+       .globl  gd_ss_eflags, gd_intr_nesting_level
        .globl  gd_CMAP1, gd_CMAP2, gd_CMAP3, gd_PMAP1
        .globl  gd_CADDR1, gd_CADDR2, gd_CADDR3, gd_PADDR1
+       .globl  gd_irunning, gd_ipending, gd_fpending
 
        .set    gd_cpuid,globaldata + GD_CPUID
        .set    gd_cpu_lockid,globaldata + GD_CPU_LOCKID
        .set    gd_other_cpus,globaldata + GD_OTHER_CPUS
        .set    gd_ss_eflags,globaldata + GD_SS_EFLAGS
-       .set    gd_inside_intr,globaldata + GD_INSIDE_INTR
+       .set    gd_intr_nesting_level,globaldata + GD_INTR_NESTING_LEVEL
        .set    gd_CMAP1,globaldata + GD_PRV_CMAP1
        .set    gd_CMAP2,globaldata + GD_PRV_CMAP2
        .set    gd_CMAP3,globaldata + GD_PRV_CMAP3
        .set    gd_CADDR2,globaldata + GD_PRV_CADDR2
        .set    gd_CADDR3,globaldata + GD_PRV_CADDR3
        .set    gd_PADDR1,globaldata + GD_PRV_PADDR1
+       .set    gd_fpending,globaldata + GD_FPENDING
+       .set    gd_ipending,globaldata + GD_IPENDING
+       .set    gd_irunning,globaldata + GD_IRUNNING
 
 #if defined(APIC_IO)
        .globl  lapic_eoi, lapic_svr, lapic_tpr, lapic_irr1, lapic_ver
index dc4b675..504f28c 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)machdep.c     7.4 (Berkeley) 6/3/91
  * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
- * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.15 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.16 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "apm.h"
@@ -90,8 +90,6 @@
 
 #include <ddb/ddb.h>
 
-#include <net/netisr.h>
-
 #include <machine/cpu.h>
 #include <machine/reg.h>
 #include <machine/clock.h>
@@ -454,33 +452,6 @@ again:
        cpu_setregs();
 }
 
-int
-register_netisr(num, handler)
-       int num;
-       netisr_t *handler;
-{
-       
-       if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
-               printf("register_netisr: bad isr number: %d\n", num);
-               return (EINVAL);
-       }
-       netisrs[num] = handler;
-       return (0);
-}
-
-int
-unregister_netisr(num)
-       int num;
-{
-
-       if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
-               printf("unregister_netisr: bad isr number: %d\n", num);
-               return (EINVAL);
-       }
-       netisrs[num] = NULL;
-       return (0);
-}
-
 /*
  * Send an interrupt to process.
  *
@@ -974,18 +945,13 @@ cpu_halt(void)
  * Note on cpu_idle_hlt:  On an SMP system this may cause the system to 
  * halt until the next clock tick, even if a thread is ready YYY
  */
-#ifdef SMP
 static int     cpu_idle_hlt = 0;
-#else
-static int     cpu_idle_hlt = 1;
-#endif
 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
     &cpu_idle_hlt, 0, "Idle loop HLT enable");
 
 void
 cpu_idle(void)
 {
-       spl0();
        for (;;) {
                lwkt_switch();
                if (cpu_idle_hlt) {
@@ -997,7 +963,6 @@ cpu_idle(void)
                } else {
                        __asm __volatile("sti");
                }
-               /* YYY BGL */
        }
 }
 
@@ -2069,6 +2034,8 @@ init386(int first)
  * Initialize machine-dependant portions of the global data structure.
  * Note that the global data area and cpu0's idlestack in the private
  * data space were allocated in locore.
+ *
+ * Note: the idlethread's cpl is 0
  */
 void
 cpu_gdinit(struct mdglobaldata *gd, int cpu)
index 11d6e75..c834e43 100644 (file)
@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
- * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.15 2003/06/28 02:09:47 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.16 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
@@ -443,6 +443,13 @@ cpu_switch_load_gs:
 
 CROSSJUMPTARGET(sw1a)
 
+badsw0:
+       pushl   %eax
+       pushl   $sw0_1
+       call    _panic
+
+sw0_1: .asciz  "cpu_switch: panic: %p"
+
 #ifdef DIAGNOSTIC
 badsw1:
        pushl   $sw0_1
@@ -538,6 +545,7 @@ ENTRY(savectx)
 ENTRY(cpu_idle_restore)
        movl    $0,%ebp
        pushl   $0
+       sti
        jmp     cpu_idle
 
 /*
@@ -551,6 +559,7 @@ ENTRY(cpu_idle_restore)
 ENTRY(cpu_kthread_restore)
        movl    TD_PCB(%eax),%ebx
        movl    $0,%ebp
+       sti
        popl    %edx            /* kthread exit function */
        pushl   PCB_EBX(%ebx)   /* argument to ESI function */
        pushl   %edx            /* set exit func as return address */
@@ -589,13 +598,14 @@ ENTRY(cpu_lwkt_restore)
        popl    %esi
        popl    %ebx
        popl    %ebp
-       movl    TD_MACH+MTD_CPL(%eax),%ecx      /* unmasked cpl? YYY too complex */
-       notl    %ecx
-       andl    _ipending,%ecx
-       je      1f
        cmpl    $0,_intr_nesting_level          /* don't stack too deeply */
-       jne     1f
-       call    splz                            /* execute unmasked ints */
+       jne     2f
+       testl   _ipending,%ecx
+       jnz     1f
+       testl   _fpending,%ecx
+       jz      2f
 1:
+       call    splz                            /* execute unmasked ints */
+2:
        ret
 
index de73a9f..c2b4c6b 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)trap.c        7.4 (Berkeley) 5/13/91
  * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
- * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.10 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.11 2003/06/29 03:28:42 dillon Exp $
  */
 
 /*
@@ -503,7 +503,7 @@ kernel_trap:
                }                                                       \
        } while (0)
 
-                       if (intr_nesting_level == 0) {
+                       if (mycpu->gd_intr_nesting_level == 0) {
                                /*
                                 * Invalid %fs's and %gs's can be created using
                                 * procfs or PT_SETREGS or by invalidating the
@@ -695,7 +695,7 @@ trap_pfault(frame, usermode, eva)
 
                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
-                    (intr_nesting_level != 0 || 
+                    (mycpu->gd_intr_nesting_level != 0 || 
                      curthread->td_pcb->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
@@ -758,7 +758,7 @@ trap_pfault(frame, usermode, eva)
                return (0);
 nogo:
        if (!usermode) {
-               if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
+               if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
                        frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
                        return (0);
                }
@@ -865,7 +865,7 @@ trap_pfault(frame, usermode, eva)
                return (0);
 nogo:
        if (!usermode) {
-               if (intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
+               if (mycpu->gd_intr_nesting_level == 0 && curthread->td_pcb->pcb_onfault) {
                        frame->tf_eip = (int)curthread->td_pcb->pcb_onfault;
                        return (0);
                }
index 230f157..3a48847 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/vm86bios.s,v 1.15.2.1 2000/05/16 06:58:07 dillon Exp $
- * $DragonFly: src/sys/platform/pc32/i386/vm86bios.s,v 1.6 2003/06/22 08:54:18 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/vm86bios.s,v 1.7 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include <machine/asmacros.h>          /* miscellaneous asm macros */
@@ -143,8 +143,7 @@ ENTRY(vm86_bioscall)
         * Return via _doreti, restore the same cpl as our current cpl
         */
        movl    _curthread,%eax
-       pushl   TD_MACH+MTD_CPL(%eax)
-       subl    $4,%esp                 /* dummy unit */
+       pushl   TD_CPL(%eax)
        incb    _intr_nesting_level     /* dummy to match doreti */
        MEXITCOUNT
        jmp     _doreti
index 9d9362a..87975c8 100644 (file)
@@ -39,7 +39,7 @@
  *     from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
  *     Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
  * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
- * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.13 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.14 2003/06/29 03:28:42 dillon Exp $
  */
 
 #include "npx.h"
@@ -54,6 +54,7 @@
 #include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/buf.h>
+#include <sys/interrupt.h>
 #include <sys/vnode.h>
 #include <sys/vmmeter.h>
 #include <sys/kernel.h>
@@ -70,6 +71,7 @@
 #include <machine/pcb_ext.h>
 #include <machine/vm86.h>
 #include <machine/globaldata.h>        /* npxthread */
+#include <machine/ipl.h>       /* SWI_ */
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
@@ -606,16 +608,22 @@ vm_page_zero_idle()
        return (0);
 }
 
-/*
- * Software interrupt handler for queued VM system processing.
- */   
-void  
-swi_vm() 
-{     
+void
+swi_vm(void *arg)
+{
        if (busdma_swi_pending != 0)
                busdma_swi();
 }
 
+static void
+swi_vm_setup(void *arg)
+{
+       register_swi(SWI_VM, swi_vm, NULL, "swi_vm");
+}
+
+SYSINIT(vm_setup, SI_SUB_CPU, SI_ORDER_ANY, swi_vm_setup, NULL);
+
+
 /*
  * Tell whether this address is in some physical memory region.
  * Currently used by the kernel coredump code in order to avoid
index e7751f4..aa44d4c 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)icu.h 5.6 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/isa/icu.h,v 1.18 1999/12/26 12:43:47 bde Exp $
- * $DragonFly: src/sys/platform/pc32/icu/icu.h,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/icu/icu.h,v 1.3 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
 
 #ifndef        LOCORE
 
-#ifdef APIC_IO
-
-/*
-#define MP_SAFE
- * Note:
- *     Most of the SMP equivilants of the icu macros are coded
- *     elsewhere in an MP-safe fashion.
- *     In particular note that the 'imen' variable is opaque.
- *     DO NOT access imen directly, use INTREN()/INTRDIS().
- */
-
 void   INTREN                  __P((u_int));
 void   INTRDIS                 __P((u_int));
 
-#else /* APIC_IO */
-
-/*
- * Interrupt "level" mechanism variables, masks, and macros
- */
-extern unsigned imen;          /* interrupt mask enable */
-
-#define        INTREN(s)               (imen &= ~(s), SET_ICUS())
-#define        INTRDIS(s)              (imen |= (s), SET_ICUS())
-
-#if 0
-#ifdef PC98
-#define        SET_ICUS()      (outb(IO_ICU1 + 2, imen), outb(IU_ICU2 + 2, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#else  /* IBM-PC */
-#define        SET_ICUS()      (outb(IO_ICU1 + 1, imen), outb(IU_ICU2 + 1, imen >> 8))
-#define INTRGET()      ((inb(IO_ICU2) << 8 | inb(IO_ICU1)) & 0xffff)
-#endif /* PC98 */
-#else
-/*
- * XXX - IO_ICU* are defined in isa.h, not icu.h, and nothing much bothers to
- * include isa.h, while too many things include icu.h.
- */
-#ifdef PC98
-#define        SET_ICUS()      (outb(0x02, imen), outb(0x0a, imen >> 8))
-/* XXX is this correct? */
-#define INTRGET()      ((inb(0x0a) << 8 | inb(0x02)) & 0xffff)
-#else
-#define        SET_ICUS()      (outb(0x21, imen), outb(0xa1, imen >> 8))
-#define INTRGET()      ((inb(0xa1) << 8 | inb(0x21)) & 0xffff)
-#endif
-#endif
-
-#endif /* APIC_IO */
-
 #endif /* LOCORE */
 
-
 #ifdef APIC_IO
 /*
  * Note: The APIC uses different values for IRQxxx.
index ae2f90d..23ed43c 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (c) 1989, 1990 William F. Jolitz.
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
+ * Copyright (c) 2003 Matthew Dillon
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/icu_ipl.s,v 1.6 1999/08/28 00:44:42 peter Exp $
- * $DragonFly: src/sys/platform/pc32/icu/icu_ipl.s,v 1.4 2003/06/22 08:54:22 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/icu/icu_ipl.s,v 1.5 2003/06/29 03:28:43 dillon Exp $
  */
 
        .data
        ALIGN_DATA
-vec:
-       .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
-       .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
 
-/* interrupt mask enable (all h/w off) */
+       /*
+        * Interrupt mask for ICU interrupts, defaults to all hardware
+        * interrupts turned off.
+        */
        .globl  _imen
 _imen: .long   HWI_MASK
 
-
-/*
- * 
- */
        .text
        SUPERALIGN_TEXT
 
-/*
- * Interrupt priority mechanism
- *     -- soft splXX masks with group mechanism (cpl)
- *     -- h/w masks for currently active or unused interrupts (imen)
- *     -- ipending = active interrupts currently masked by cpl
- *     -- splz handles pending interrupts regardless of the critical
- *        nesting state, it is only called synchronously.
- */
-
-ENTRY(splz)
-       /*
-        * The caller has restored cpl and checked that (ipending & ~cpl)
-        * is nonzero.  We have to repeat the check since if there is an
-        * interrupt while we're looking, _doreti processing for the
-        * interrupt will handle all the unmasked pending interrupts
-        * because we restored early.  We're repeating the calculation
-        * of (ipending & ~cpl) anyway so that the caller doesn't have
-        * to pass it, so this only costs one "jne".  "bsfl %ecx,%ecx"
-        * is undefined when %ecx is 0 so we can't rely on the secondary
-        * btrl tests.
-        */
-       pushl   %ebx
-       movl    _curthread,%ebx
-       movl    TD_MACH+MTD_CPL(%ebx),%eax
-splz_next:
-       /*
-        * We don't need any locking here.  (ipending & ~cpl) cannot grow 
-        * while we're looking at it - any interrupt will shrink it to 0.
-        */
-       movl    $0,_reqpri
-       movl    %eax,%ecx
-       notl    %ecx
-       andl    _ipending,%ecx
-       jne     splz_unpend
-       popl    %ebx
-       ret
-
-       ALIGN_TEXT
-splz_unpend:
-       bsfl    %ecx,%ecx
-       btrl    %ecx,_ipending
-       jnc     splz_next
-       cmpl    $NHWI,%ecx
-       jae     splz_swi
        /*
-        * We would prefer to call the intr handler directly here but that
-        * doesn't work for badly behaved handlers that want the interrupt
-        * frame.  Also, there's a problem determining the unit number.
-        * We should change the interface so that the unit number is not
-        * determined at config time.
+        * Functions to enable and disable a hardware interrupt.  Only
+        * 16 ICU interrupts exist.
+        *
+        * INTREN(1 << irq)     (one interrupt only)
+        * INTRDIS(1 << irq)    (one interrupt only)
         */
-       popl    %ebx
-       jmp     *vec(,%ecx,4)
-
-       ALIGN_TEXT
-splz_swi:
-       pushl   %eax
-       orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       call    *_ihandlers(,%ecx,4)
-       popl    %eax
-       movl    %eax,TD_MACH+MTD_CPL(%ebx)
-       jmp     splz_next
-
-/*
- * Fake clock interrupt(s) so that they appear to come from our caller instead
- * of from here, so that system profiling works.
- * XXX do this more generally (for all vectors; look up the C entry point).
- * XXX frame bogusness stops us from just jumping to the C entry point.
- */
-       ALIGN_TEXT
-vec0:
-       popl    %eax                    /* return address */
+ENTRY(INTRDIS)
+       movl    4(%esp),%eax
+       orl     %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr0                 /* XXX might need _Xfastintr0 */
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
+       ret
 
-#ifndef PC98
-       ALIGN_TEXT
-vec8:
-       popl    %eax    
+ENTRY(INTREN)
+       movl    4(%esp),%eax
+       notl    %eax
+       andl    %eax,_imen
        pushfl
-       pushl   $KCSEL
-       pushl   %eax
        cli
-       MEXITCOUNT
-       jmp     _Xintr8                 /* XXX might need _Xfastintr8 */
-#endif /* PC98 */
-
-/*
- * The 'generic' vector stubs.
- */
-
-#define BUILD_VEC(irq_num)                     \
-       ALIGN_TEXT ;                            \
-__CONCAT(vec,irq_num): ;                       \
-       int     $ICU_OFFSET + (irq_num) ;       \
+       movl    _imen,%eax
+       outb    %al,$IO_ICU1+ICU_IMR_OFFSET
+       mov     %ah,%al
+       outb    %al,$IO_ICU2+ICU_IMR_OFFSET
+       popfl
        ret
 
-       BUILD_VEC(1)
-       BUILD_VEC(2)
-       BUILD_VEC(3)
-       BUILD_VEC(4)
-       BUILD_VEC(5)
-       BUILD_VEC(6)
-       BUILD_VEC(7)
-#ifdef PC98
-       BUILD_VEC(8)
-#endif
-       BUILD_VEC(9)
-       BUILD_VEC(10)
-       BUILD_VEC(11)
-       BUILD_VEC(12)
-       BUILD_VEC(13)
-       BUILD_VEC(14)
-       BUILD_VEC(15)
+
index 218b1f7..4fe5b5a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/icu_vector.s,v 1.14.2.2 2000/07/18 21:12:42 dfr Exp $
- * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.6 2003/06/28 07:00:58 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/icu/icu_vector.s,v 1.7 2003/06/29 03:28:43 dillon Exp $
  */
 
 /*
@@ -16,6 +16,7 @@
 
 #define        ICU_EOI                 0x20    /* XXX - define elsewhere */
 
+#define        IRQ_LBIT(irq_num)       (1 << (irq_num))
 #define        IRQ_BIT(irq_num)        (1 << ((irq_num) % 8))
 #define        IRQ_BYTE(irq_num)       ((irq_num) >> 3)
 
 #define        ENABLE_ICU1             /* use auto-EOI to reduce i/o */
 #define        OUTB_ICU1
 #else
-#define        ENABLE_ICU1 \
-       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */ \
-       OUTB_ICU1               /* ... to clear in service bit */
-#define        OUTB_ICU1 \
-       outb    %al,$IO_ICU1
+#define        ENABLE_ICU1                                                     \
+       movb    $ICU_EOI,%al ;  /* as soon as possible send EOI ... */  \
+       OUTB_ICU1 ;             /* ... to clear in service bit */       \
+
+#define        OUTB_ICU1                                                       \
+       outb    %al,$IO_ICU1 ;                                          \
+
 #endif
 
 #ifdef AUTO_EOI_2
  */
 #define        ENABLE_ICU1_AND_2       ENABLE_ICU1
 #else
-#define        ENABLE_ICU1_AND_2 \
-       movb    $ICU_EOI,%al ;  /* as above */ \
-       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */ \
-       OUTB_ICU1               /* ... then first icu (if !AUTO_EOI_1) */
+#define        ENABLE_ICU1_AND_2                                               \
+       movb    $ICU_EOI,%al ;  /* as above */                          \
+       outb    %al,$IO_ICU2 ;  /* but do second icu first ... */       \
+       OUTB_ICU1 ;     /* ... then first icu (if !AUTO_EOI_1) */       \
+
 #endif
 
 /*
- * Macros for interrupt interrupt entry, call to handler, and exit.
+ * Macro helpers
  */
+#define PUSH_FRAME                                                     \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       pushal ;                /* 8 registers */                       \
+       pushl   %ds ;                                                   \
+       pushl   %es ;                                                   \
+       pushl   %fs ;                                                   \
+       mov     $KDSEL,%ax ;                                            \
+       mov     %ax,%ds ;                                               \
+       mov     %ax,%es ;                                               \
+       mov     $KPSEL,%ax ;                                            \
+       mov     %ax,%fs ;                                               \
+
+#define PUSH_DUMMY                                                     \
+       pushfl ;                /* phys int frame / flags */            \
+       pushl %cs ;             /* phys int frame / cs */               \
+       pushl   12(%esp) ;      /* original caller eip */               \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       subl    $11*4,%esp ;    /* pushal + 3 seg regs (dummy) */       \
 
-#define        FAST_INTR(irq_num, vec_name, enable_icus)                       \
+/*
+ * Warning: POP_FRAME can only be used if there is no chance of a
+ * segment register being changed (e.g. by procfs), which is why syscalls
+ * have to use doreti.
+ */
+#define POP_FRAME                                                      \
+       popl    %fs ;                                                   \
+       popl    %es ;                                                   \
+       popl    %ds ;                                                   \
+       popal ;                                                         \
+       addl    $2*4,%esp ;     /* dummy trap & error codes */          \
+
+#define POP_DUMMY                                                      \
+       addl    $16*4,%esp ;                                            \
+
+#define MASK_IRQ(icu, irq_num)                                         \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       orb     $IRQ_BIT(irq_num),%al ;                                 \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+
+#define UNMASK_IRQ(icu, irq_num)                                       \
+       movb    imen + IRQ_BYTE(irq_num),%al ;                          \
+       andb    $~IRQ_BIT(irq_num),%al ;                                \
+       movb    %al,imen + IRQ_BYTE(irq_num) ;                          \
+       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
+       
+/*
+ * Fast interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its fpending bit and
+ *       doreti.
+ *     - If we can take the interrupt clear its fpending bit,
+ *       call the handler, then unmask the interrupt and doreti.
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define        FAST_INTR(irq_num, vec_name, icu, enable_icus)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       pushl   %ds ;                                                   \
-       MAYBE_PUSHL_ES ;                                                \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%ds ;                                               \
-       MAYBE_MOVW_AX_ES ;                                              \
-       FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ;                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       call    *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
-       enable_icus ;           /* (re)enable ASAP (helps edge trigger?) */ \
-       addl    $4,%esp ;                                               \
-       incl    _cnt+V_INTR ;   /* book-keeping can wait */             \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    _curthread, %ecx ; /* are we in a critical section? */  \
-       cmpl    $TDPRI_CRIT,TD_PRI(%ecx) ;                              \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx),%eax ;     /* save the cpl for doreti */   \
+       pushl   %eax ;                                                  \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
        jge     1f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ; /* unmasking pending ints? */ \
-       notl    %eax ;                                                  \
-       andl    _ipending,%eax ;                                        \
-       jne     2f ;            /* yes, maybe handle them */            \
-1: ;                                                                   \
-       MEXITCOUNT ;                                                    \
-       MAYBE_POPL_ES ;                                                 \
-       popl    %ds ;                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax ;                                                  \
-       iret ;                                                          \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       cmpb    $3,_intr_nesting_level ;        /* is there enough stack? */ \
-       jae     1b ;            /* no, return */                        \
-       movl    TD_MACH+MTD_CPL(%ecx),%eax ;                            \
-       /* XXX next line is probably unnecessary now. */                \
-       movl    $HWI_MASK|SWI_MASK,TD_MACH+MTD_CPL(%ecx) ; /* limit nesting ... */ \
-       incb    _intr_nesting_level ;   /* ... really limit it ... */   \
-       sti ;                   /* ... to do this as early as possible */ \
-       MAYBE_POPL_ES ;         /* discard most of thin frame ... */    \
-       popl    %ecx ;          /* ... original %ds ... */              \
-       popl    %edx ;                                                  \
-       xchgl   %eax,4(%esp) ;  /* orig %eax; save cpl */               \
-       pushal ;                /* build fat frame (grrr) ... */        \
-       pushl   %ecx ;          /* ... actually %ds ... */              \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;                                            \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
-       movl    (3+8+0)*4(%esp),%ecx ;  /* ... %ecx from thin frame ... */ \
-       movl    %ecx,(3+6)*4(%esp) ;    /* ... to fat frame ... */      \
-       movl    (3+8+1)*4(%esp),%eax ;  /* ... cpl from thin frame */   \
-       pushl   %eax ;                                                  \
-       subl    $4,%esp ;       /* junk for unit number */              \
-       MEXITCOUNT ;                                                    \
-       jmp     _doreti
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set pending bit and return, leave interrupt masked */        \
+       orl     $IRQ_LBIT(irq_num),_fpending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* clear pending bit, run handler */                            \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Restart fast interrupt held up by critical section or cpl.
+ *
+ *     - Push a dummy trap frame as required by doreti.
+ *     - The interrupt source is already masked.
+ *     - Clear the fpending bit
+ *     - Run the handler
+ *     - Unmask the interrupt
+ *     - Pop the dummy frame and do a normal return
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+#define FAST_UNPEND(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       incl    _intr_nesting_level ;                                   \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       PUSH_DUMMY ;                                                    \
+       andl    $~IRQ_LBIT(irq_num),_fpending ;                         \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ;                         \
+       addl    $4, %esp ;                                              \
+       incl    _cnt+V_INTR ;                                           \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       POP_DUMMY ;                                                     \
+       popl %ebp ;                                                     \
+       decl    _intr_nesting_level ;                                   \
+       ret ;                                                           \
+
+/*
+ * Slow interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt set its ipending bit and
+ *       doreti.  In addition to checking for a critical section
+ *       and cpl mask we also check to see if the thread is still
+ *       running.
+ *     - If we can take the interrupt clear its ipending bit,
+ *       set its irunning bit, and schedule its thread.  Leave
+ *       interrupts masked and doreti.
+ *
+ *     The interrupt thread will run its handlers and loop if 
+ *     ipending is found to be set.  ipending/irunning interlock
+ *     the interrupt thread with the interrupt.  The handler calls
+ *     UNPEND when it is through.
+ *
+ *     Note that we do not enable interrupts when calling sched_ithd.
+ *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
 
 #define        INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
 IDTVEC(vec_name) ;                                                     \
-       pushl   $0 ;            /* dummy error code */                  \
-       pushl   $0 ;            /* dummy trap type */                   \
-       pushal ;                                                        \
-       pushl   %ds ;           /* save our data and extra segments ... */ \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       mov     $KDSEL,%ax ;    /* ... and reload with kernel's own ... */ \
-       mov     %ax,%ds ;       /* ... early for obsolete reasons */    \
-       mov     %ax,%es ;                                               \
-       mov     $KPSEL,%ax ;                                            \
-       mov     %ax,%fs ;                                               \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
        maybe_extra_ipending ;                                          \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       orb     $IRQ_BIT(irq_num),%al ;                                 \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       enable_icus ;                                                   \
-       movl    _curthread, %ebx ; /* are we in a critical section? */  \
+       MASK_IRQ(icu, irq_num) ;                                        \
+       enable_icus ;                                                   \
+       incl    _intr_nesting_level ;                                   \
+       movl    _curthread,%ebx ;                                       \
+       movl    TD_CPL(%ebx), %eax ;                                    \
+       pushl   %eax ;          /* push CPL for doreti */               \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jge     2f ;                                                    \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ; /* is this interrupt masked by the cpl? */ \
-       testb   $IRQ_BIT(irq_num),%reg ;                                \
-       jne     2f ;                                                    \
-       incb    _intr_nesting_level ;                                   \
-__CONCAT(Xresume,irq_num): ;                                           \
-       FAKE_MCOUNT(13*4(%esp)) ;       /* XXX late to avoid double count */ \
-       incl    _cnt+V_INTR ;   /* tally interrupts */                  \
-       movl    _intr_countp + (irq_num) * 4,%eax ;                     \
-       incl    (%eax) ;                                                \
-       movl    TD_MACH+MTD_CPL(%ebx),%eax ;                            \
-       pushl   %eax ;                                                  \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       orl     _intr_mask + (irq_num) * 4,%eax ;                       \
-       movl    %eax,TD_MACH+MTD_CPL(%ebx) ;                            \
-       sti ;                                                           \
-       call    *_intr_handler + (irq_num) * 4 ;                        \
-       cli ;                   /* must unmask _imen and icu atomically */ \
-       movb    _imen + IRQ_BYTE(irq_num),%al ;                         \
-       andb    $~IRQ_BIT(irq_num),%al ;                                \
-       movb    %al,_imen + IRQ_BYTE(irq_num) ;                         \
-       outb    %al,$icu+ICU_IMR_OFFSET ;                               \
-       sti ;                   /* XXX _doreti repeats the cli/sti */   \
-       MEXITCOUNT ;                                                    \
-       /* We could usually avoid the following jmp by inlining some of */ \
-       /* _doreti, but it's probably better to use less cache. */      \
-       jmp     _doreti ;                                               \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-2: ;                                                                   \
-       /* XXX skip mcounting here to avoid double count */             \
-       orb     $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ;       \
-       movl    $TDPRI_CRIT,_reqpri ;                                   \
-       popl    %fs ;                                                   \
-       popl    %es ;                                                   \
-       popl    %ds ;                                                   \
-       popal ;                                                         \
-       addl    $4+4,%esp ;                                             \
-       iret
+       jge     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num),_irunning ;                          \
+       jnz     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave interrupt masked */    \
+       orl     $IRQ_LBIT(irq_num),_ipending ;                          \
+       movl    $TDPRI_CRIT,_reqpri ;                                   \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* set running bit, clear pending bit, run handler */           \
+       orl     $IRQ_LBIT(irq_num),_irunning ;                          \
+       andl    $~IRQ_LBIT(irq_num),_ipending ;                         \
+       pushl   $irq_num ;                                              \
+       call    _sched_ithd ;                                           \
+       addl    $4,%esp ;                                               \
+       incl    _cnt+V_INTR ; /* book-keeping YYY make per-cpu */       \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
+
+/*
+ * Unmask a slow interrupt.  This function is used by interrupt threads
+ * after they have descheduled themselves to reenable interrupts and
+ * possibly cause a reschedule to occur.  The interrupt's irunning bit
+ * is cleared prior to unmasking.
+ */
+
+#define INTR_UNMASK(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       andl    $~IRQ_LBIT(irq_num),_irunning ;                         \
+       UNMASK_IRQ(icu, irq_num) ;                                      \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
 
 MCOUNT_LABEL(bintr)
-       FAST_INTR(0,fastintr0, ENABLE_ICU1)
-       FAST_INTR(1,fastintr1, ENABLE_ICU1)
-       FAST_INTR(2,fastintr2, ENABLE_ICU1)
-       FAST_INTR(3,fastintr3, ENABLE_ICU1)
-       FAST_INTR(4,fastintr4, ENABLE_ICU1)
-       FAST_INTR(5,fastintr5, ENABLE_ICU1)
-       FAST_INTR(6,fastintr6, ENABLE_ICU1)
-       FAST_INTR(7,fastintr7, ENABLE_ICU1)
-       FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
-       FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
-       FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
-       FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
-       FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
-       FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
-       FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
-       FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
+       FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
+       FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
+       FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
+
 #define        CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
        INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
        INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
@@ -208,23 +296,25 @@ MCOUNT_LABEL(bintr)
        INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
        INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
+
+       FAST_UNPEND(0,fastunpend0, IO_ICU1)
+       FAST_UNPEND(1,fastunpend1, IO_ICU1)
+       FAST_UNPEND(2,fastunpend2, IO_ICU1)
+       FAST_UNPEND(3,fastunpend3, IO_ICU1)
+       FAST_UNPEND(4,fastunpend4, IO_ICU1)
+       FAST_UNPEND(5,fastunpend5, IO_ICU1)
+       FAST_UNPEND(6,fastunpend6, IO_ICU1)
+       FAST_UNPEND(7,fastunpend7, IO_ICU1)
+       FAST_UNPEND(8,fastunpend8, IO_ICU2)
+       FAST_UNPEND(9,fastunpend9, IO_ICU2)
+       FAST_UNPEND(10,fastunpend10, IO_ICU2)
+       FAST_UNPEND(11,fastunpend11, IO_ICU2)
+       FAST_UNPEND(12,fastunpend12, IO_ICU2)
+       FAST_UNPEND(13,fastunpend13, IO_ICU2)
+       FAST_UNPEND(14,fastunpend14, IO_ICU2)
+       FAST_UNPEND(15,fastunpend15, IO_ICU2)
 MCOUNT_LABEL(eintr)
 
        .data
-       .globl  _ihandlers
-_ihandlers:                    /* addresses of interrupt handlers */
-                               /* actually resumption addresses for HWI's */
-       .long   Xresume0, Xresume1, Xresume2, Xresume3 
-       .long   Xresume4, Xresume5, Xresume6, Xresume7
-       .long   Xresume8, Xresume9, Xresume10, Xresume11
-       .long   Xresume12, Xresume13, Xresume14, Xresume15 
-       .long   _swi_null, swi_net, _swi_null, _swi_null
-       .long   _swi_vm, _swi_null, _softclock
-
-imasks:                                /* masks for interrupt handlers */
-       .space  NHWI*4          /* padding; HWI masks are elsewhere */
-
-       .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
-       .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
 
        .text
index c58f7d6..6f89c2a 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/asnames.h,v 1.44.2.8 2003/01/22 20:14:53 jhb Exp $
- * $DragonFly: src/sys/platform/pc32/include/Attic/asnames.h,v 1.10 2003/06/28 04:16:03 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/include/Attic/asnames.h,v 1.11 2003/06/29 03:28:43 dillon Exp $
  */
 
 #ifndef _MACHINE_ASNAMES_H_
 #define _Xdna                          Xdna
 #define _Xfastintr0                    Xfastintr0
 #define _Xfastintr1                    Xfastintr1
+#define _Xfastintr2                    Xfastintr2
+#define _Xfastintr3                    Xfastintr3
+#define _Xfastintr4                    Xfastintr4
+#define _Xfastintr5                    Xfastintr5
+#define _Xfastintr6                    Xfastintr6
+#define _Xfastintr7                    Xfastintr7
+#define _Xfastintr8                    Xfastintr8
+#define _Xfastintr9                    Xfastintr9
 #define _Xfastintr10                   Xfastintr10
 #define _Xfastintr11                   Xfastintr11
 #define _Xfastintr12                   Xfastintr12
 #define _Xfastintr17                   Xfastintr17
 #define _Xfastintr18                   Xfastintr18
 #define _Xfastintr19                   Xfastintr19
-#define _Xfastintr2                    Xfastintr2
 #define _Xfastintr20                   Xfastintr20
 #define _Xfastintr21                   Xfastintr21
 #define _Xfastintr22                   Xfastintr22
 #define _Xfastintr23                   Xfastintr23
-#define _Xfastintr3                    Xfastintr3
-#define _Xfastintr4                    Xfastintr4
-#define _Xfastintr5                    Xfastintr5
-#define _Xfastintr6                    Xfastintr6
-#define _Xfastintr7                    Xfastintr7
-#define _Xfastintr8                    Xfastintr8
-#define _Xfastintr9                    Xfastintr9
+#define _Xfastunpend0                  Xfastunpend0
+#define _Xfastunpend1                  Xfastunpend1
+#define _Xfastunpend2                  Xfastunpend2
+#define _Xfastunpend3                  Xfastunpend3
+#define _Xfastunpend4                  Xfastunpend4
+#define _Xfastunpend5                  Xfastunpend5
+#define _Xfastunpend6                  Xfastunpend6
+#define _Xfastunpend7                  Xfastunpend7
+#define _Xfastunpend8                  Xfastunpend8
+#define _Xfastunpend9                  Xfastunpend9
+#define _Xfastunpend10                 Xfastunpend10
+#define _Xfastunpend11                 Xfastunpend11
+#define _Xfastunpend12                 Xfastunpend12
+#define _Xfastunpend13                 Xfastunpend13
+#define _Xfastunpend14                 Xfastunpend14
+#define _Xfastunpend15                 Xfastunpend15
+#define _Xfastunpend16                 Xfastunpend16
+#define _Xfastunpend17                 Xfastunpend17
+#define _Xfastunpend18                 Xfastunpend18
+#define _Xfastunpend19                 Xfastunpend19
+#define _Xfastunpend20                 Xfastunpend20
+#define _Xfastunpend21                 Xfastunpend21
+#define _Xfastunpend22                 Xfastunpend22
+#define _Xfastunpend23                 Xfastunpend23
 #define _Xforward_irq                  Xforward_irq
 #define _Xfpu                          Xfpu
 #define _Xfpusegm                      Xfpusegm
 #define _etext                         etext
 #define _exception                     exception
 #define _fast_intr_lock                        fast_intr_lock
+#define _fastunpend                    fastunpend
 #define _fastmove                      fastmove
 #define _gdt                           gdt
 #define _generic_bcopy                 generic_bcopy
 #define _intr_countp                   intr_countp
 #define _intr_handler                  intr_handler
 #define _intr_mask                     intr_mask
-#define _intr_nesting_level            intr_nesting_level
 #define _intr_unit                     intr_unit
 #define _intrcnt                       intrcnt
 #define _intrnames                     intrnames
 #define _invltlb_ok                    invltlb_ok
 #define _ioapic                                ioapic
-#define _ipending                      ipending
 #define _isr_lock                      isr_lock
 #define _kernbase                      kernbase
 #define _kernelname                    kernelname
 #define _mul64                         mul64
 #define _net_imask                     net_imask
 #define _netisr                                netisr
-#define _netisrs                       netisrs
 #define _nfs_diskless                  nfs_diskless
 #define _nfs_diskless_valid            nfs_diskless_valid
 #define _normalize                     normalize
 #define _round_reg                     round_reg
 #define _s_lock                                s_lock
 #define _s_unlock                      s_unlock
+#define _sched_ithd                    sched_ithd
 #define _set_precision_flag_down       set_precision_flag_down
 #define _set_precision_flag_up         set_precision_flag_up
 #define _set_user_ldt                  set_user_ldt
 
 #define        FS(x)   %fs:gd_ ## x
 
+#define _fpending                      FS(fpending)
+#define _ipending                      FS(ipending)
+#define _irunning                      FS(irunning)
 #define _common_tss                    FS(common_tss)
 #define _common_tssd                   FS(common_tssd)
 #define _cpuid       &nbs