author     Matthew Dillon <dillon@dragonflybsd.org>  Sun, 6 Jul 2003 21:23:56 +0000 (21:23 +0000)
committer  Matthew Dillon <dillon@dragonflybsd.org>  Sun, 6 Jul 2003 21:23:56 +0000 (21:23 +0000)

MP Implementation 1/2: Get the APIC code working again, sweetly integrate the
MP lock into the LWKT scheduler, replace the old simplelock code with
tokens or spin locks as appropriate.  In particular, the vnode interlock
(and most other interlocks) are now tokens.  Also clean up a few curproc/cred
sequences that are no longer needed.
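
The conversion pattern, sketched with the macro expansions visible in the
drm and aac hunks below (unlike a simplelock, a token is only guaranteed
to be held across code that does not block; hence the "token may have been
lost" notes in the aac changes):

        struct lwkt_token ilock;

        lwkt_inittoken(&ilock);         /* was simple_lock_init(&ilock) */
        lwkt_gettoken(&ilock);          /* was simple_lock(&ilock) */
        /* serialized section; the token can be lost if we block */
        lwkt_reltoken(&ilock);          /* was simple_unlock(&ilock) */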

The APs are left in a degenerate state with non-IPI interrupts disabled, as
additional LWKT work must be done before we can really make use of them,
and FAST interrupts are not yet managed by the MP lock.  The main goal of
this stage was to get the system working with an APIC again.

buildworld tested on UP and 2xCPU/MP (Dell 2550)

148 files changed:
sys/bus/isa/i386/isa_dma.c
sys/conf/files.alpha
sys/conf/files.i386
sys/conf/files.pc98
sys/cpu/i386/include/cpufunc.h
sys/dev/drm/drm_dma.h
sys/dev/drm/drm_os_freebsd.h
sys/dev/raid/aac/aac.c
sys/dev/raid/aac/aacvar.h
sys/dev/serial/sio/sio.c
sys/dev/sound/isa/i386/spkr/spkr.c
sys/i386/apic/apic_ipl.s
sys/i386/apic/apic_vector.s
sys/i386/apic/mpapic.c
sys/i386/i386/autoconf.c
sys/i386/i386/db_interface.c
sys/i386/i386/exception.s
sys/i386/i386/genassym.c
sys/i386/i386/i686_mem.c
sys/i386/i386/identcpu.c
sys/i386/i386/initcpu.c
sys/i386/i386/k6_mem.c
sys/i386/i386/machdep.c
sys/i386/i386/mp_machdep.c
sys/i386/i386/mpapic.c
sys/i386/i386/mpboot.s
sys/i386/i386/mplock.s
sys/i386/i386/perfmon.c
sys/i386/i386/pmap.c
sys/i386/i386/simplelock.s [deleted file]
sys/i386/i386/spinlock.s [new file with mode: 0644]
sys/i386/i386/swtch.s
sys/i386/i386/trap.c
sys/i386/i386/vm86.c
sys/i386/i386/vm86bios.s
sys/i386/include/apic.h
sys/i386/include/cpufunc.h
sys/i386/include/lock.h
sys/i386/include/smp.h
sys/i386/include/smptests.h
sys/i386/isa/apic_ipl.s
sys/i386/isa/apic_vector.s
sys/i386/isa/clock.c
sys/i386/isa/intr_machdep.c
sys/i386/isa/intr_machdep.h
sys/i386/isa/npx.c
sys/kern/imgact_elf.c
sys/kern/init_main.c
sys/kern/kern_exit.c
sys/kern/kern_lock.c
sys/kern/kern_synch.c
sys/kern/lwkt_thread.c
sys/kern/subr_bus.c
sys/kern/subr_prf.c
sys/kern/subr_rman.c
sys/kern/uipc_socket.c
sys/kern/vfs_aio.c
sys/kern/vfs_bio.c
sys/kern/vfs_conf.c
sys/kern/vfs_default.c
sys/kern/vfs_subr.c
sys/kern/vfs_syscalls.c
sys/kern/vfs_vnops.c
sys/netinet/in_pcb.c
sys/netproto/smb/smb_iod.c
sys/netproto/smb/smb_rq.c
sys/netproto/smb/smb_subr.c
sys/netproto/smb/smb_subr.h
sys/opencrypto/crypto.c
sys/platform/pc32/apic/apic_ipl.s
sys/platform/pc32/apic/apic_vector.s
sys/platform/pc32/apic/mpapic.c
sys/platform/pc32/i386/autoconf.c
sys/platform/pc32/i386/db_interface.c
sys/platform/pc32/i386/exception.s
sys/platform/pc32/i386/genassym.c
sys/platform/pc32/i386/i686_mem.c
sys/platform/pc32/i386/identcpu.c
sys/platform/pc32/i386/initcpu.c
sys/platform/pc32/i386/k6_mem.c
sys/platform/pc32/i386/machdep.c
sys/platform/pc32/i386/mp_machdep.c
sys/platform/pc32/i386/mpapic.c
sys/platform/pc32/i386/mpboot.s
sys/platform/pc32/i386/mplock.s
sys/platform/pc32/i386/perfmon.c
sys/platform/pc32/i386/pmap.c
sys/platform/pc32/i386/simplelock.s [deleted file]
sys/platform/pc32/i386/spinlock.s [new file with mode: 0644]
sys/platform/pc32/i386/swtch.s
sys/platform/pc32/i386/trap.c
sys/platform/pc32/i386/vm86.c
sys/platform/pc32/i386/vm86bios.s
sys/platform/pc32/include/apic.h
sys/platform/pc32/include/lock.h
sys/platform/pc32/include/smp.h
sys/platform/pc32/include/smptests.h
sys/platform/pc32/isa/apic_ipl.s
sys/platform/pc32/isa/apic_vector.s
sys/platform/pc32/isa/clock.c
sys/platform/pc32/isa/intr_machdep.c
sys/platform/pc32/isa/intr_machdep.h
sys/platform/pc32/isa/npx.c
sys/platform/vkernel/i386/genassym.c
sys/sys/buf.h
sys/sys/buf2.h
sys/sys/lock.h
sys/sys/mount.h
sys/sys/proc.h
sys/sys/rman.h
sys/sys/signalvar.h
sys/sys/thread.h
sys/sys/thread2.h
sys/sys/vmmeter.h
sys/sys/vnode.h
sys/vfs/deadfs/dead_vnops.c
sys/vfs/gnu/ext2fs/ext2_vfsops.c
sys/vfs/hpfs/hpfs.h
sys/vfs/hpfs/hpfs_hash.c
sys/vfs/hpfs/hpfs_vfsops.c
sys/vfs/hpfs/hpfs_vnops.c
sys/vfs/isofs/cd9660/cd9660_node.c
sys/vfs/mfs/mfs_vfsops.c
sys/vfs/msdosfs/msdosfs_denode.c
sys/vfs/msdosfs/msdosfs_vfsops.c
sys/vfs/msdosfs/msdosfs_vnops.c
sys/vfs/nfs/nfs_nqlease.c
sys/vfs/ntfs/ntfs_ihash.c
sys/vfs/ntfs/ntfs_inode.h
sys/vfs/ntfs/ntfs_subr.c
sys/vfs/ntfs/ntfs_vfsops.c
sys/vfs/nullfs/null_vnops.c
sys/vfs/nwfs/nwfs_node.c
sys/vfs/nwfs/nwfs_vnops.c
sys/vfs/smbfs/smbfs.h
sys/vfs/ufs/ffs_vfsops.c
sys/vfs/ufs/ufs_ihash.c
sys/vfs/ufs/ufs_inode.c
sys/vfs/ufs/ufs_lookup.c
sys/vfs/ufs/ufs_quota.c
sys/vfs/ufs/ufs_readwrite.c
sys/vfs/ufs/ufs_vnops.c
sys/vm/vm_map.c
sys/vm/vm_map.h
sys/vm/vm_object.c
sys/vm/vm_zone.c
sys/vm/vm_zone.h
sys/vm/vnode_pager.c

sys/bus/isa/i386/isa_dma.c
index bc0bacc..cba7a22 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)isa.c 7.2 (Berkeley) 5/13/91
  * $FreeBSD: src/sys/i386/isa/isa_dma.c,v 1.4.2.1 2000/08/08 19:49:53 peter Exp $
- * $DragonFly: src/sys/bus/isa/i386/isa_dma.c,v 1.2 2003/06/17 04:28:37 dillon Exp $
+ * $DragonFly: src/sys/bus/isa/i386/isa_dma.c,v 1.3 2003/07/06 21:23:49 dillon Exp $
  */
 
 /*
@@ -448,14 +448,14 @@ isa_dmastatus(int chan)
                waport = DMA2_CHN(chan - 4) + 2;
        }
 
-       disable_intr();                 /* no interrupts Mr Jones! */
+       cpu_disable_intr();             /* YYY *//* no interrupts Mr Jones! */
        outb(ffport, 0);                /* clear register LSB flipflop */
        low1 = inb(waport);
        high1 = inb(waport);
        outb(ffport, 0);                /* clear again */
        low2 = inb(waport);
        high2 = inb(waport);
-       enable_intr();                  /* enable interrupts again */
+       cpu_enable_intr();              /* enable interrupts again */
 
        /* 
         * Now decide if a wrap has tried to skew our results.
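
The YYY above flags that cpu_disable_intr() only masks interrupts on the
local cpu, so on SMP another cpu can still interleave with the flip-flop
read sequence.  A sketch of an eventual fix, assuming a dedicated lock in
the style of the com_lock()/clock_lock() calls introduced further down
(isa_dma_lock()/isa_dma_unlock() are hypothetical names, not part of this
commit):

        isa_dma_lock();                 /* hypothetical: spin lock + local cli */
        outb(ffport, 0);                /* clear register LSB flipflop */
        low1 = inb(waport);
        high1 = inb(waport);
        isa_dma_unlock();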
sys/conf/files.alpha
index cba0f5e..28c493e 100644 (file)
@@ -2,7 +2,7 @@
 # files marked standard are always included.
 #
 # $FreeBSD: src/sys/conf/files.alpha,v 1.43.2.9 2002/11/21 23:45:37 sam Exp $
-# $DragonFly: src/sys/conf/Attic/files.alpha,v 1.2 2003/06/17 04:28:19 dillon Exp $
+# $DragonFly: src/sys/conf/Attic/files.alpha,v 1.3 2003/07/06 21:23:45 dillon Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -68,7 +68,7 @@ alpha/alpha/perfmon.c         optional        perfmon profiling-routine
 alpha/alpha/perfmon.c          optional        perfmon
 alpha/alpha/pmap.c             standard
 alpha/alpha/procfs_machdep.c   standard
-alpha/alpha/simplelock.s       optional        smp
+alpha/alpha/spinlock.s         standard
 alpha/alpha/support.s          standard
 alpha/alpha/swtch.s            standard
 alpha/alpha/sys_machdep.c      standard
sys/conf/files.i386
index 585a061..59db079 100644 (file)
@@ -2,7 +2,7 @@
 # files marked standard are always included.
 #
 # $FreeBSD: src/sys/conf/files.i386,v 1.307.2.38 2003/01/02 20:41:33 kan Exp $
-# $DragonFly: src/sys/conf/Attic/files.i386,v 1.2 2003/06/17 04:28:19 dillon Exp $
+# $DragonFly: src/sys/conf/Attic/files.i386,v 1.3 2003/07/06 21:23:45 dillon Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -206,7 +206,7 @@ i386/i386/perfmon.c         optional        perfmon
 i386/i386/perfmon.c            optional        perfmon profiling-routine
 i386/i386/pmap.c               standard
 i386/i386/procfs_machdep.c     standard
-i386/i386/simplelock.s         optional        smp
+i386/i386/spinlock.s           standard
 i386/i386/support.s            standard
 i386/i386/swtch.s              standard
 i386/i386/sys_machdep.c                standard
sys/conf/files.pc98
index 839fd0d..1b54149 100644 (file)
@@ -4,7 +4,7 @@
 # modified for PC-9801
 #
 # $FreeBSD: src/sys/conf/files.pc98,v 1.140.2.44 2003/02/10 13:11:50 nyan Exp $
-# $DragonFly: src/sys/conf/Attic/files.pc98,v 1.2 2003/06/17 04:28:20 dillon Exp $
+# $DragonFly: src/sys/conf/Attic/files.pc98,v 1.3 2003/07/06 21:23:45 dillon Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -195,7 +195,7 @@ i386/i386/perfmon.c         optional        perfmon
 i386/i386/perfmon.c            optional        perfmon profiling-routine
 i386/i386/pmap.c               standard
 i386/i386/procfs_machdep.c     standard
-i386/i386/simplelock.s         optional        smp
+i386/i386/spinlock.s           standard
 i386/i386/support.s            standard
 i386/i386/swtch.s              standard
 i386/i386/sys_machdep.c                standard
sys/cpu/i386/include/cpufunc.h
index c630ea9..1abf5fc 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
- * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
+ * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.5 2003/07/06 21:23:49 dillon Exp $
  */
 
 /*
@@ -122,15 +122,6 @@ btrl(u_int *mask, int bit)
        return(result);
 }
 
-static __inline void
-disable_intr(void)
-{
-       __asm __volatile("cli" : : : "memory");
-#ifdef SMP
-       MPINTR_LOCK();
-#endif
-}
-
 static __inline void
 do_cpuid(u_int ax, u_int *p)
 {
@@ -140,11 +131,14 @@ do_cpuid(u_int ax, u_int *p)
 }
 
 static __inline void
-enable_intr(void)
+cpu_disable_intr(void)
+{
+       __asm __volatile("cli" : : : "memory");
+}
+
+static __inline void
+cpu_enable_intr(void)
 {
-#ifdef SMP
-       MPINTR_UNLOCK();
-#endif
        __asm __volatile("sti");
 }
 
@@ -286,7 +280,9 @@ invd(void)
  * will cause the invl*() functions to be equivalent to the cpu_invl*()
  * functions.
  */
-#ifndef SMP
+#ifdef SMP
+void smp_invltlb(void);
+#else
 #define smp_invltlb()
 #endif
 
@@ -630,9 +626,9 @@ load_dr7(u_int sel)
 int    breakpoint      __P((void));
 u_int  bsfl            __P((u_int mask));
 u_int  bsrl            __P((u_int mask));
-void   disable_intr    __P((void));
+void   cpu_disable_intr __P((void));
 void   do_cpuid        __P((u_int ax, u_int *p));
-void   enable_intr     __P((void));
+void   cpu_enable_intr __P((void));
 u_char inb             __P((u_int port));
 u_int  inl             __P((u_int port));
 void   insb            __P((u_int port, void *addr, size_t cnt));
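
The rename is semantic, not cosmetic: the old disable_intr() also took
MPINTR_LOCK under SMP, while cpu_disable_intr() affects only the executing
cpu.  Code that used the old pair for cross-cpu exclusion now has to say
so explicitly, which is what the sio and spkr conversions below do:

        /* old: cli plus implied MP serialization under SMP */
        disable_intr();
        /* touch shared hardware state */
        enable_intr();

        /* new: local cpu only; cross-cpu exclusion must be explicit */
        com_lock();
        /* touch shared hardware state */
        com_unlock();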
sys/dev/drm/drm_dma.h
index c058401..f8808b8 100644 (file)
@@ -29,7 +29,7 @@
  *    Gareth Hughes <gareth@valinux.com>
  *
  * $FreeBSD: src/sys/dev/drm/drm_dma.h,v 1.5.2.1 2003/04/26 07:05:28 anholt Exp $
- * $DragonFly: src/sys/dev/drm/Attic/drm_dma.h,v 1.2 2003/06/17 04:28:24 dillon Exp $
+ * $DragonFly: src/sys/dev/drm/Attic/drm_dma.h,v 1.3 2003/07/06 21:23:47 dillon Exp $
  */
 
 #include "dev/drm/drmP.h"
@@ -347,6 +347,7 @@ void DRM(vbl_send_signals)( drm_device_t *dev )
 
        DRM_SPINLOCK(&dev->vbl_lock);
 
+loop:
        vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
        while (vbl_sig != NULL) {
                drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
@@ -357,7 +358,9 @@ void DRM(vbl_send_signals)( drm_device_t *dev )
                                psignal(p, vbl_sig->signo);
 
                        TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
+                       DRM_SPINUNLOCK(&dev->vbl_lock);
                        DRM_FREE(vbl_sig,sizeof(*vbl_sig));
+                       goto loop;
                }
                vbl_sig = next;
        }
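
The new loop label exists because the token is dropped before the free:
once DRM_SPINUNLOCK() releases vbl_lock, other threads may alter
vbl_sig_list, so the cached next pointer cannot be trusted and the scan
restarts from the head.  A generic sketch of the shape, with an explicit
re-acquire before the rescan and illustrative names (delivered() and
list_token are hypothetical):

        restart:
                sig = TAILQ_FIRST(&siglist);
                while (sig != NULL) {
                        next = TAILQ_NEXT(sig, link);
                        if (delivered(sig)) {
                                TAILQ_REMOVE(&siglist, sig, link);
                                lwkt_reltoken(&list_token);
                                free(sig, M_DRM);       /* may reschedule */
                                lwkt_gettoken(&list_token);
                                goto restart;           /* pointers now stale */
                        }
                        sig = next;
                }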
sys/dev/drm/drm_os_freebsd.h
index 7a3a9e0..fe95022 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * $FreeBSD: src/sys/dev/drm/drm_os_freebsd.h,v 1.10.2.1 2003/04/26 07:05:28 anholt Exp $
- * $DragonFly: src/sys/dev/drm/Attic/drm_os_freebsd.h,v 1.3 2003/06/25 03:55:47 dillon Exp $
+ * $DragonFly: src/sys/dev/drm/Attic/drm_os_freebsd.h,v 1.4 2003/07/06 21:23:47 dillon Exp $
  */
 #include <sys/param.h>
 #include <sys/queue.h>
 #else
 #define DRM_CURPROC            curproc
 #define DRM_STRUCTPROC         struct proc
-#define DRM_SPINTYPE           struct simplelock
-#define DRM_SPININIT(l,name)   simple_lock_init(&l)
+#define DRM_SPINTYPE           struct lwkt_token
+#define DRM_SPININIT(l,name)   lwkt_inittoken(&l)
 #define DRM_SPINUNINIT(l)
-#define DRM_SPINLOCK(l)                simple_lock(l)
-#define DRM_SPINUNLOCK(u)      simple_unlock(u);
+#define DRM_SPINLOCK(l)                lwkt_gettoken(l)
+#define DRM_SPINUNLOCK(u)      lwkt_reltoken(u);
 #define DRM_CURRENTPID         curproc->p_pid
 #endif
 
sys/dev/raid/aac/aac.c
index 10ec4bd..2b32cda 100644 (file)
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  *
  *     $FreeBSD: src/sys/dev/aac/aac.c,v 1.9.2.14 2003/04/08 13:22:08 scottl Exp $
- *     $DragonFly: src/sys/dev/raid/aac/aac.c,v 1.3 2003/06/27 01:53:21 dillon Exp $
+ *     $DragonFly: src/sys/dev/raid/aac/aac.c,v 1.4 2003/07/06 21:23:47 dillon Exp $
  */
 
 /*
@@ -2681,8 +2681,10 @@ aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
                /* On the off chance that someone is sleeping for an aif... */
                if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
                        wakeup(sc->aac_aifq);
+               /* token may have been lost */
                /* Wakeup any poll()ers */
                selwakeup(&sc->rcv_select);
+               /* token may have been lost */
        }
        AAC_LOCK_RELEASE(&sc->aac_aifq_lock);
 
@@ -2770,6 +2772,8 @@ aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
 
 /*
  * Hand the next AIF off the top of the queue out to userspace.
+ *
+ * YYY token could be lost during copyout
  */
 static int
 aac_return_aif(struct aac_softc *sc, caddr_t uptr)
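
The repeated "token may have been lost" notes spell out the new locking
rule: an lwkt token serializes only between blocking points, so after any
of the calls the commit flags (wakeup/selwakeup above, the copyout in
aac_return_aif) the holder must assume other threads ran and revalidate.
A minimal sketch of a queue drain under that rule (aifq_peek() and
aifq_remove() are hypothetical helpers):

        AAC_LOCK_ACQUIRE(&sc->aac_aifq_lock);          /* lwkt_gettoken */
        while ((fib = aifq_peek(sc)) != NULL) {
                if (copyout(fib, uptr, sizeof(*fib)))  /* may block */
                        break;
                /* token may have been lost; re-check before dequeueing */
                if (aifq_peek(sc) == fib)
                        aifq_remove(sc, fib);
        }
        AAC_LOCK_RELEASE(&sc->aac_aifq_lock);          /* lwkt_reltoken */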
sys/dev/raid/aac/aacvar.h
index 1a8afa1..4d7b63a 100644 (file)
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  *
  *     $FreeBSD: src/sys/dev/aac/aacvar.h,v 1.4.2.7 2003/04/08 13:22:08 scottl Exp $
- *     $DragonFly: src/sys/dev/raid/aac/aacvar.h,v 1.3 2003/06/23 17:55:28 dillon Exp $
+ *     $DragonFly: src/sys/dev/raid/aac/aacvar.h,v 1.4 2003/07/06 21:23:47 dillon Exp $
  */
 
 /*
@@ -259,10 +259,10 @@ typedef struct mtx aac_lock_t;
 #define AAC_LOCK_ACQUIRE(l)    mtx_lock(l)
 #define AAC_LOCK_RELEASE(l)    mtx_unlock(l)
 #else
-typedef struct simplelock aac_lock_t;
-#define AAC_LOCK_INIT(l, s)    simple_lock_init(l)
-#define AAC_LOCK_ACQUIRE(l)    simple_lock(l)
-#define AAC_LOCK_RELEASE(l)    simple_unlock(l)
+typedef struct lwkt_token aac_lock_t;
+#define AAC_LOCK_INIT(l, s)    lwkt_inittoken(l)
+#define AAC_LOCK_ACQUIRE(l)    lwkt_gettoken(l)
+#define AAC_LOCK_RELEASE(l)    lwkt_reltoken(l)
 #endif
 
 #if __FreeBSD_version >= 500005
sys/dev/serial/sio/sio.c
index 8f5c84f..0652f6b 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/isa/sio.c,v 1.291.2.35 2003/05/18 08:51:15 murray Exp $
- * $DragonFly: src/sys/dev/serial/sio/sio.c,v 1.5 2003/06/29 03:28:44 dillon Exp $
+ * $DragonFly: src/sys/dev/serial/sio/sio.c,v 1.6 2003/07/06 21:23:50 dillon Exp $
  *     from: @(#)com.c 7.5 (Berkeley) 5/16/91
  *     from: i386/isa sio.c,v 1.234
  */
 #endif
 #include <dev/ic/ns16550.h>
 
-#ifndef __i386__
-#define disable_intr()
-#define enable_intr()
-#endif
-
-#ifdef SMP
-#define disable_intr() COM_DISABLE_INTR()
-#define enable_intr()  COM_ENABLE_INTR()
-#endif /* SMP */
-
 #define        LOTS_OF_EVENTS  64      /* helps separate urgent events from input */
 
 #define        CALLOUT_MASK            0x80
@@ -917,7 +907,7 @@ sioprobe(dev, xrid, rclk)
         * but mask them in the processor as well in case there are some
         * (misconfigured) shared interrupts.
         */
-       disable_intr();
+       com_lock();
 /* EXTRA DELAY? */
 
        /*
@@ -1040,7 +1030,7 @@ sioprobe(dev, xrid, rclk)
                }
                sio_setreg(com, com_ier, 0);
                sio_setreg(com, com_cfcr, CFCR_8BITS);
-               enable_intr();
+               com_unlock();
                bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                return (iobase == siocniobase ? 0 : result);
        }
@@ -1080,7 +1070,7 @@ sioprobe(dev, xrid, rclk)
        irqmap[3] = isa_irq_pending();
        failures[9] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND;
 
-       enable_intr();
+       com_unlock();
 
        irqs = irqmap[1] & ~irqmap[0];
        if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
@@ -1274,7 +1264,7 @@ sioattach(dev, xrid, rclk)
        } else
                com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
        if (siosetwater(com, com->it_in.c_ispeed) != 0) {
-               enable_intr();
+               com_unlock();
                /*
                 * Leave i/o resources allocated if this is a `cn'-level
                 * console, so that other devices can't snarf them.
@@ -1283,7 +1273,7 @@ sioattach(dev, xrid, rclk)
                        bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                return (ENOMEM);
        }
-       enable_intr();
+       com_unlock();
        termioschars(&com->it_in);
        com->it_out = com->it_in;
 
@@ -1583,7 +1573,7 @@ open_top:
                        }
                }
 
-               disable_intr();
+               com_lock();
                (void) inb(com->line_status_port);
                (void) inb(com->data_port);
                com->prev_modem_status = com->last_modem_status
@@ -1595,7 +1585,7 @@ open_top:
                        outb(com->intr_ctl_port, IER_ERXRDY | IER_ETXRDY
                                                | IER_ERLS | IER_EMSC);
                }
-               enable_intr();
+               com_unlock();
                /*
                 * Handle initial DCD.  Callout devices get a fake initial
                 * DCD (trapdoor DCD).  If we are callout, then any sleeping
@@ -1874,7 +1864,7 @@ sioinput(com)
                 * call overhead).
                 */
                do {
-                       enable_intr();
+                       com_unlock();
                        incc = com->iptr - buf;
                        if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
                            && (com->state & CS_RTS_IFLOW
@@ -1895,11 +1885,11 @@ sioinput(com)
                                tp->t_lflag &= ~FLUSHO;
                                comstart(tp);
                        }
-                       disable_intr();
+                       com_lock();
                } while (buf < com->iptr);
        } else {
                do {
-                       enable_intr();
+                       com_unlock();
                        line_status = buf[com->ierroff];
                        recv_data = *buf++;
                        if (line_status
@@ -1914,7 +1904,7 @@ sioinput(com)
                                        recv_data |= TTY_PE;
                        }
                        (*linesw[tp->t_line].l_rint)(recv_data, tp);
-                       disable_intr();
+                       com_lock();
                } while (buf < com->iptr);
        }
        com_events -= (com->iptr - com->ibuf);
@@ -1935,9 +1925,9 @@ siointr(arg)
        void            *arg;
 {
 #ifndef COM_MULTIPORT
-       COM_LOCK();
+       com_lock();
        siointr1((struct com_s *) arg);
-       COM_UNLOCK();
+       com_unlock();
 #else /* COM_MULTIPORT */
        bool_t          possibly_more_intrs;
        int             unit;
@@ -1950,13 +1940,13 @@ siointr(arg)
         * devices, then the edge from one may be lost because another is
         * on.
         */
-       COM_LOCK();
+       com_lock();
        do {
                possibly_more_intrs = FALSE;
                for (unit = 0; unit < sio_numunits; ++unit) {
                        com = com_addr(unit);
                        /*
-                        * XXX COM_LOCK();
+                        * XXX com_lock();
                         * would it work here, or be counter-productive?
                         */
                        if (com != NULL 
@@ -1966,10 +1956,10 @@ siointr(arg)
                                siointr1(com);
                                possibly_more_intrs = TRUE;
                        }
-                       /* XXX COM_UNLOCK(); */
+                       /* XXX com_unlock(); */
                }
        } while (possibly_more_intrs);
-       COM_UNLOCK();
+       com_unlock();
 #endif /* COM_MULTIPORT */
 }
 
@@ -2366,7 +2356,7 @@ repeat:
                         * Discard any events related to never-opened or
                         * going-away devices.
                         */
-                       disable_intr();
+                       com_lock();
                        incc = com->iptr - com->ibuf;
                        com->iptr = com->ibuf;
                        if (com->state & CS_CHECKMSR) {
@@ -2374,33 +2364,33 @@ repeat:
                                com->state &= ~CS_CHECKMSR;
                        }
                        com_events -= incc;
-                       enable_intr();
+                       com_unlock();
                        continue;
                }
                if (com->iptr != com->ibuf) {
-                       disable_intr();
+                       com_lock();
                        sioinput(com);
-                       enable_intr();
+                       com_unlock();
                }
                if (com->state & CS_CHECKMSR) {
                        u_char  delta_modem_status;
 
-                       disable_intr();
+                       com_lock();
                        delta_modem_status = com->last_modem_status
                                             ^ com->prev_modem_status;
                        com->prev_modem_status = com->last_modem_status;
                        com_events -= LOTS_OF_EVENTS;
                        com->state &= ~CS_CHECKMSR;
-                       enable_intr();
+                       com_unlock();
                        if (delta_modem_status & MSR_DCD)
                                (*linesw[tp->t_line].l_modem)
                                        (tp, com->prev_modem_status & MSR_DCD);
                }
                if (com->state & CS_ODONE) {
-                       disable_intr();
+                       com_lock();
                        com_events -= LOTS_OF_EVENTS;
                        com->state &= ~CS_ODONE;
-                       enable_intr();
+                       com_unlock();
                        if (!(com->state & CS_BUSY)
                            && !(com->extra_state & CSE_BUSYCHECK)) {
                                timeout(siobusycheck, com, hz / 100);
@@ -2600,7 +2590,7 @@ comparam(tp, t)
        if (com->state >= (CS_BUSY | CS_TTGO))
                siointr1(com);
 
-       enable_intr();
+       com_unlock();
        splx(s);
        comstart(tp);
        if (com->ibufold != NULL) {
@@ -2630,7 +2620,7 @@ siosetwater(com, speed)
        for (ibufsize = 128; ibufsize < cp4ticks;)
                ibufsize <<= 1;
        if (ibufsize == com->ibufsize) {
-               disable_intr();
+               com_lock();
                return (0);
        }
 
@@ -2640,7 +2630,7 @@ siosetwater(com, speed)
         */
        ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
        if (ibuf == NULL) {
-               disable_intr();
+               com_lock();
                return (ENOMEM);
        }
 
@@ -2658,7 +2648,7 @@ siosetwater(com, speed)
         * Read current input buffer, if any.  Continue with interrupts
         * disabled.
         */
-       disable_intr();
+       com_lock();
        if (com->iptr != com->ibuf)
                sioinput(com);
 
@@ -2693,7 +2683,7 @@ comstart(tp)
        if (com == NULL)
                return;
        s = spltty();
-       disable_intr();
+       com_lock();
        if (tp->t_state & TS_TTSTOP)
                com->state &= ~CS_TTGO;
        else
@@ -2706,7 +2696,7 @@ comstart(tp)
                    && com->state & CS_RTS_IFLOW)
                        outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
        }
-       enable_intr();
+       com_unlock();
        if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
                ttwwakeup(tp);
                splx(s);
@@ -2722,7 +2712,7 @@ comstart(tp)
                                                  sizeof com->obuf1);
                        com->obufs[0].l_next = NULL;
                        com->obufs[0].l_queued = TRUE;
-                       disable_intr();
+                       com_lock();
                        if (com->state & CS_BUSY) {
                                qp = com->obufq.l_next;
                                while ((next = qp->l_next) != NULL)
@@ -2734,7 +2724,7 @@ comstart(tp)
                                com->obufq.l_next = &com->obufs[0];
                                com->state |= CS_BUSY;
                        }
-                       enable_intr();
+                       com_unlock();
                }
                if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
                        com->obufs[1].l_tail
@@ -2742,7 +2732,7 @@ comstart(tp)
                                                  sizeof com->obuf2);
                        com->obufs[1].l_next = NULL;
                        com->obufs[1].l_queued = TRUE;
-                       disable_intr();
+                       com_lock();
                        if (com->state & CS_BUSY) {
                                qp = com->obufq.l_next;
                                while ((next = qp->l_next) != NULL)
@@ -2754,14 +2744,14 @@ comstart(tp)
                                com->obufq.l_next = &com->obufs[1];
                                com->state |= CS_BUSY;
                        }
-                       enable_intr();
+                       com_unlock();
                }
                tp->t_state |= TS_BUSY;
        }
-       disable_intr();
+       com_lock();
        if (com->state >= (CS_BUSY | CS_TTGO))
                siointr1(com);  /* fake interrupt to start output */
-       enable_intr();
+       com_unlock();
        ttwwakeup(tp);
        splx(s);
 }
@@ -2776,7 +2766,7 @@ comstop(tp, rw)
        com = com_addr(DEV_TO_UNIT(tp->t_dev));
        if (com == NULL || com->gone)
                return;
-       disable_intr();
+       com_lock();
        if (rw & FWRITE) {
                if (com->hasfifo)
 #ifdef COM_ESP
@@ -2803,7 +2793,7 @@ comstop(tp, rw)
                com_events -= (com->iptr - com->ibuf);
                com->iptr = com->ibuf;
        }
-       enable_intr();
+       com_unlock();
        comstart(tp);
 }
 
@@ -2846,7 +2836,7 @@ commctl(com, bits, how)
                mcr |= MCR_RTS;
        if (com->gone)
                return(0);
-       disable_intr();
+       com_lock();
        switch (how) {
        case DMSET:
                outb(com->modem_ctl_port,
@@ -2859,7 +2849,7 @@ commctl(com, bits, how)
                outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
                break;
        }
-       enable_intr();
+       com_unlock();
        return (0);
 }
 
@@ -2918,9 +2908,9 @@ comwakeup(chan)
                com = com_addr(unit);
                if (com != NULL && !com->gone
                    && (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
-                       disable_intr();
+                       com_lock();
                        siointr1(com);
-                       enable_intr();
+                       com_unlock();
                }
        }
 
@@ -2942,10 +2932,10 @@ comwakeup(chan)
                        u_int   delta;
                        u_long  total;
 
-                       disable_intr();
+                       com_lock();
                        delta = com->delta_error_counts[errnum];
                        com->delta_error_counts[errnum] = 0;
-                       enable_intr();
+                       com_unlock();
                        if (delta == 0)
                                continue;
                        total = com->error_counts[errnum] += delta;
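
Throughout sio.c the conversion is mechanical: every disable_intr()/
enable_intr() bracket protecting state shared with siointr() becomes
com_lock()/com_unlock(), replacing both the bare cli/sti and the old
COM_LOCK/COM_DISABLE_INTR variants (com_lock() itself is defined outside
these hunks, presumably a spin lock combined with local interrupt
masking).  The bracket structure is unchanged, e.g. in comstart():

        s = spltty();
        com_lock();                     /* was disable_intr() */
        if (tp->t_state & TS_TTSTOP)
                com->state &= ~CS_TTGO;
        else
                com->state |= CS_TTGO;
        com_unlock();                   /* was enable_intr() */
        splx(s);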
sys/dev/sound/isa/i386/spkr/spkr.c
index 75dc8bc..0d602be 100644 (file)
@@ -5,7 +5,7 @@
  * modified for FreeBSD by Andrew A. Chernov <ache@astral.msk.su>
  *
  * $FreeBSD: src/sys/i386/isa/spkr.c,v 1.45 2000/01/29 16:00:32 peter Exp $
- * $DragonFly: src/sys/dev/sound/isa/i386/spkr/Attic/spkr.c,v 1.3 2003/06/23 17:55:39 dillon Exp $
+ * $DragonFly: src/sys/dev/sound/isa/i386/spkr/Attic/spkr.c,v 1.4 2003/07/06 21:23:49 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -98,10 +98,10 @@ tone(thz, ticks)
        return;
     }
     splx(sps);
-    disable_intr();
+    clock_lock();
     outb(TIMER_CNTR2, (divisor & 0xff));       /* send lo byte */
     outb(TIMER_CNTR2, (divisor >> 8)); /* send hi byte */
-    enable_intr();
+    clock_unlock();
 
     /* turn the speaker on */
     outb(IO_PPI, inb(IO_PPI) | PPI_SPKR);
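
The 8254 takes a 16-bit counter load as an LSB/MSB byte pair through the
same port, so nothing may slip in between the two outb()s; clock_lock()/
clock_unlock() (presumably supplied by the clock.c changes in this commit)
extend that exclusion to other cpus, where plain cli only covered the
local one:

        clock_lock();                           /* serialize 8254 access */
        outb(TIMER_CNTR2, divisor & 0xff);      /* send lo byte ... */
        outb(TIMER_CNTR2, divisor >> 8);        /* ... then hi byte */
        clock_unlock();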
sys/i386/apic/apic_ipl.s
index 313a0b7..455f2cc 100644 (file)
@@ -1,6 +1,6 @@
 /*-
- * Copyright (c) 1997, by Steve Passe
- * All rights reserved.
+ * Copyright (c) 1997, by Steve Passe,  All rights reserved.
+ * Copyright (c) 2003, by Matthew Dillon,  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
- * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.6 2003/07/01 20:31:38 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/apic_ipl.s,v 1.7 2003/07/06 21:23:49 dillon Exp $
  */
 
-#if 0
-
        .data
        ALIGN_DATA
 
-/*
- * Routines used by splz_unpend to build an interrupt frame from a
- * trap frame.  The _vec[] routines build the proper frame on the stack,
- * then call one of _Xintr0 thru _XintrNN.
- *
- * used by:
- *   i386/isa/apic_ipl.s (this file):  splz_unpend JUMPs to HWIs.
- *   i386/isa/clock.c:                 setup _vec[clock] to point at _vec8254.
- */
-       .globl _vec
-_vec:
-       .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
-       .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
-       .long   vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23
+       /*
+        * Interrupt mask for APIC interrupts, defaults to all hardware
+        * interrupts turned off.
+        */
 
-/*
- * Note:
- *     This is the UP equivilant of _imen.
- *     It is OPAQUE, and must NOT be accessed directly.
- *     It MUST be accessed along with the IO APIC as a 'critical region'.
- *     Accessed by:
- *             INTREN()
- *             INTRDIS()
- *             MAYBE_MASK_IRQ
- *             MAYBE_UNMASK_IRQ
- *             imen_dump()
- */
        .p2align 2                              /* MUST be 32bit aligned */
-       .globl _apic_imen
-_apic_imen:
-       .long   HWI_MASK
 
+       .globl apic_imen
+apic_imen:
+       .long   HWI_MASK
 
-/*
- * 
- */
        .text
        SUPERALIGN_TEXT
 
-/*
- * splz() -    dispatch pending interrupts after cpl reduced
- *
- * Interrupt priority mechanism
- *     -- soft splXX masks with group mechanism (cpl)
- *     -- h/w masks for currently active or unused interrupts (imen)
- *     -- ipending = active interrupts currently masked by cpl
- */
-
-ENTRY(splz)
-       /*
-        * The caller has restored cpl and checked that (ipending & ~cpl)
-        * is nonzero.  However, since ipending can change at any time
-        * (by an interrupt or, with SMP, by another cpu), we have to
-        * repeat the check.  At the moment we must own the MP lock in
-        * the SMP case because the interruput handlers require it.  We
-        * loop until no unmasked pending interrupts remain.  
-        *
-        * No new unmaksed pending interrupts will be added during the
-        * loop because, being unmasked, the interrupt code will be able
-        * to execute the interrupts.
-        *
-        * Interrupts come in two flavors:  Hardware interrupts and software
-        * interrupts.  We have to detect the type of interrupt (based on the
-        * position of the interrupt bit) and call the appropriate dispatch
-        * routine.
-        * 
-        * NOTE: "bsfl %ecx,%ecx" is undefined when %ecx is 0 so we can't
-        * rely on the secondary btrl tests.
-        */
-       pushl   %ebx
-       movl    _curthread,%ebx
-       movl    TD_CPL(%ebx),%eax
-splz_next:
-       /*
-        * We don't need any locking here.  (ipending & ~cpl) cannot grow 
-        * while we're looking at it - any interrupt will shrink it to 0.
-        */
-       movl    $0,_reqpri
-       movl    %eax,%ecx
-       notl    %ecx                    /* set bit = unmasked level */
-       andl    _ipending,%ecx          /* set bit = unmasked pending INT */
-       jne     splz_unpend
-       popl    %ebx
-       ret
-
-       ALIGN_TEXT
-splz_unpend:
-       bsfl    %ecx,%ecx
-       lock
-       btrl    %ecx,_ipending
-       jnc     splz_next
-       cmpl    $NHWI,%ecx
-       jae     splz_swi
        /*
-        * We would prefer to call the intr handler directly here but that
-        * doesn't work for badly behaved handlers that want the interrupt
-        * frame.  Also, there's a problem determining the unit number.
-        * We should change the interface so that the unit number is not
-        * determined at config time.
-        *
-        * The vec[] routines build the proper frame on the stack so
-        * the interrupt will eventually return to the caller or splz,
-        * then calls one of _Xintr0 thru _XintrNN.
+        * Functions to enable and disable a hardware interrupt.  Generally
+        * called with only one bit set in the mask but can handle multiple
+        * bits to present the same API as the ICU.
         */
-       popl    %ebx
-       jmp     *_vec(,%ecx,4)
-
-       ALIGN_TEXT
-splz_swi:
-       pushl   %eax                    /* save cpl across call */
-       orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_CPL(%ebx) /* set cpl for SWI */
-       call    *_ihandlers(,%ecx,4)
-       popl    %eax
-       movl    %eax,TD_CPL(%ebx) /* restore cpl and loop */
-       jmp     splz_next
-
-/*
- * Fake clock interrupt(s) so that they appear to come from our caller instead
- * of from here, so that system profiling works.
- * XXX do this more generally (for all vectors; look up the C entry point).
- * XXX frame bogusness stops us from just jumping to the C entry point.
- * We have to clear iactive since this is an unpend call, and it will be
- * set from the time of the original INT.
- */
-
-/*
- * The 'generic' vector stubs.
- */
-
-#define BUILD_VEC(irq_num)                                             \
-       ALIGN_TEXT ;                                                    \
-__CONCAT(vec,irq_num): ;                                               \
-       popl    %eax ;                                                  \
-       pushfl ;                                                        \
-       pushl   $KCSEL ;                                                \
-       pushl   %eax ;                                                  \
-       cli ;                                                           \
-       lock ;                                  /* MP-safe */           \
-       andl    $~IRQ_BIT(irq_num), iactive ;   /* lazy masking */      \
-       MEXITCOUNT ;                                                    \
-       APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ;      \
-       jmp     __CONCAT(_Xintr,irq_num)
 
-
-       BUILD_VEC(0)
-       BUILD_VEC(1)
-       BUILD_VEC(2)
-       BUILD_VEC(3)
-       BUILD_VEC(4)
-       BUILD_VEC(5)
-       BUILD_VEC(6)
-       BUILD_VEC(7)
-       BUILD_VEC(8)
-       BUILD_VEC(9)
-       BUILD_VEC(10)
-       BUILD_VEC(11)
-       BUILD_VEC(12)
-       BUILD_VEC(13)
-       BUILD_VEC(14)
-       BUILD_VEC(15)
-       BUILD_VEC(16)                   /* 8 additional INTs in IO APIC */
-       BUILD_VEC(17)
-       BUILD_VEC(18)
-       BUILD_VEC(19)
-       BUILD_VEC(20)
-       BUILD_VEC(21)
-       BUILD_VEC(22)
-       BUILD_VEC(23)
-
-
-/******************************************************************************
- * XXX FIXME: figure out where these belong.
- */
-
-/* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
-#define QUALIFY_MASKS_NOT
-
-#ifdef QUALIFY_MASKS
-#define QUALIFY_MASK           \
-       btrl    %ecx, %eax ;    \
-       andl    %eax, %eax ;    \
-       jz      1f ;            \
-       pushl   $bad_mask ;     \
-       call    _panic ;        \
-1:
-
-bad_mask:      .asciz  "bad mask"
-#else
-#define QUALIFY_MASK
-#endif
-
-/*
- * (soon to be) MP-safe function to clear ONE INT mask bit.
- * The passed arg is a 32bit u_int MASK.
- * It sets the associated bit in _apic_imen.
- * It sets the mask bit of the associated IO APIC register.
- */
-ENTRY(INTREN)
-       pushfl                          /* save state of EI flag */
-       cli                             /* prevent recursion */
+ENTRY(INTRDIS)
        IMASK_LOCK                      /* enter critical reg */
-
-       movl    8(%esp), %eax           /* mask into %eax */
-       bsfl    %eax, %ecx              /* get pin index */
-       btrl    %ecx, apic_imen         /* update apic_imen */
-
-       QUALIFY_MASK
-
+       movl    4(%esp),%eax
+1:
+       bsfl    %eax,%ecx
+       jz      2f
+       btrl    %ecx,%eax
+       btsl    %ecx, apic_imen
        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
-       jz      1f
-
-       movl    %ecx, (%edx)            /* write the target register index */
-       movl    16(%edx), %eax          /* read the target register data */
-       andl    $~IOART_INTMASK, %eax   /* clear mask bit */
-       movl    %eax, 16(%edx)          /* write the APIC register data */
-1:     
+       jz      2f
+       movl    %ecx, (%edx)            /* target register index */
+       orl     $IOART_INTMASK,16(%edx) /* set intmask in target apic reg */
+       jmp     1b
+2:
        IMASK_UNLOCK                    /* exit critical reg */
-       popfl                           /* restore old state of EI flag */
        ret
 
-/*
- * (soon to be) MP-safe function to set ONE INT mask bit.
- * The passed arg is a 32bit u_int MASK.
- * It clears the associated bit in apic_imen.
- * It clears the mask bit of the associated IO APIC register.
- */
-ENTRY(INTRDIS)
-       pushfl                          /* save state of EI flag */
-       cli                             /* prevent recursion */
+ENTRY(INTREN)
        IMASK_LOCK                      /* enter critical reg */
-
-       movl    8(%esp), %eax           /* mask into %eax */
+       movl    4(%esp), %eax           /* mask into %eax */
+1:
        bsfl    %eax, %ecx              /* get pin index */
-       btsl    %ecx, apic_imen         /* update _apic_imen */
-
-       QUALIFY_MASK
-
+       jz      2f
+       btrl    %ecx,%eax
+       btrl    %ecx, apic_imen         /* update apic_imen */
        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
-       jz      1f
-
+       jz      2f
        movl    %ecx, (%edx)            /* write the target register index */
-       movl    16(%edx), %eax          /* read the target register data */
-       orl     $IOART_INTMASK, %eax    /* set mask bit */
-       movl    %eax, 16(%edx)          /* write the APIC register data */
-1:     
+       andl    $~IOART_INTMASK, 16(%edx) /* clear mask bit */
+       jmp     1b
+2:     
        IMASK_UNLOCK                    /* exit critical reg */
-       popfl                           /* restore old state of EI flag */
-       ret
-
-
-/******************************************************************************
- *
- */
-
-
-/*
- * void write_ioapic_mask(int apic, u_int mask); 
- */
-
-#define _INT_MASK      0x00010000
-#define _PIN_MASK      0x00ffffff
-
-#define _OLD_ESI         0(%esp)
-#define _OLD_EBX         4(%esp)
-#define _RETADDR         8(%esp)
-#define _APIC           12(%esp)
-#define _MASK           16(%esp)
-
-       ALIGN_TEXT
-write_ioapic_mask:
-       pushl %ebx                      /* scratch */
-       pushl %esi                      /* scratch */
-
-       movl    apic_imen, %ebx
-       xorl    _MASK, %ebx             /* %ebx = _apic_imen ^ mask */
-       andl    $_PIN_MASK, %ebx        /* %ebx = _apic_imen & 0x00ffffff */
-       jz      all_done                /* no change, return */
-
-       movl    _APIC, %esi             /* APIC # */
-       movl    ioapic, %ecx
-       movl    (%ecx,%esi,4), %esi     /* %esi holds APIC base address */
-
-next_loop:                             /* %ebx = diffs, %esi = APIC base */
-       bsfl    %ebx, %ecx              /* %ecx = index if 1st/next set bit */
-       jz      all_done
-
-       btrl    %ecx, %ebx              /* clear this bit in diffs */
-       leal    16(,%ecx,2), %edx       /* calculate register index */
-
-       movl    %edx, (%esi)            /* write the target register index */
-       movl    16(%esi), %eax          /* read the target register data */
-
-       btl     %ecx, _MASK             /* test for mask or unmask */
-       jnc     clear                   /* bit is clear */
-       orl     $_INT_MASK, %eax        /* set mask bit */
-       jmp     write
-clear: andl    $~_INT_MASK, %eax       /* clear mask bit */
-
-write: movl    %eax, 16(%esi)          /* write the APIC register data */
-
-       jmp     next_loop               /* try another pass */
-
-all_done:
-       popl    %esi
-       popl    %ebx
-       ret
-
-#undef _OLD_ESI
-#undef _OLD_EBX
-#undef _RETADDR
-#undef _APIC
-#undef _MASK
-
-#undef _PIN_MASK
-#undef _INT_MASK
-
-#ifdef oldcode
-
-_INTREN:
-       movl apic_imen, %eax
-       notl %eax                       /* mask = ~mask */
-       andl apic_imen, %eax            /* %eax = _apic_imen & ~mask */
-
-       pushl %eax                      /* new (future) _apic_imen value */
-       pushl $0                        /* APIC# arg */
-       call write_ioapic_mask          /* modify the APIC registers */
-
-       addl $4, %esp                   /* remove APIC# arg from stack */
-       popl apic_imen                  /* _apic_imen |= mask */
-       ret
-
-_INTRDIS:
-       movl _apic_imen, %eax
-       orl 4(%esp), %eax               /* %eax = _apic_imen | mask */
-
-       pushl %eax                      /* new (future) _apic_imen value */
-       pushl $0                        /* APIC# arg */
-       call write_ioapic_mask          /* modify the APIC registers */
-
-       addl $4, %esp                   /* remove APIC# arg from stack */
-       popl apic_imen                  /* _apic_imen |= mask */
-       ret
-
-#endif /* oldcode */
-
-
-#ifdef ready
-
-/*
- * u_int read_io_apic_mask(int apic); 
- */
-       ALIGN_TEXT
-read_io_apic_mask:
        ret
 
-/*
- * Set INT mask bit for each bit set in 'mask'.
- * Ignore INT mask bit for all others.
- *
- * void set_io_apic_mask(apic, u_int32_t bits); 
- */
-       ALIGN_TEXT
-set_io_apic_mask:
-       ret
-
-/*
- * void set_ioapic_maskbit(int apic, int bit); 
- */
-       ALIGN_TEXT
-set_ioapic_maskbit:
-       ret
-
-/*
- * Clear INT mask bit for each bit set in 'mask'.
- * Ignore INT mask bit for all others.
- *
- * void clr_io_apic_mask(int apic, u_int32_t bits); 
- */
-       ALIGN_TEXT
-clr_io_apic_mask:
-       ret
-
-/*
- * void clr_ioapic_maskbit(int apic, int bit); 
- */
-       ALIGN_TEXT
-clr_ioapic_maskbit:
-       ret
-
-#endif /** ready */
-
 /******************************************************************************
  * 
  */
@@ -465,4 +125,3 @@ ENTRY(apic_eoi)
        movl    $0, lapic+0xb0
        ret
 
-#endif
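
The rewritten INTREN/INTRDIS drop the one-bit-only assumption (and with it
QUALIFY_MASK) and loop over every set bit, presenting the same multi-bit
API as the ICU versions.  In C, the new INTREN logic is roughly the
following sketch (io_addr/redirindex are illustrative field names; the asm
indexes the 16-byte int_to_apicintpin[] entries at byte offsets 8 and 12,
and wraps the loop in IMASK_LOCK/IMASK_UNLOCK):

        void
        INTREN(u_int mask)
        {
                while (mask) {
                        int irq = bsfl(mask);   /* index of lowest set bit */
                        volatile u_int *apic;

                        mask &= ~(1 << irq);
                        apic_imen &= ~(1 << irq);       /* unmask in imen */
                        apic = int_to_apicintpin[irq].io_addr;
                        if (apic != NULL) {
                                /* select redirection reg, clear mask bit */
                                apic[0] = int_to_apicintpin[irq].redirindex;
                                apic[4] &= ~IOART_INTMASK; /* window at +16 */
                        }
                }
        }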
sys/i386/apic/apic_vector.s
index 27de928..6bc664a 100644 (file)
@@ -1,62 +1,23 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.7 2003/07/01 20:31:38 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/apic_vector.s,v 1.8 2003/07/06 21:23:49 dillon Exp $
  */
 
 
 #include <machine/apic.h>
 #include <machine/smp.h>
-
 #include "i386/isa/intr_machdep.h"
 
 /* convert an absolute IRQ# into a bitmask */
-#define IRQ_BIT(irq_num)       (1 << (irq_num))
+#define IRQ_LBIT(irq_num)      (1 << (irq_num))
 
 /* make an index into the IO APIC from the IRQ# */
 #define REDTBL_IDX(irq_num)    (0x10 + ((irq_num) * 2))
 
-
-/*
- * Macros for interrupt interrupt entry, call to handler, and exit.
- */
-
-#define        FAST_INTR(irq_num, vec_name)                                    \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       pushl   %ds ;                                                   \
-       pushl   %es ;                                                   \
-       pushl   %fs ;                                                   \
-       movl    $KDSEL,%eax ;                                           \
-       mov     %ax,%ds ;                                               \
-       movl    %ax,%es ;                                               \
-       movl    $KPSEL,%eax ;                                           \
-       mov     %ax,%fs ;                                               \
-       FAKE_MCOUNT(6*4(%esp)) ;                                        \
-       pushl   intr_unit + (irq_num) * 4 ;                             \
-       call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
-       addl    $4, %esp ;                                              \
-       movl    $0, lapic_eoi ;                                         \
-       lock ;                                                          \
-       incl    cnt+V_INTR ;    /* book-keeping can wait */             \
-       movl    intr_countp + (irq_num) * 4, %eax ;                     \
-       lock ;                                                          \
-       incl    (%eax) ;                                                \
-       MEXITCOUNT ;                                                    \
-       popl    %fs ;                                                   \
-       popl    %es ;                                                   \
-       popl    %ds ;                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax ;                                                  \
-       iret
-
 /*
- * 
+ * Push an interrupt frame in a format acceptable to doreti, reload
+ * the segment registers for the kernel.
  */
 #define PUSH_FRAME                                                     \
        pushl   $0 ;            /* dummy error code */                  \
@@ -64,23 +25,54 @@ IDTVEC(vec_name) ;                                                  \
        pushal ;                                                        \
        pushl   %ds ;           /* save data and extra segments ... */  \
        pushl   %es ;                                                   \
-       pushl   %fs
+       pushl   %fs ;                                                   \
+       mov     $KDSEL,%ax ;                                            \
+       mov     %ax,%ds ;                                               \
+       mov     %ax,%es ;                                               \
+       mov     $KPSEL,%ax ;                                            \
+       mov     %ax,%fs ;                                               \
 
+#define PUSH_DUMMY                                                     \
+       pushfl ;                /* phys int frame / flags */            \
+       pushl %cs ;             /* phys int frame / cs */               \
+       pushl   12(%esp) ;      /* original caller eip */               \
+       pushl   $0 ;            /* dummy error code */                  \
+       pushl   $0 ;            /* dummy trap type */                   \
+       subl    $11*4,%esp ;    /* pushal + 3 seg regs (dummy) */       \
+
+/*
+ * Warning: POP_FRAME can only be used if there is no chance of a
+ * segment register being changed (e.g. by procfs), which is why syscalls
+ * have to use doreti.
+ */
 #define POP_FRAME                                                      \
        popl    %fs ;                                                   \
        popl    %es ;                                                   \
        popl    %ds ;                                                   \
        popal ;                                                         \
-       addl    $4+4,%esp
+       addl    $2*4,%esp ;     /* dummy trap & error codes */          \
+
+#define POP_DUMMY                                                      \
+       addl    $16*4,%esp ;                                            \
 
 #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
+
+/*
+ * Interrupts are expected to already be disabled when using these
+ * IMASK_*() macros.
+ */
+#define IMASK_LOCK                                                     \
+       SPIN_LOCK(imen_spinlock) ;                                      \
+
+#define IMASK_UNLOCK                                                   \
+       SPIN_UNLOCK(imen_spinlock) ;                                    \
        
 #define MASK_IRQ(irq_num)                                              \
        IMASK_LOCK ;                            /* into critical reg */ \
-       testl   $IRQ_BIT(irq_num), apic_imen ;                          \
+       testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        jne     7f ;                    /* masked, don't mask */        \
-       orl     $IRQ_BIT(irq_num), apic_imen ;  /* set the mask bit */  \
+       orl     $IRQ_LBIT(irq_num), apic_imen ; /* set the mask bit */  \
        movl    IOAPICADDR(irq_num), %ecx ;     /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax, (%ecx) ;                  /* write the index */   \
@@ -88,17 +80,18 @@ IDTVEC(vec_name) ;                                                  \
        orl     $IOART_INTMASK, %eax ;          /* set the mask */      \
        movl    %eax, IOAPIC_WINDOW(%ecx) ;     /* new value */         \
 7: ;                                           /* already masked */    \
-       IMASK_UNLOCK
+       IMASK_UNLOCK ;                                                  \
+
 /*
  * Test to see whether we are handling an edge or level triggered INT.
  *  Level-triggered INTs must still be masked as we don't clear the source,
  *  and the EOI cycle would cause redundant INTs to occur.
  */
 #define MASK_LEVEL_IRQ(irq_num)                                                \
-       testl   $IRQ_BIT(irq_num), apic_pin_trigger ;                   \
+       testl   $IRQ_LBIT(irq_num), apic_pin_trigger ;                  \
        jz      9f ;                            /* edge, don't mask */  \
        MASK_IRQ(irq_num) ;                                             \
-9:
+9: ;                                                                   \
 
 
 #ifdef APIC_INTR_REORDER
@@ -108,27 +101,26 @@ IDTVEC(vec_name) ;                                                        \
        testl   apic_isrbit_location + 4 + 8 * (irq_num), %eax ;        \
        jz      9f ;                            /* not active */        \
        movl    $0, lapic_eoi ;                                         \
-       APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
-9:
+9:                                                                     \
 
 #else
+
 #define EOI_IRQ(irq_num)                                               \
-       testl   $IRQ_BIT(irq_num), lapic_isr1;                          \
+       testl   $IRQ_LBIT(irq_num), lapic_isr1;                         \
        jz      9f      ;                       /* not active */        \
        movl    $0, lapic_eoi;                                          \
-       APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;        \
-9:
+9:                                                                     \
+
 #endif
        
-       
 /*
 * Test to see if the source is currently masked, clear if so.
  */
 #define UNMASK_IRQ(irq_num)                                    \
        IMASK_LOCK ;                            /* into critical reg */ \
-       testl   $IRQ_BIT(irq_num), apic_imen ;                          \
+       testl   $IRQ_LBIT(irq_num), apic_imen ;                         \
        je      7f ;                    /* bit clear, not masked */     \
-       andl    $~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */     \
+       andl    $~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */    \
        movl    IOAPICADDR(irq_num),%ecx ;      /* ioapic addr */       \
        movl    REDIRIDX(irq_num), %eax ;       /* get the index */     \
        movl    %eax,(%ecx) ;                   /* write the index */   \
@@ -136,174 +128,189 @@ IDTVEC(vec_name) ;                                                      \
        andl    $~IOART_INTMASK,%eax ;          /* clear the mask */    \
        movl    %eax,IOAPIC_WINDOW(%ecx) ;      /* new value */         \
 7: ;                                                                   \
-       IMASK_UNLOCK
-
-#ifdef APIC_INTR_DIAGNOSTIC
-#ifdef APIC_INTR_DIAGNOSTIC_IRQ
-log_intr_event:
-       pushf
-       cli
-       pushl   $CNAME(apic_itrace_debuglock)
-       call    CNAME(s_lock_np)
-       addl    $4, %esp
-       movl    CNAME(apic_itrace_debugbuffer_idx), %ecx
-       andl    $32767, %ecx
-       movl    PCPU(cpuid), %eax
-       shll    $8,     %eax
-       orl     8(%esp), %eax
-       movw    %ax,    CNAME(apic_itrace_debugbuffer)(,%ecx,2)
-       incl    %ecx
-       andl    $32767, %ecx
-       movl    %ecx,   CNAME(apic_itrace_debugbuffer_idx)
-       pushl   $CNAME(apic_itrace_debuglock)
-       call    CNAME(s_unlock_np)
-       addl    $4, %esp
-       popf
-       ret
-       
+       IMASK_UNLOCK ;                                                  \
 
-#define APIC_ITRACE(name, irq_num, id)                                 \
-       lock ;                                  /* MP-safe */           \
-       incl    CNAME(name) + (irq_num) * 4 ;                           \
+/*
+ * Fast interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti
+ *     - Mask the interrupt and reenable its source
+ *     - If we cannot take the interrupt set its fpending bit and
+ *       doreti.
+ *     - If we can take the interrupt clear its fpending bit,
+ *       call the handler, then unmask and doreti.
+ *
+ * YYY can cache gd base pointer instead of using hidden %fs prefixes.
+ */
+
+#define        FAST_INTR(irq_num, vec_name)                                    \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       PUSH_FRAME ;                                                    \
+       FAKE_MCOUNT(13*4(%esp)) ;                                       \
+       MASK_LEVEL_IRQ(irq_num) ;                                       \
+       EOI_IRQ(irq_num) ;                                              \
+       incl    PCPU(intr_nesting_level) ;                              \
+       movl    PCPU(curthread),%ebx ;                                  \
+       movl    TD_CPL(%ebx),%eax ;                                     \
        pushl   %eax ;                                                  \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;                                                  \
-       movl    $(irq_num), %eax ;                                      \
-       cmpl    $APIC_INTR_DIAGNOSTIC_IRQ, %eax ;                       \
-       jne     7f ;                                                    \
-       pushl   $id ;                                                   \
-       call    log_intr_event ;                                        \
+       cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       jge     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num), %eax ;                              \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave interrupt masked */    \
+       orl     $IRQ_LBIT(irq_num),PCPU(fpending) ;                     \
+       movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       /* clear pending bit, run handler */                            \
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       andl    $~IRQ_LBIT(irq_num),PCPU(fpending) ;                    \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
        addl    $4, %esp ;                                              \
-7: ;                                                                   \
-       popl    %edx ;                                                  \
-       popl    %ecx ;                                                  \
-       popl    %eax
-#else
-#define APIC_ITRACE(name, irq_num, id)                                 \
-       lock ;                                  /* MP-safe */           \
-       incl    CNAME(name) + (irq_num) * 4
-#endif
+       subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       incl    PCPU(cnt)+V_INTR ;      /* book-keeping, make per-cpu YYY */ \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(irq_num) ;                                           \
+5: ;                                                                   \
+       MEXITCOUNT ;                                                    \
+       jmp     doreti ;                                                \
 
-#define APIC_ITRACE_ENTER 1
-#define APIC_ITRACE_EOI 2
-#define APIC_ITRACE_TRYISRLOCK 3
-#define APIC_ITRACE_GOTISRLOCK 4
-#define APIC_ITRACE_ENTER2 5
-#define APIC_ITRACE_LEAVE 6
-#define APIC_ITRACE_UNMASK 7
-#define APIC_ITRACE_ACTIVE 8
-#define APIC_ITRACE_MASKED 9
-#define APIC_ITRACE_NOISRLOCK 10
-#define APIC_ITRACE_MASKED2 11
-#define APIC_ITRACE_SPLZ 12
-#define APIC_ITRACE_DORETI 13  
-       
-#else  
-#define APIC_ITRACE(name, irq_num, id)
-#endif
-               
-#define        INTR(irq_num, vec_name, maybe_extra_ipending)                   \
+/*
+ * Restart a fast interrupt held up by a critical section or cpl.
+ *
+ *     - Push a dummy trap frame as required by doreti
+ *     - The interrupt source is already masked
+ *     - Clear the fpending bit
+ *     - Run the handler
+ *     - Unmask the interrupt
+ *     - Pop the dummy frame and do a normal return
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
+#define FAST_UNPEND(irq_num, vec_name)                                 \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl   %ebp ;                                                  \
+       movl    %esp,%ebp ;                                             \
+       PUSH_DUMMY ;                                                    \
+       pushl   intr_unit + (irq_num) * 4 ;                             \
+       call    *intr_handler + (irq_num) * 4 ; /* do the work ASAP */  \
+       addl    $4, %esp ;                                              \
+       incl    PCPU(cnt)+V_INTR ;      /* book-keeping, make per-cpu YYY */ \
+       movl    intr_countp + (irq_num) * 4, %eax ;                     \
+       incl    (%eax) ;                                                \
+       UNMASK_IRQ(irq_num) ;                                           \
+       POP_DUMMY ;                                                     \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
+
+/*
+ * Slow interrupt call handlers run in the following sequence:
+ *
+ *     - Push the trap frame required by doreti.
+ *     - Mask the interrupt and reenable its source.
+ *     - If we cannot take the interrupt, set its ipending bit and
+ *       doreti.  In addition to checking for a critical section
+ *       and the cpl mask we also check to see if the thread is still
+ *       running.
+ *     - If we can take the interrupt, clear its ipending bit,
+ *       set its irunning bit, and schedule the thread.  Leave
+ *       interrupts masked and doreti.
+ *
+ *     The interrupt thread will run its handlers and loop if
+ *     ipending is found to be set.  ipending/irunning interlock
+ *     the interrupt thread with the interrupt.  The handler calls
+ *     UNPEND when it is through.
+ *
+ *     Note that we do not enable interrupts when calling sched_ithd.
+ *     YYY sched_ithd may preempt us synchronously (fix interrupt stacking)
+ *
+ *     YYY can cache gd base pointer instead of using hidden %fs
+ *     prefixes.
+ */
+
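Again as an aid, a rough C rendering of the slow-interrupt decision the INTR
macro below implements; as before, the names are illustrative and the assembly
is authoritative:

	/*
	 * Hypothetical sketch of the INTR decision.  The ipending/irunning
	 * bits interlock this vector with the interrupt thread.
	 */
	static void
	slow_intr_sketch(int irq, struct thread *td, struct globaldata *gd)
	{
		mask_level_irq(irq);
		lapic_eoi(irq);
		++gd->gd_intr_nesting_level;

		if (td->td_pri >= TDPRI_CRIT ||		/* in critical section */
		    (gd->gd_irunning & (1 << irq)) ||	/* ithread still running */
		    (td->td_cpl & (1 << irq))) {	/* masked by cpl */
			gd->gd_ipending |= 1 << irq;	/* defer, leave masked */
			gd->gd_reqpri = TDPRI_CRIT;
		} else {
			td->td_pri += TDPRI_CRIT;
			gd->gd_irunning |= 1 << irq;
			gd->gd_ipending &= ~(1 << irq);
			sched_ithd(irq);	/* schedule ithread, stay masked */
			td->td_pri -= TDPRI_CRIT;
		}
		/* both paths then fall into doreti */
	}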
+#define INTR(irq_num, vec_name, maybe_extra_ipending)                  \
        .text ;                                                         \
        SUPERALIGN_TEXT ;                                               \
-/* XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */  \
 IDTVEC(vec_name) ;                                                     \
        PUSH_FRAME ;                                                    \
-       movl    $KDSEL, %eax ;  /* reload with kernel's data segment */ \
-       mov     %ax, %ds ;                                              \
-       mov     %ax, %es ;                                              \
-       movl    $KPSEL, %eax ;                                          \
-       mov     %ax, %fs ;                                              \
-;                                                                      \
        maybe_extra_ipending ;                                          \
-;                                                                      \
-       APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;    \
-       lock ;                                  /* MP-safe */           \
-       btsl    $(irq_num), iactive ;           /* lazy masking */      \
-       jc      1f ;                            /* already active */    \
 ;                                                                      \
        MASK_LEVEL_IRQ(irq_num) ;                                       \
        EOI_IRQ(irq_num) ;                                              \
-0: ;                                                                   \
-       APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
-       MP_TRYLOCK ;            /* XXX this is going away... */         \
-       testl   %eax, %eax ;                    /* did we get it? */    \
-       jz      3f ;                            /* no */                \
-;                                                                      \
-       APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
+       incl    PCPU(intr_nesting_level) ;                              \
        movl    PCPU(curthread),%ebx ;                                  \
-       testl   $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%eax) ;              \
-       jne     2f ;                            /* this INT masked */   \
+       movl    TD_CPL(%ebx),%eax ;                                     \
+       pushl   %eax ;          /* cpl to restore */                    \
        cmpl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
-       jge     2f ;                            /* in critical sec */   \
-;                                                                      \
-       incb    PCPU(intr_nesting_level) ;                              \
-;                                                                      \
-  /* entry point used by doreti_unpend for HWIs. */                    \
-__CONCAT(Xresume,irq_num): ;                                           \
-       FAKE_MCOUNT(13*4(%esp)) ;               /* XXX avoid dbl cnt */ \
-       lock ;  incl    _cnt+V_INTR ;           /* tally interrupts */  \
-       movl    _intr_countp + (irq_num) * 4, %eax ;                    \
-       lock ;  incl    (%eax) ;                                        \
-;                                                                      \
-       movl    PCPU(curthread), %ebx ;                                 \
-       movl    TD_MACH+MTD_CPL(%ebx), %eax ;                           \
-       pushl   %eax ;   /* cpl restored by doreti */                   \
-       orl     _intr_mask + (irq_num) * 4, %eax ;                      \
-       movl    %eax, TD_MACH+MTD_CPL(%ebx) ;                           \
-       lock ;                                                          \
-       andl    $~IRQ_BIT(irq_num), PCPU(ipending) ;                    \
-;                                                                      \
-       pushl   _intr_unit + (irq_num) * 4 ;                            \
-       APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;  \
+       jge     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num),PCPU(irunning) ;                     \
+       jnz     1f ;                                                    \
+       testl   $IRQ_LBIT(irq_num),%eax ;                               \
+       jz      2f ;                                                    \
+1: ;                                                                   \
+       /* set the pending bit and return, leave the interrupt masked */ \
+       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
+       movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
+       jmp     5f ;                                                    \
+2: ;                                                                   \
+       addl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       /* set running bit, clear pending bit, run handler */           \
+       orl     $IRQ_LBIT(irq_num), PCPU(irunning) ;                    \
+       andl    $~IRQ_LBIT(irq_num), PCPU(ipending) ;                   \
        sti ;                                                           \
-       call    *_intr_handler + (irq_num) * 4 ;                        \
-       cli ;                                                           \
-       APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;    \
+       pushl   $irq_num ;                                              \
+       call    sched_ithd ;                                            \
        addl    $4,%esp ;                                               \
-;                                                                      \
-       lock ;  andl    $~IRQ_BIT(irq_num), iactive ;                   \
-       UNMASK_IRQ(irq_num) ;                                           \
-       APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ;  \
-       sti ;                           /* doreti repeats cli/sti */    \
+       subl    $TDPRI_CRIT,TD_PRI(%ebx) ;                              \
+       incl    PCPU(cnt)+V_INTR ; /* book-keeping, make per-cpu YYY */ \
+       movl    intr_countp + (irq_num) * 4,%eax ;                      \
+       incl    (%eax) ;                                                \
+5: ;                                                                   \
        MEXITCOUNT ;                                                    \
        jmp     doreti ;                                                \
-;                                                                      \
-       ALIGN_TEXT ;                                                    \
-1: ;                                           /* active  */           \
-       APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ;  \
-       MASK_IRQ(irq_num) ;                                             \
-       EOI_IRQ(irq_num) ;                                              \
-       lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), PCPU(ipending) ;                     \
-       movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
-       lock ;                                                          \
-       btsl    $(irq_num), iactive ;           /* still active */      \
-       jnc     0b ;                            /* retry */             \
-       POP_FRAME ;                                                     \
-       iret ;          /* XXX:  iactive bit might be 0 now */          \
-       ALIGN_TEXT ;                                                    \
-2: ;                           /* masked by cpl, leave iactive set */  \
-       APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ;  \
-       lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), PCPU(ipending) ;                     \
-       movl    $TDPRI_CRIT, PCPU(reqpri) ;                             \
-       MP_RELLOCK ;                                                    \
-       POP_FRAME ;                                                     \
-       iret ;                                                          \
+
+/*
+ * Unmask a slow interrupt.  This function is used by interrupt threads
+ * after they have descheduled themselves to reenable interrupts and
+ * possibly cause a reschedule to occur.  The interrupt's irunning bit
+ * is cleared prior to unmasking.
+ */
+
+#define INTR_UNMASK(irq_num, vec_name, icu)                            \
+       .text ;                                                         \
+       SUPERALIGN_TEXT ;                                               \
+IDTVEC(vec_name) ;                                                     \
+       pushl %ebp ;     /* frame for ddb backtrace */                  \
+       movl    %esp, %ebp ;                                            \
+       andl    $~IRQ_LBIT(irq_num), PCPU(irunning) ;                   \
+       UNMASK_IRQ(irq_num) ;                                           \
+       popl %ebp ;                                                     \
+       ret ;                                                           \
+
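For context, a sketch of how an interrupt thread is expected to interact with
these bits, inferred from the comments above; the loop shape and helper names
are hypothetical:

	/*
	 * Hypothetical interrupt-thread loop: rerun handlers while
	 * ipending stays set, then deschedule and call the INTR_UNMASK
	 * vector, which clears irunning and unmasks the source.
	 */
	static void
	ithread_loop_sketch(int irq, struct globaldata *gd)
	{
		for (;;) {
			run_registered_handlers(irq);
			if ((gd->gd_ipending & (1 << irq)) == 0)
				break;
			gd->gd_ipending &= ~(1 << irq);	/* loop and rerun */
		}
		deschedule_self();
		call_intr_unmask(irq);	/* the IDTVEC defined above */
	}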
+#if 0
+       /* XXX forward_irq to cpu holding the BGL? */
+
        ALIGN_TEXT ;                                                    \
 3: ;                   /* other cpu has isr lock */                    \
-       APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
        lock ;                                                          \
-       orl     $IRQ_BIT(irq_num), PCPU(ipending) ;                     \
+       orl     $IRQ_LBIT(irq_num), PCPU(ipending) ;                    \
        movl    $TDPRI_CRIT,_reqpri ;                                   \
-       testl   $IRQ_BIT(irq_num), TD_MACH+MTD_CPL(%ebx) ;              \
+       testl   $IRQ_LBIT(irq_num), TD_CPL(%ebx) ;                     \
        jne     4f ;                            /* this INT masked */   \
        call    forward_irq ;    /* forward irq to lock holder */       \
        POP_FRAME ;                             /* and return */        \
        iret ;                                                          \
        ALIGN_TEXT ;                                                    \
 4: ;                                           /* blocked */           \
-       APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
        POP_FRAME ;                             /* and return */        \
        iret
 
@@ -314,6 +321,9 @@ __CONCAT(Xresume,irq_num): ;                                                \
  *   8259 PIC for missing INTs.  See the APIC documentation for details.
  *  This routine should NOT do an 'EOI' cycle.
  */
+
+#endif
+
        .text
        SUPERALIGN_TEXT
        .globl Xspuriousint
@@ -329,8 +339,8 @@ Xspuriousint:
  */
        .text
        SUPERALIGN_TEXT
-       .globl  _Xinvltlb
-_Xinvltlb:
+       .globl  Xinvltlb
+Xinvltlb:
        pushl   %eax
 
 #ifdef COUNT_XINVLTLB_HITS
@@ -353,6 +363,7 @@ _Xinvltlb:
        iret
 
 
+#if 0
 #ifdef BETTER_CLOCK
 
 /*
@@ -413,13 +424,14 @@ Xcpucheckstate:
        iret
 
 #endif /* BETTER_CLOCK */
+#endif
 
 /*
  * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
  *
  *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
- *
- *  - We need a better method of triggering asts on other cpus.
+ *  - MP safe with regard to setting AST_PENDING because doreti runs
+ *    with interrupts disabled (cli) when it checks.
  */
 
        .text
@@ -427,11 +439,6 @@ Xcpucheckstate:
        .globl Xcpuast
 Xcpuast:
        PUSH_FRAME
-       movl    $KDSEL, %eax
-       mov     %ax, %ds                /* use KERNEL data segment */
-       mov     %ax, %es
-       movl    $KPSEL, %eax
-       mov     %ax, %fs
 
        movl    PCPU(cpuid), %eax
        lock                            /* checkstate_need_ast &= ~(1<<id) */
@@ -444,14 +451,8 @@ Xcpuast:
 
        FAKE_MCOUNT(13*4(%esp))
 
-       /* 
-        * Giant locks do not come cheap.
-        * A lot of cycles are going to be wasted here.
-        */
-       call    get_mplock
-
        movl    PCPU(curthread), %eax
-       pushl   TD_MACH+MTD_CPL(%eax)           /* cpl restored by doreti */
+       pushl   TD_CPL(%eax)            /* cpl restored by doreti */
 
        orl     $AST_PENDING, PCPU(astpending)  /* XXX */
        incb    PCPU(intr_nesting_level)
@@ -464,11 +465,7 @@ Xcpuast:
        btrl    %eax, CNAME(resched_cpus)
        jnc     2f
        orl     $AST_PENDING+AST_RESCHED,PCPU(astpending)
-       lock
-       incl    CNAME(want_resched_cnt)
 2:             
-       lock
-       incl    CNAME(cpuast_cnt)
        MEXITCOUNT
        jmp     doreti
 1:
@@ -486,27 +483,19 @@ Xcpuast:
        .globl Xforward_irq
 Xforward_irq:
        PUSH_FRAME
-       movl    $KDSEL, %eax
-       mov     %ax, %ds                /* use KERNEL data segment */
-       mov     %ax, %es
-       movl    $KPSEL, %eax
-       mov     %ax, %fs
 
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
 
        FAKE_MCOUNT(13*4(%esp))
 
-       MP_TRYLOCK
+       call    try_mplock
        testl   %eax,%eax               /* Did we get the lock ? */
        jz  1f                          /* No */
 
-       lock
-       incl    CNAME(forward_irq_hitcnt)
-       cmpb    $4, PCPU(intr_nesting_level)
-       jae     2f
+       incl    PCPU(cnt)+V_FORWARDED_HITS
        
        movl    PCPU(curthread), %eax
-       pushl   TD_MACH+MTD_CPL(%eax)           /* cpl restored by doreti */
+       pushl   TD_CPL(%eax)            /* cpl restored by doreti */
 
        incb    PCPU(intr_nesting_level)
        sti
@@ -514,17 +503,13 @@ Xforward_irq:
        MEXITCOUNT
        jmp     doreti                  /* Handle forwarded interrupt */
 1:
-       lock
-       incl    CNAME(forward_irq_misscnt)
+       incl    PCPU(cnt)+V_FORWARDED_MISSES
        call    forward_irq     /* Oops, we've lost the isr lock */
        MEXITCOUNT
        POP_FRAME
        iret
-2:
-       lock
-       incl    CNAME(forward_irq_toodeepcnt)
 3:     
-       MP_RELLOCK
+       call    rel_mplock
        MEXITCOUNT
        POP_FRAME
        iret
@@ -534,19 +519,19 @@ Xforward_irq:
  */
 forward_irq:
        MCOUNT
-       cmpl    $0,_invltlb_ok
+       cmpl    $0,invltlb_ok
        jz      4f
 
        cmpl    $0, CNAME(forward_irq_enabled)
        jz      4f
 
-       movl    _mp_lock,%eax
-       cmpl    $FREE_LOCK,%eax
+       movl    mp_lock,%eax
+       cmpl    $MP_FREE_LOCK,%eax
        jne     1f
        movl    $0, %eax                /* Pick CPU #0 if no one has lock */
 1:
        shrl    $24,%eax
-       movl    _cpu_num_to_apic_id(,%eax,4),%ecx
+       movl    cpu_num_to_apic_id(,%eax,4),%ecx
        shll    $24,%ecx
        movl    lapic_icr_hi, %eax
        andl    $~APIC_ID_MASK, %eax
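For readers unfamiliar with the ICR dance above, a hedged C equivalent of what
forward_irq programs into the local APIC; icr_command stands in for the
vector/delivery-mode bits this hunk does not show:

	/*
	 * Hypothetical C equivalent of the ICR programming in forward_irq:
	 * put the target's APIC ID in bits 24-31 of ICR_HI, then write
	 * ICR_LO to actually send the IPI.
	 */
	static void
	forward_irq_ipi_sketch(int target_cpu)
	{
		u_int32_t hi;

		hi = lapic.icr_hi & ~APIC_ID_MASK;
		hi |= cpu_num_to_apic_id[target_cpu] << 24;
		lapic.icr_hi = hi;
		lapic.icr_lo = icr_command;	/* vector + mode, elided here */
	}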
@@ -662,12 +647,11 @@ MCOUNT_LABEL(bintr)
        FAST_INTR(22,fastintr22)
        FAST_INTR(23,fastintr23)
        
+       /* YYY what is this garbage? */
 #define        CLKINTR_PENDING                                                 \
-       pushl $clock_lock ;                                             \
-       call s_lock ;                                                   \
+       call    clock_lock ;                                            \
        movl $1,CNAME(clkintr_pending) ;                                \
-       call s_unlock ;                                                 \
-       addl $4, %esp
+       call    clock_unlock ;                                          \
 
        INTR(0,intr0, CLKINTR_PENDING)
        INTR(1,intr1,)
@@ -693,17 +677,42 @@ MCOUNT_LABEL(bintr)
        INTR(21,intr21,)
        INTR(22,intr22,)
        INTR(23,intr23,)
+
+       FAST_UNPEND(0,fastunpend0)
+       FAST_UNPEND(1,fastunpend1)
+       FAST_UNPEND(2,fastunpend2)
+       FAST_UNPEND(3,fastunpend3)
+       FAST_UNPEND(4,fastunpend4)
+       FAST_UNPEND(5,fastunpend5)
+       FAST_UNPEND(6,fastunpend6)
+       FAST_UNPEND(7,fastunpend7)
+       FAST_UNPEND(8,fastunpend8)
+       FAST_UNPEND(9,fastunpend9)
+       FAST_UNPEND(10,fastunpend10)
+       FAST_UNPEND(11,fastunpend11)
+       FAST_UNPEND(12,fastunpend12)
+       FAST_UNPEND(13,fastunpend13)
+       FAST_UNPEND(14,fastunpend14)
+       FAST_UNPEND(15,fastunpend15)
+       FAST_UNPEND(16,fastunpend16)
+       FAST_UNPEND(17,fastunpend17)
+       FAST_UNPEND(18,fastunpend18)
+       FAST_UNPEND(19,fastunpend19)
+       FAST_UNPEND(20,fastunpend20)
+       FAST_UNPEND(21,fastunpend21)
+       FAST_UNPEND(22,fastunpend22)
+       FAST_UNPEND(23,fastunpend23)
 MCOUNT_LABEL(eintr)
 
-/*
- * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
- *
- * - Calls the generic rendezvous action function.
- */
+       /*
+        * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
+        *
+        * - Calls the generic rendezvous action function.
+        */
        .text
        SUPERALIGN_TEXT
-       .globl  _Xrendezvous
-_Xrendezvous:
+       .globl  Xrendezvous
+Xrendezvous:
        PUSH_FRAME
        movl    $KDSEL, %eax
        mov     %ax, %ds                /* use KERNEL data segment */
@@ -711,7 +720,7 @@ _Xrendezvous:
        movl    $KPSEL, %eax
        mov     %ax, %fs
 
-       call    _smp_rendezvous_action
+       call    smp_rendezvous_action
 
        movl    $0, lapic_eoi           /* End Of Interrupt to APIC */
        POP_FRAME
@@ -750,11 +759,8 @@ imasks:                            /* masks for interrupt handlers */
 
        .long   SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
        .long   SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
-#endif
+#endif /* 0 */
 
-/* active flag for lazy masking */
-iactive:
-       .long   0
 
 #ifdef COUNT_XINVLTLB_HITS
        .globl  xhits
@@ -779,25 +785,10 @@ checkstate_need_ast:
        .long   0
 checkstate_pending_ast:
        .long   0
-       .globl CNAME(forward_irq_misscnt)
-       .globl CNAME(forward_irq_toodeepcnt)
-       .globl CNAME(forward_irq_hitcnt)
        .globl CNAME(resched_cpus)
-       .globl CNAME(want_resched_cnt)
-       .globl CNAME(cpuast_cnt)
        .globl CNAME(cpustop_restartfunc)
-CNAME(forward_irq_misscnt):    
-       .long 0
-CNAME(forward_irq_hitcnt):     
-       .long 0
-CNAME(forward_irq_toodeepcnt):
-       .long 0
 CNAME(resched_cpus):
        .long 0
-CNAME(want_resched_cnt):
-       .long 0
-CNAME(cpuast_cnt):
-       .long 0
 CNAME(cpustop_restartfunc):
        .long 0
                
index 3f28c6e..c4e2a9c 100644 (file)
@@ -23,7 +23,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
- * $DragonFly: src/sys/i386/apic/Attic/mpapic.c,v 1.3 2003/07/04 00:32:24 dillon Exp $
+ * $DragonFly: src/sys/i386/apic/Attic/mpapic.c,v 1.4 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -74,11 +74,13 @@ apic_initialize(void)
        /* set the Task Priority Register as needed */
        temp = lapic.tpr;
        temp &= ~APIC_TPR_PRIO;         /* clear priority field */
-#ifdef GRAB_LOPRIO
-       /* Leave the BSP at TPR 0 during boot to make sure it gets interrupts */
+
+       /*
+        * Leave the BSP at TPR 0 during boot so it gets all the interrupts;
+        * set the APs to TPR 0xF0 at boot so they get no interrupts.
+        */
        if (mycpu->gd_cpuid != 0)
-               temp |= LOPRIO_LEVEL;   /* allow INT arbitration */
-#endif
+               temp |= TPR_IPI_ONLY;   /* disable INTs on this cpu */
        lapic.tpr = temp;
 
        /* enable the local APIC */
@@ -188,7 +190,6 @@ io_apic_setup_intpin(int apic, int pin)
        u_int32_t       target;         /* the window register is 32 bits */
        u_int32_t       vector;         /* the window register is 32 bits */
        int             level;
-       u_int           eflags;
 
        target = IOART_DEST;
 
@@ -209,14 +210,11 @@ io_apic_setup_intpin(int apic, int pin)
         * shouldn't and stop the carnage.
         */
        vector = NRSVIDT + pin;                 /* IDT vec */
-       eflags = read_eflags();
-       __asm __volatile("cli" : : : "memory");
-       s_lock(&imen_lock);
+       imen_lock();
        io_apic_write(apic, select,
                      (io_apic_read(apic, select) & ~IOART_INTMASK 
                       & ~0xff)|IOART_INTMSET|vector);
-       s_unlock(&imen_lock);
-       write_eflags(eflags);
+       imen_unlock();
        
        /* we only deal with vectored INTs here */
        if (apic_int_type(apic, pin) != 0)
@@ -260,13 +258,10 @@ io_apic_setup_intpin(int apic, int pin)
                printf("IOAPIC #%d intpin %d -> irq %d\n",
                       apic, pin, irq);
        vector = NRSVIDT + irq;                 /* IDT vec */
-       eflags = read_eflags();
-       __asm __volatile("cli" : : : "memory");
-       s_lock(&imen_lock);
+       imen_lock();
        io_apic_write(apic, select, flags | vector);
        io_apic_write(apic, select + 1, target);
-       s_unlock(&imen_lock);
-       write_eflags(eflags);
+       imen_unlock();
 }
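The imen_lock()/imen_unlock() wrappers used above are introduced elsewhere in
this commit; a plausible sketch of their shape, assuming they pair the new
imen_spinlock with interrupt disabling the way the removed open-coded sequence
did (the real helpers may save and restore eflags instead of enabling
unconditionally):

	/*
	 * Hypothetical sketch of the imen_lock()/imen_unlock() wrappers.
	 */
	static __inline void
	imen_lock_sketch(void)
	{
		cpu_disable_intr();		/* replaces read_eflags() + cli */
		spin_lock(&imen_spinlock);	/* replaces s_lock(&imen_lock) */
	}

	static __inline void
	imen_unlock_sketch(void)
	{
		spin_unlock(&imen_spinlock);
		cpu_enable_intr();	/* real code may restore eflags */
	}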
 
 int
index 512cb20..78f5a8a 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)autoconf.c    7.1 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/i386/autoconf.c,v 1.146.2.2 2001/06/07 06:05:58 dd Exp $
- * $DragonFly: src/sys/i386/i386/Attic/autoconf.c,v 1.4 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/autoconf.c,v 1.5 2003/07/06 21:23:48 dillon Exp $
  */
 
 /*
@@ -144,9 +144,9 @@ configure(dummy)
         */
 #ifdef APIC_IO
        bsp_apic_configure();
-       enable_intr();
+       cpu_enable_intr();
 #else
-       enable_intr();
+       cpu_enable_intr();
        INTREN(IRQ_SLAVE);
 #endif /* APIC_IO */
 
index 7021f1e..ce1c478 100644 (file)
@@ -24,7 +24,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/i386/i386/db_interface.c,v 1.48.2.1 2000/07/07 00:38:46 obrien Exp $
- * $DragonFly: src/sys/i386/i386/Attic/db_interface.c,v 1.3 2003/07/04 00:32:24 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/db_interface.c,v 1.4 2003/07/06 21:23:48 dillon Exp $
  */
 
 /*
@@ -64,6 +64,8 @@ static int    db_global_jmpbuf_valid;
 #define        rss() ({u_short ss; __asm __volatile("mov %%ss,%0" : "=r" (ss)); ss;})
 #endif
 
+#define VERBOSE_CPUSTOP_ON_DDBBREAK
+
 /*
  *  kdb_trap - field a TRACE or BPT trap
  */
@@ -139,7 +141,8 @@ kdb_trap(type, code, regs)
 #ifdef CPUSTOP_ON_DDBBREAK
 
 #if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
-       db_printf("\nCPU%d stopping CPUs: 0x%08x\n", cpuid, other_cpus);
+       db_printf("\nCPU%d stopping CPUs: 0x%08x\n", 
+           mycpu->gd_cpuid, mycpu->gd_other_cpus);
 #endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */
 
        /* We stop all CPUs except ourselves (obviously) */
@@ -168,7 +171,8 @@ kdb_trap(type, code, regs)
 #ifdef CPUSTOP_ON_DDBBREAK
 
 #if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
-       db_printf("\nCPU%d restarting CPUs: 0x%08x\n", cpuid, stopped_cpus);
+       db_printf("\nCPU%d restarting CPUs: 0x%08x\n",
+           mycpu->gd_cpuid, stopped_cpus);
 #endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */
 
        /* Restart all the CPUs we previously stopped */
index 8d41ad7..03b2a1e 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/exception.s,v 1.65.2.3 2001/08/15 01:23:49 peter Exp $
- * $DragonFly: src/sys/i386/i386/Attic/exception.s,v 1.10 2003/07/03 17:24:01 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/exception.s,v 1.11 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "npx.h"
@@ -194,8 +194,7 @@ IDTVEC(xmm)
         * Note that int0x80_syscall is a trap gate.  Only page faults
         * use an interrupt gate.
         *
-        * Note that all calls to MP_LOCK must occur with interrupts enabled
-        * in order to be able to take IPI's while waiting for the lock.
+        * Note that we are MP through to the call to trap().
         */
 
        SUPERALIGN_TEXT
@@ -216,7 +215,6 @@ alltraps_with_regs_pushed:
 calltrap:
        FAKE_MCOUNT(btrap)              /* init "from" _btrap -> calltrap */
        incl PCPU(cnt)+V_TRAP           /* YYY per-cpu */
-       MP_LOCK
        movl    PCPU(curthread),%eax    /* keep orig cpl here during call */
        movl    TD_CPL(%eax),%ebx
        call    trap
@@ -268,9 +266,6 @@ IDTVEC(syscall)
        cli                             /* atomic astpending access */
        cmpl    $0,PCPU(astpending)
        je      doreti_syscall_ret
-#ifdef SMP
-       MP_LOCK
-#endif
        pushl   $0                      /* cpl to restore */
        movl    $1,PCPU(intr_nesting_level)
        jmp     doreti
@@ -305,9 +300,6 @@ IDTVEC(int0x80_syscall)
        cli                             /* atomic astpending access */
        cmpl    $0,PCPU(astpending)
        je      doreti_syscall_ret
-#ifdef SMP
-       MP_LOCK
-#endif
        pushl   $0                      /* cpl to restore */
        movl    $1,PCPU(intr_nesting_level)
        jmp     doreti
@@ -318,6 +310,9 @@ IDTVEC(int0x80_syscall)
  * cpu_heavy_restore from being interrupted (especially since it stores
  * its context in a static place!), so the first thing we do is release
  * the critical section.
+ *
+ * The MP lock is held on entry, but for processes fork_return (esi)
+ * releases it.  'doreti' always runs without the MP lock.
  */
 ENTRY(fork_trampoline)
        movl    PCPU(curthread),%eax
index 3aec802..1bda990 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)genassym.c    5.11 (Berkeley) 5/10/91
  * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $
- * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.20 2003/07/04 00:32:24 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/genassym.c,v 1.21 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "opt_user_ldt.h"
@@ -50,6 +50,7 @@
 #include <sys/errno.h>
 #include <sys/mount.h>
 #include <sys/socket.h>
+#include <sys/lock.h>
 #include <sys/resourcevar.h>
 #include <machine/frame.h>
 #include <machine/bootinfo.h>
@@ -87,8 +88,14 @@ ASSYM(TD_SP, offsetof(struct thread, td_sp));
 ASSYM(TD_PRI, offsetof(struct thread, td_pri));
 ASSYM(TD_MACH, offsetof(struct thread, td_mach));
 ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan));
+#ifdef SMP
+ASSYM(TD_MPCOUNT, offsetof(struct thread, td_mpcount));
+#endif
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TDF_EXITED, TDF_EXITED);
+#ifdef SMP
+ASSYM(MP_FREE_LOCK, MP_FREE_LOCK);
+#endif
 
 ASSYM(RW_OWNER, offsetof(struct lwkt_rwlock, rw_owner));
 
@@ -101,6 +108,8 @@ ASSYM(SRUN, SRUN);
 ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
 ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
+ASSYM(V_FORWARDED_HITS, offsetof(struct vmmeter, v_forwarded_hits));
+ASSYM(V_FORWARDED_MISSES, offsetof(struct vmmeter, v_forwarded_misses));
 ASSYM(UPAGES, UPAGES);
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 ASSYM(NPTEPG, NPTEPG);
index d49c972..72c71ff 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8.2.4 2002/09/24 08:12:51 mdodd Exp $
- * $DragonFly: src/sys/i386/i386/Attic/i686_mem.c,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/i686_mem.c,v 1.3 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -39,6 +39,7 @@
 #ifdef SMP
 #include <machine/smp.h>
 #endif
+#include <machine/lock.h>
 
 /*
  * i686 memory range operations
@@ -268,9 +269,9 @@ i686_mrstore(struct mem_range_softc *sc)
      */
     smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
 #else
-    disable_intr();                            /* disable interrupts */
+    mpintr_lock();                     /* doesn't have to be mpintr YYY */
     i686_mrstoreone((void *)sc);
-    enable_intr();
+    mpintr_unlock();
 #endif
 }
 
index 5e764b1..27d62c5 100644 (file)
@@ -39,7 +39,7 @@
  *
  *     from: Id: machdep.c,v 1.193 1996/06/18 01:22:04 bde Exp
  * $FreeBSD: src/sys/i386/i386/identcpu.c,v 1.80.2.15 2003/04/11 17:06:41 jhb Exp $
- * $DragonFly: src/sys/i386/i386/Attic/identcpu.c,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/identcpu.c,v 1.3 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "opt_cpu.h"
@@ -813,12 +813,10 @@ identblue(void)
 static void
 identifycyrix(void)
 {
-       u_int   eflags;
        int     ccr2_test = 0, dir_test = 0;
        u_char  ccr2, ccr3;
 
-       eflags = read_eflags();
-       disable_intr();
+       mpintr_lock();
 
        ccr2 = read_cyrix_reg(CCR2);
        write_cyrix_reg(CCR2, ccr2 ^ CCR2_LOCK_NW);
@@ -843,7 +841,7 @@ identifycyrix(void)
        else
                cyrix_did = 0x00ff;             /* Old 486SLC/DLC and TI486SXLC/SXL */
 
-       write_eflags(eflags);
+       mpintr_unlock();
 }
 
 /*
@@ -1097,12 +1095,10 @@ u_int32_t longrun_modes[LONGRUN_MODE_MAX][3] = {
 static u_int 
 tmx86_get_longrun_mode(void)
 {
-       u_long          eflags;
        union msrinfo   msrinfo;
        u_int           low, high, flags, mode;
 
-       eflags = read_eflags();
-       disable_intr();
+       mpintr_lock();
 
        msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
        low = LONGRUN_MODE_MASK(msrinfo.regs[0]);
@@ -1118,40 +1114,36 @@ tmx86_get_longrun_mode(void)
        }
        mode = LONGRUN_MODE_UNKNOWN;
 out:
-       write_eflags(eflags);
+       mpintr_unlock();
        return (mode);
 }
 
 static u_int 
 tmx86_get_longrun_status(u_int * frequency, u_int * voltage, u_int * percentage)
 {
-       u_long          eflags;
        u_int           regs[4];
 
-       eflags = read_eflags();
-       disable_intr();
+       mpintr_lock();
 
        do_cpuid(0x80860007, regs);
        *frequency = regs[0];
        *voltage = regs[1];
        *percentage = regs[2];
 
-       write_eflags(eflags);
+       mpintr_unlock();
        return (1);
 }
 
 static u_int 
 tmx86_set_longrun_mode(u_int mode)
 {
-       u_long          eflags;
        union msrinfo   msrinfo;
 
        if (mode >= LONGRUN_MODE_UNKNOWN) {
                return (0);
        }
 
-       eflags = read_eflags();
-       disable_intr();
+       mpintr_lock();
 
        /* Write LongRun mode values to Model Specific Register. */
        msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
@@ -1166,7 +1158,7 @@ tmx86_set_longrun_mode(u_int mode)
        msrinfo.regs[0] = (msrinfo.regs[0] & ~0x01) | longrun_modes[mode][2];
        wrmsr(MSR_TMx86_LONGRUN_FLAGS, msrinfo.msr);
 
-       write_eflags(eflags);
+       mpintr_unlock();
        return (1);
 }
 
index 49e6967..dddbd08 100644 (file)
@@ -27,7 +27,7 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/initcpu.c,v 1.19.2.9 2003/04/05 13:47:19 dwmalone Exp $
- * $DragonFly: src/sys/i386/i386/Attic/initcpu.c,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/initcpu.c,v 1.3 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "opt_cpu.h"
@@ -87,7 +87,7 @@ init_bluelightning(void)
 #endif
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        invd();
@@ -121,7 +121,7 @@ init_486dlc(void)
        u_char  ccr0;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
        invd();
 
        ccr0 = read_cyrix_reg(CCR0);
@@ -167,7 +167,7 @@ init_cy486dx(void)
        u_char  ccr2;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
        invd();
 
        ccr2 = read_cyrix_reg(CCR2);
@@ -198,7 +198,7 @@ init_5x86(void)
        u_char  ccr2, ccr3, ccr4, pcr0;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();
@@ -302,7 +302,7 @@ init_i486_on_386(void)
 #endif
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */
 
@@ -322,7 +322,7 @@ init_6x86(void)
        u_char  ccr3, ccr4;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();
@@ -403,7 +403,7 @@ init_6x86MX(void)
        u_char  ccr3, ccr4;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();
@@ -483,7 +483,7 @@ init_mendocino(void)
        u_int64_t       bbl_cr_ctl3;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
 
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();
@@ -657,7 +657,7 @@ enable_K5_wt_alloc(void)
         * a stepping of 4 or greater.
         */
        if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
-               disable_intr();
+               cpu_disable_intr();
                msr = rdmsr(0x83);              /* HWCR */
                wrmsr(0x83, msr & !(0x10));
 
@@ -701,7 +701,7 @@ enable_K6_wt_alloc(void)
        u_long  eflags;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
        wbinvd();
 
 #ifdef CPU_DISABLE_CACHE
@@ -763,7 +763,7 @@ enable_K6_2_wt_alloc(void)
        u_long  eflags;
 
        eflags = read_eflags();
-       disable_intr();
+       cpu_disable_intr();
        wbinvd();
 
 #ifdef CPU_DISABLE_CACHE
@@ -832,7 +832,7 @@ DB_SHOW_COMMAND(cyrixreg, cyrixreg)
        cr0 = rcr0();
        if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
                eflags = read_eflags();
-               disable_intr();
+               cpu_disable_intr();
 
 
                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
index 98d5f75..84efb8b 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/k6_mem.c,v 1.4.2.2 2002/09/16 21:58:41 dwmalone Exp $
- * $DragonFly: src/sys/i386/i386/Attic/k6_mem.c,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/k6_mem.c,v 1.3 2003/07/06 21:23:48 dillon Exp $
  *
  */
 
@@ -37,6 +37,7 @@
 
 #include <machine/md_var.h>
 #include <machine/specialreg.h>
+#include <machine/lock.h>
 
 /*
  * A K6-2 MTRR is defined as the highest 15 bits having the address, the next
@@ -167,14 +168,14 @@ k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
 
 out:
        
-       disable_intr();
+       mpintr_lock();
        wbinvd();
        reg = rdmsr(UWCCR);
        reg &= ~(0xffffffff << (32 * d));
        reg |= mtrr << (32 * d);
        wrmsr(UWCCR, reg);
        wbinvd();
-       enable_intr();
+       mpintr_unlock();
 
        return 0;
 }
index c77d7b5..ceacbc8 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)machdep.c     7.4 (Berkeley) 6/3/91
  * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
- * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.21 2003/07/03 18:19:51 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.22 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "apm.h"
@@ -135,6 +135,7 @@ static void fill_fpregs_xmm __P((struct savexmm *, struct save87 *));
 #ifdef DIRECTIO
 extern void ffs_rawread_setup(void);
 #endif /* DIRECTIO */
+static void init_locks(void);
 
 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
 
@@ -950,7 +951,7 @@ cpu_halt(void)
  * Note on cpu_idle_hlt:  On an SMP system this may cause the system to 
  * halt until the next clock tick, even if a thread is ready YYY
  */
-static int     cpu_idle_hlt = 1;
+static int     cpu_idle_hlt = 0;
 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
     &cpu_idle_hlt, 0, "Idle loop HLT enable");
 
@@ -1829,6 +1830,7 @@ init386(int first)
         * Prevent lowering of the ipl if we call tsleep() early.
         */
        gd = &CPU_prvspace[0].mdglobaldata;
+       bzero(gd, sizeof(*gd));
 
        gd->mi.gd_curthread = &thread0;
 
@@ -1915,6 +1917,8 @@ init386(int first)
 #ifdef USER_LDT
        gd->gd_currentldt = _default_ldt;
 #endif
+       /* spinlocks and the BGL */
+       init_locks();
 
        /* exceptions */
        for (x = 0; x < NIDT; x++)
@@ -2633,3 +2637,66 @@ outb(u_int port, u_char data)
 }
 
 #endif /* DDB */
+
+
+
+#include "opt_cpu.h"
+#include "opt_htt.h"
+#include "opt_user_ldt.h"
+
+
+/*
+ * initialize all the SMP locks
+ */
+
+/* critical region around IO APIC, apic_imen */
+struct spinlock        imen_spinlock;
+
+/* Make FAST_INTR() routines sequential */
+struct spinlock        fast_intr_spinlock;
+
+/* critical region for old style disable_intr/enable_intr */
+struct spinlock        mpintr_spinlock;
+
+/* critical region around INTR() routines */
+struct spinlock        intr_spinlock;
+
+/* lock region used by kernel profiling */
+struct spinlock        mcount_spinlock;
+
+/* locks com (tty) data/hardware accesses: a FASTINTR() */
+struct spinlock        com_spinlock;
+
+/* locks kernel printfs */
+struct spinlock        cons_spinlock;
+
+/* lock regions around the clock hardware */
+struct spinlock        clock_spinlock;
+
+/* lock around the MP rendezvous */
+struct spinlock smp_rv_spinlock;
+
+static void
+init_locks(void)
+{
+       /*
+        * mp_lock = 0; BSP already owns the MP lock 
+        */
+       /*
+        * Get the initial mp_lock with a count of 1 for the BSP.
+        * This uses a LOGICAL cpu ID, ie BSP == 0.
+        */
+#ifdef SMP
+       cpu_get_initial_mplock();
+#endif
+       spin_lock_init(&mcount_spinlock);
+       spin_lock_init(&fast_intr_spinlock);
+       spin_lock_init(&intr_spinlock);
+       spin_lock_init(&mpintr_spinlock);
+       spin_lock_init(&imen_spinlock);
+       spin_lock_init(&smp_rv_spinlock);
+       spin_lock_init(&com_spinlock);
+       spin_lock_init(&clock_spinlock);
+       spin_lock_init(&cons_spinlock);
+}
+
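Only spin_lock_init() appears in this hunk; assuming spin_lock()/spin_unlock()
counterparts replace the old s_lock()/s_unlock(), a caller would look roughly
like:

	/*
	 * Hypothetical usage of the new spinlock API; spin_lock() and
	 * spin_unlock() are assumed counterparts to spin_lock_init().
	 */
	static void
	clock_access_sketch(void)
	{
		spin_lock(&clock_spinlock);
		/* ... touch the clock hardware ... */
		spin_unlock(&clock_spinlock);
	}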
index 6212465..f60a00a 100644 (file)
@@ -23,7 +23,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
- * $DragonFly: src/sys/i386/i386/Attic/mp_machdep.c,v 1.8 2003/06/28 04:16:02 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/mp_machdep.c,v 1.9 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "opt_cpu.h"
 #include <vm/pmap.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
-#ifdef BETTER_CLOCK
 #include <sys/lock.h>
 #include <vm/vm_map.h>
 #include <sys/user.h>
 #ifdef GPROF 
 #include <sys/gmon.h>
 #endif
-#endif
 
 #include <machine/smp.h>
 #include <machine/apic.h>
@@ -187,8 +185,8 @@ typedef struct BASETABLE_ENTRY {
  * it follows the very early stages of AP boot by placing values in CMOS ram.
  * it NORMALLY will never be needed and thus the primitive method for enabling.
  *
-#define CHECK_POINTS
  */
+#define CHECK_POINTS
 
 #if defined(CHECK_POINTS) && !defined(PC98)
 #define CHECK_READ(A)   (outb(CMOS_REG, (A)), inb(CMOS_DATA))
@@ -259,27 +257,7 @@ extern     int nkpt;
 u_int32_t cpu_apic_versions[MAXCPU];
 u_int32_t *io_apic_versions;
 
-#ifdef APIC_INTR_DIAGNOSTIC
-int apic_itrace_enter[32];
-int apic_itrace_tryisrlock[32];
-int apic_itrace_gotisrlock[32];
-int apic_itrace_active[32];
-int apic_itrace_masked[32];
-int apic_itrace_noisrlock[32];
-int apic_itrace_masked2[32];
-int apic_itrace_unmask[32];
-int apic_itrace_noforward[32];
-int apic_itrace_leave[32];
-int apic_itrace_enter2[32];
-int apic_itrace_doreti[32];
-int apic_itrace_splz[32];
-int apic_itrace_eoi[32];
-#ifdef APIC_INTR_DIAGNOSTIC_IRQ
-unsigned short apic_itrace_debugbuffer[32768];
-int apic_itrace_debugbuffer_idx;
-struct simplelock apic_itrace_debuglock;
-#endif
-#endif
+struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
 
 #ifdef APIC_INTR_REORDER
 struct {
@@ -288,7 +266,6 @@ struct {
 } apic_isrbit_location[32];
 #endif
 
-struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
 
 /*
  * APIC ID logical/physical mapping structures.
@@ -337,7 +314,6 @@ static int  mptable_pass2(void);
 static void    default_mp_table(int type);
 static void    fix_mp_table(void);
 static void    setup_apic_irq_mapping(void);
-static void    init_locks(void);
 static int     start_all_aps(u_int boot_addr);
 static void    install_ap_tramp(u_int boot_addr);
 static int     start_ap(int logicalCpu, u_int boot_addr);
@@ -463,11 +439,12 @@ init_secondary(void)
        int     gsel_tss;
        int     x, myid = bootAP;
        u_int   cr0;
+       struct mdglobaldata *md;
 
        gdt_segs[GPRIV_SEL].ssd_base = (int) &CPU_prvspace[myid];
        gdt_segs[GPROC0_SEL].ssd_base =
-               (int) &CPU_prvspace[myid].globaldata.gd_common_tss;
-       CPU_prvspace[myid].globaldata.gd_prvspace = &CPU_prvspace[myid];
+               (int) &CPU_prvspace[myid].mdglobaldata.gd_common_tss;
+       CPU_prvspace[myid].mdglobaldata.mi.gd_prvspace = &CPU_prvspace[myid];
 
        for (x = 0; x < NGDT; x++) {
                ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
@@ -486,11 +463,14 @@ init_secondary(void)
 
        gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
        gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
-       common_tss.tss_esp0 = 0;        /* not used until after switch */
-       common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
-       common_tss.tss_ioopt = (sizeof common_tss) << 16;
-       tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
-       common_tssd = *tss_gdt;
+
+       md = mdcpu;
+
+       md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
+       md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
+       md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
+       md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
+       md->gd_common_tssd = *md->gd_tss_gdt;
        ltr(gsel_tss);
 
        /*
@@ -575,9 +555,6 @@ mp_enable(u_int boot_addr)
        if (x)
                default_mp_table(x);
 
-       /* initialize all SMP locks */
-       init_locks();
-
        /* post scan cleanup */
        fix_mp_table();
        setup_apic_irq_mapping();
@@ -604,10 +581,12 @@ mp_enable(u_int boot_addr)
        setidt(XINVLTLB_OFFSET, Xinvltlb,
               SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
 
+#if 0
 #ifdef BETTER_CLOCK
        /* install an inter-CPU IPI for reading processor state */
        setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
               SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+#endif
 #endif
        
        /* install an inter-CPU IPI for all-CPU rendezvous */
@@ -745,7 +724,7 @@ static int lookup_bus_type  __P((char *name));
  * 1st pass on motherboard's Intel MP specification table.
  *
  * initializes:
- *     mp_ncpus = 1
+ *     ncpus = 1
  *
  * determines:
  *     cpu_apic_address (common to all CPUs)
@@ -862,7 +841,7 @@ mptable_pass1(void)
         * Count the BSP.
         * This is also used as a counter while starting the APs.
         */
-       mp_ncpus = 1;
+       ncpus = 1;
 
        --mp_naps;      /* subtract the BSP */
 }
@@ -1998,82 +1977,6 @@ default_mp_table(int type)
 #endif /* APIC_IO */
 }
 
-
-/*
- * initialize all the SMP locks
- */
-
-/* critical region around IO APIC, apic_imen */
-struct simplelock      imen_lock;
-
-/* critical region around splxx(), cpl, cml, cil, ipending */
-struct simplelock      cpl_lock;
-
-/* Make FAST_INTR() routines sequential */
-struct simplelock      fast_intr_lock;
-
-/* critical region around INTR() routines */
-struct simplelock      intr_lock;
-
-/* lock regions protected in UP kernel via cli/sti */
-struct simplelock      mpintr_lock;
-
-/* lock region used by kernel profiling */
-struct simplelock      mcount_lock;
-
-#ifdef USE_COMLOCK
-/* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock      com_lock;
-#endif /* USE_COMLOCK */
-
-#ifdef USE_CLOCKLOCK
-/* lock regions around the clock hardware */
-struct simplelock      clock_lock;
-#endif /* USE_CLOCKLOCK */
-
-/* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
-
-static void
-init_locks(void)
-{
-       /*
-        * Get the initial mp_lock with a count of 1 for the BSP.
-        * This uses a LOGICAL cpu ID, ie BSP == 0.
-        */
-       mp_lock = 0x00000001;
-
-#if 0
-       /* ISR uses its own "giant lock" */
-       isr_lock = FREE_LOCK;
-#endif
-
-#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
-       s_lock_init((struct simplelock*)&apic_itrace_debuglock);
-#endif
-
-       s_lock_init((struct simplelock*)&mpintr_lock);
-
-       s_lock_init((struct simplelock*)&mcount_lock);
-
-       s_lock_init((struct simplelock*)&fast_intr_lock);
-       s_lock_init((struct simplelock*)&intr_lock);
-       s_lock_init((struct simplelock*)&imen_lock);
-       s_lock_init((struct simplelock*)&cpl_lock);
-       s_lock_init(&smp_rv_lock);
-
-#ifdef USE_COMLOCK
-       s_lock_init((struct simplelock*)&com_lock);
-#endif /* USE_COMLOCK */
-#ifdef USE_CLOCKLOCK
-       s_lock_init((struct simplelock*)&clock_lock);
-#endif /* USE_CLOCKLOCK */
-}
-
-
-/* Wait for all APs to be fully initialized */
-extern int wait_ap(unsigned int);
-
 /*
  * start each AP in our list
  */
@@ -2083,7 +1986,7 @@ start_all_aps(u_int boot_addr)
        int     x, i, pg;
        u_char  mpbiosreason;
        u_long  mpbioswarmvec;
-       struct globaldata *gd;
+       struct mdglobaldata *gd;
        char *stack;
        uintptr_t kptbase;
 
@@ -2124,24 +2027,29 @@ start_all_aps(u_int boot_addr)
                pg = x * i386_btop(sizeof(struct privatespace));
 
                /* allocate a new private data page */
-               gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
+               gd = (struct mdglobaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
 
                /* wire it into the private page table page */
                SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
 
                /* allocate and set up an idle stack data page */
                stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
-               for (i = 0; i < UPAGES; i++)
+               for (i = 0; i < UPAGES; i++) {
                        SMPpt[pg + 5 + i] = (pt_entry_t)
                            (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
+               }
 
                SMPpt[pg + 1] = 0;              /* *gd_CMAP1 */
                SMPpt[pg + 2] = 0;              /* *gd_CMAP2 */
                SMPpt[pg + 3] = 0;              /* *gd_CMAP3 */
                SMPpt[pg + 4] = 0;              /* *gd_PMAP1 */
 
+               gd = &CPU_prvspace[x].mdglobaldata;     /* official location */
+               bzero(gd, sizeof(*gd));
+               gd->mi.gd_prvspace = &CPU_prvspace[x];
+
                /* prime data page for it to use */
-               mi_gdinit(gd, x);
+               mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);
                gd->gd_cpu_lockid = x << 24;
                gd->gd_CMAP1 = &SMPpt[pg + 1];
@@ -2161,7 +2069,10 @@ start_all_aps(u_int boot_addr)
                outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */
 #endif
 
-               bootSTK = &CPU_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
+               /*
+                * Setup the AP boot stack
+                */
+               bootSTK = &CPU_prvspace[x].idlestack[UPAGES*PAGE_SIZE/2];
                bootAP = x;
 
                /* attempt to start the Application Processor */
@@ -2183,7 +2094,7 @@ start_all_aps(u_int boot_addr)
        }
 
        /* build our map of 'other' CPUs */
-       other_cpus = all_cpus & ~(1 << cpuid);
+       mycpu->gd_other_cpus = all_cpus & ~(1 << mycpu->gd_cpuid);
 
        /* fill in our (BSP) APIC version */
        cpu_apic_versions[0] = lapic.version;
@@ -2196,24 +2107,15 @@ start_all_aps(u_int boot_addr)
 #endif
 
        /*
-        * Set up the idle context for the BSP.  Similar to above except
-        * that some was done by locore, some by pmap.c and some is implicit
-        * because the BSP is cpu#0 and the page is initially zero, and also
-        * because we can refer to variables by name on the BSP..
+        * NOTE!  The idlestack for the BSP was set up by locore.  Finish
+        * up, clean out the P==V mapping we did earlier.
         */
-
-       /* Allocate and setup BSP idle stack */
-       stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
-       for (i = 0; i < UPAGES; i++)
-               SMPpt[5 + i] = (pt_entry_t)
-                   (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
-
        for (x = 0; x < NKPT; x++)
                PTD[x] = 0;
        pmap_set_opt();
 
        /* number of APs actually started */
-       return mp_ncpus - 1;
+       return ncpus - 1;
 }
 
 
@@ -2301,7 +2203,10 @@ start_ap(int logical_cpu, u_int boot_addr)
        vector = (boot_addr >> 12) & 0xff;
 
        /* used as a watchpoint to signal AP startup */
-       cpus = mp_ncpus;
+       cpus = ncpus;
+
+       /* Make sure the target cpu sees everything */
+       wbinvd();
 
        /*
         * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
@@ -2358,12 +2263,12 @@ start_ap(int logical_cpu, u_int boot_addr)
                 /* spin */ ;
        u_sleep(200);           /* wait ~200uS */
 
-       /* wait for it to start */
+       /* wait for it to start, see ap_init() */
        set_apic_timer(5000000);/* == 5 seconds */
-       while (read_apic_timer())
-               if (mp_ncpus > cpus)
+       while (read_apic_timer()) {
+               if (ncpus > cpus)
                        return 1;       /* return SUCCESS */
-
+       }
        return 0;               /* return FAILURE */
 }
 
@@ -2473,29 +2378,42 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
           &forward_roundrobin_enabled, 0, "");
 
 /*
- * This is called once the rest of the system is up and running and we're
- * ready to let the AP's out of the pen.
+ * This is called once the mpboot code has gotten us properly relocated
+ * and the MMU turned on, etc.   ap_init() is actually the idle thread,
+ * and when it returns the scheduler will call the real cpu_idle() main
+ * loop for the idlethread.  Interrupts are disabled on entry and should
+ * remain disabled at return.
  */
-void ap_init(void);
 
 void
-ap_init()
+ap_init(void)
 {
        u_int   apic_id;
 
+       /*
+        * Signal the BSP that we have started up successfully by incrementing
+        * ncpus.  Note that we do not hold the BGL yet.  The BSP is waiting
+        * for our signal.
+        */
+       ++ncpus;
+
+       /*
+        * Get the MP lock so we can finish initializing.
+        */
+       while (cpu_try_mplock() == 0)
+           ;
+
        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();
 
-       smp_cpus++;
-
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
        lidt(&r_idt);
 #endif
 
        /* Build our map of 'other' CPUs. */
-       other_cpus = all_cpus & ~(1 << cpuid);
+       mycpu->gd_other_cpus = all_cpus & ~(1 << mycpu->gd_cpuid);
 
-       printf("SMP: AP CPU #%d Launched!\n", cpuid);
+       printf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);
 
        /* set up CPU registers and state */
        cpu_setregs();
@@ -2508,8 +2426,8 @@ ap_init()
 
        /* A quick check from sanity claus */
        apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
-       if (cpuid != apic_id) {
-               printf("SMP: cpuid = %d\n", cpuid);
+       if (mycpu->gd_cpuid != apic_id) {
+               printf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
                printf("SMP: apic_id = %d\n", apic_id);
                printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
                panic("cpuid mismatch! boom!!");
@@ -2522,15 +2440,23 @@ ap_init()
        mem_range_AP_init();
 
        /*
-        * Activate smp_invltlb, although strictly speaking, this isn't
-        * quite correct yet.  We should have a bitfield for cpus willing
-        * to accept TLB flush IPI's or something and sync them.
+        * Since we have the BGL, if smp_cpus matches ncpus then we are
+        * the last AP to get to this point and we can enable IPI's,
+        * tlb shootdowns, freezes, and so forth.
         */
-       if (smp_cpus == mp_ncpus) {
+       ++smp_cpus;
+       if (smp_cpus == ncpus) {
                invltlb_ok = 1;
                smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
                smp_active = 1;  /* historic */
        }
+
+       /*
+        * The idle loop doesn't expect the BGL to be held and, while
+        * lwkt_switch() normally cleans things up, this is a special case
+        * because we are returning almost directly into the idle loop.
+        */
+       cpu_rel_mplock();
 }
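
The ordering in ap_init() matters: ncpus is incremented before the BGL
is acquired, because the BSP's wait loop in start_ap() keys off ncpus
alone, while smp_cpus is only incremented with the BGL held, so the
"last AP" test cannot race.  In outline (a sketch of the code above,
not a replacement for it):

        ++ncpus;                        /* unlocked: releases the BSP's wait loop */
        while (cpu_try_mplock() == 0)   /* then serialize on the BGL */
                ;
        /* ... per-cpu initialization ... */
        ++smp_cpus;                     /* protected by the BGL */
        if (smp_cpus == ncpus)
                smp_started = 1;        /* last AP enables IPIs, shootdowns, etc */
        cpu_rel_mplock();               /* the idle loop expects the BGL free */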
 
 #ifdef BETTER_CLOCK
@@ -2540,7 +2466,7 @@ ap_init()
 #define CHECKSTATE_INTR        2
 
 /* Do not staticize.  Used from apic_vector.s */
-struct proc*   checkstate_curproc[MAXCPU];
+struct thread   *checkstate_curtd[MAXCPU];
 int            checkstate_cpustate[MAXCPU];
 u_long         checkstate_pc[MAXCPU];
 
@@ -2548,6 +2474,7 @@ u_long            checkstate_pc[MAXCPU];
         ((int)(((u_quad_t)((pc) - (prof)->pr_off) *    \
             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
 
+#if 0
 static void
 addupc_intr_forwarded(struct proc *p, int id, int *astmap)
 {
@@ -2567,28 +2494,30 @@ addupc_intr_forwarded(struct proc *p, int id, int *astmap)
                *astmap |= (1 << id);
        }
 }
+#endif
 
 static void
 forwarded_statclock(int id, int pscnt, int *astmap)
 {
+#if 0
        struct pstats *pstats;
        long rss;
        struct rusage *ru;
        struct vmspace *vm;
        int cpustate;
-       struct proc *p;
+       struct thread *td;
 #ifdef GPROF
        register struct gmonparam *g;
        int i;
 #endif
 
-       p = checkstate_curproc[id];
+       td = checkstate_curtd[id];
        cpustate = checkstate_cpustate[id];
 
        switch (cpustate) {
        case CHECKSTATE_USER:
-               if (p->p_flag & P_PROFIL)
-                       addupc_intr_forwarded(p, id, astmap);
+               if (td->td_proc && td->td_proc->p_flag & P_PROFIL)
+                       addupc_intr_forwarded(td->td_proc, id, astmap);
                if (pscnt > 1)
                        return;
                p->p_uticks++;
@@ -2657,6 +2586,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
                                ru->ru_maxrss = rss;
                }
        }
+#endif
 }
 
 void
@@ -2680,9 +2610,10 @@ forward_statclock(int pscnt)
        if (!smp_started || !invltlb_ok || cold || panicstr)
                return;
 
+       printf("forward_statclock\n");
        /* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
        
-       map = other_cpus & ~stopped_cpus ;
+       map = mycpu->gd_other_cpus & ~stopped_cpus ;
        checkstate_probed_cpus = 0;
        if (map != 0)
                selected_apic_ipi(map,
@@ -2707,8 +2638,8 @@ forward_statclock(int pscnt)
         */
        
        map = 0;
-       for (id = 0; id < mp_ncpus; id++) {
-               if (id == cpuid)
+       for (id = 0; id < ncpus; id++) {
+               if (id == mycpu->gd_cpuid)
                        continue;
                if (((1 << id) & checkstate_probed_cpus) == 0)
                        continue;
@@ -2737,8 +2668,10 @@ forward_hardclock(int pscnt)
 {
        int map;
        int id;
+#if 0
        struct proc *p;
        struct pstats *pstats;
+#endif
        int i;
 
        /* Kludge. We don't yet have separate locks for the interrupts
@@ -2757,7 +2690,7 @@ forward_hardclock(int pscnt)
 
        /* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
        
-       map = other_cpus & ~stopped_cpus ;
+       map = mycpu->gd_other_cpus & ~stopped_cpus ;
        checkstate_probed_cpus = 0;
        if (map != 0)
                selected_apic_ipi(map,
@@ -2783,11 +2716,13 @@ forward_hardclock(int pscnt)
         */
        
        map = 0;
-       for (id = 0; id < mp_ncpus; id++) {
-               if (id == cpuid)
+       for (id = 0; id < ncpus; id++) {
+               if (id == mycpu->gd_cpuid)
                        continue;
                if (((1 << id) & checkstate_probed_cpus) == 0)
                        continue;
+               printf("forward_hardclock\n");
+#if 0
                p = checkstate_curproc[id];
                if (p) {
                        pstats = p->p_stats;
@@ -2806,6 +2741,7 @@ forward_hardclock(int pscnt)
                if (stathz == 0) {
                        forwarded_statclock( id, pscnt, &map);
                }
+#endif
        }
        if (map != 0) {
                checkstate_need_ast |= map;
@@ -2830,6 +2766,8 @@ forward_hardclock(int pscnt)
 void 
 forward_signal(struct proc *p)
 {
+       /* YYY forward_signal */
+#if 0
        int map;
        int id;
        int i;
@@ -2873,11 +2811,14 @@ forward_signal(struct proc *p)
                if (id == p->p_oncpu)
                        return;
        }
+#endif
 }
 
 void
 forward_roundrobin(void)
 {
+       /* YYY forward_roundrobin */
+#if 0
        u_int map;
        int i;
 
@@ -2885,8 +2826,8 @@ forward_roundrobin(void)
                return;
        if (!forward_roundrobin_enabled)
                return;
-       resched_cpus |= other_cpus;
-       map = other_cpus & ~stopped_cpus ;
+       resched_cpus |= mycpu->gd_other_cpus;
+       map = mycpu->gd_other_cpus & ~stopped_cpus ;
 #if 1
        selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
 #else
@@ -2904,20 +2845,20 @@ forward_roundrobin(void)
                        break;
                }
        }
+#endif
 }
 
-
 #ifdef APIC_INTR_REORDER
 /*
- *     Maintain mapping from softintr vector to isr bit in local apic.
+ *     Maintain mapping from softintr vector to isr bit in local apic.
  */
 void
 set_lapic_isrloc(int intr, int vector)
 {
        if (intr < 0 || intr > 32)
-               panic("set_apic_isrloc: bad intr argument: %d",intr);
+              panic("set_apic_isrloc: bad intr argument: %d",intr);
        if (vector < ICU_OFFSET || vector > 255)
-               panic("set_apic_isrloc: bad vector argument: %d",vector);
+              panic("set_apic_isrloc: bad vector argument: %d",vector);
        apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
        apic_isrbit_location[intr].bit = (1<<(vector & 31));
 }
@@ -2946,14 +2887,14 @@ smp_rendezvous_action(void)
                smp_rv_setup_func(smp_rv_func_arg);
        /* spin on entry rendezvous */
        atomic_add_int(&smp_rv_waiters[0], 1);
-       while (smp_rv_waiters[0] < mp_ncpus)
+       while (smp_rv_waiters[0] < ncpus)
                ;
        /* action function */
        if (smp_rv_action_func != NULL)
                smp_rv_action_func(smp_rv_func_arg);
        /* spin on exit rendezvous */
        atomic_add_int(&smp_rv_waiters[1], 1);
-       while (smp_rv_waiters[1] < mp_ncpus)
+       while (smp_rv_waiters[1] < ncpus)
                ;
        /* teardown function */
        if (smp_rv_teardown_func != NULL)
@@ -2966,10 +2907,8 @@ smp_rendezvous(void (* setup_func)(void *),
               void (* teardown_func)(void *),
               void *arg)
 {
-       u_int   efl;
-       
-       /* obtain rendezvous lock */
-       s_lock(&smp_rv_lock);           /* XXX sleep here? NOWAIT flag? */
+       /* obtain rendezvous lock.  This disables interrupts */
+       spin_lock(&smp_rv_spinlock);    /* XXX sleep here? NOWAIT flag? */
 
        /* set static function pointers */
        smp_rv_setup_func = setup_func;
@@ -2979,19 +2918,12 @@ smp_rendezvous(void (* setup_func)(void *),
        smp_rv_waiters[0] = 0;
        smp_rv_waiters[1] = 0;
 
-       /* disable interrupts on this CPU, save interrupt status */
-       efl = read_eflags();
-       write_eflags(efl & ~PSL_I);
-
        /* signal other processors, which will enter the IPI with interrupts off */
        all_but_self_ipi(XRENDEZVOUS_OFFSET);
 
        /* call executor function */
        smp_rendezvous_action();
 
-       /* restore interrupt flag */
-       write_eflags(efl);
-
        /* release lock */
-       s_unlock(&smp_rv_lock);
+       spin_unlock(&smp_rv_spinlock);
 }
index 8e4c7cc..f2a7bcc 100644 (file)
@@ -23,7 +23,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
- * $DragonFly: src/sys/i386/i386/Attic/mpapic.c,v 1.3 2003/07/04 00:32:24 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/mpapic.c,v 1.4 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -74,11 +74,13 @@ apic_initialize(void)
        /* set the Task Priority Register as needed */
        temp = lapic.tpr;
        temp &= ~APIC_TPR_PRIO;         /* clear priority field */
-#ifdef GRAB_LOPRIO
-       /* Leave the BSP at TPR 0 during boot to make sure it gets interrupts */
+
+       /*
+        * Leave the BSP at TPR 0 during boot so it gets all the interrupts,
+        * set the APs at TPR 0xF0 at boot so they get no ints.
+        */
        if (mycpu->gd_cpuid != 0)
-               temp |= LOPRIO_LEVEL;   /* allow INT arbitration */
-#endif
+               temp |= TPR_IPI_ONLY;   /* disable INTs on this cpu */
        lapic.tpr = temp;
 
        /* enable the local APIC */
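
For reference, the local APIC delivers a fixed interrupt only when the
vector's priority class (vector >> 4) exceeds the class in TPR bits
7:4.  A sketch of the rule (not code from this commit; TPR_IPI_ONLY is
assumed to be 0xf0 per the comment above):

        static int
        tpr_allows(u_int vector, u_int tpr)
        {
                return ((vector >> 4) > (tpr >> 4));
        }

        /*
         * With tpr == 0xf0 no class can exceed 15, so all vectored
         * interrupts are inhibited -- matching the "get no ints"
         * comment above.
         */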
@@ -188,7 +190,6 @@ io_apic_setup_intpin(int apic, int pin)
        u_int32_t       target;         /* the window register is 32 bits */
        u_int32_t       vector;         /* the window register is 32 bits */
        int             level;
-       u_int           eflags;
 
        target = IOART_DEST;
 
@@ -209,14 +210,11 @@ io_apic_setup_intpin(int apic, int pin)
         * shouldn't and stop the carnage.
         */
        vector = NRSVIDT + pin;                 /* IDT vec */
-       eflags = read_eflags();
-       __asm __volatile("cli" : : : "memory");
-       s_lock(&imen_lock);
+       imen_lock();
        io_apic_write(apic, select,
                      (io_apic_read(apic, select) & ~IOART_INTMASK 
                       & ~0xff)|IOART_INTMSET|vector);
-       s_unlock(&imen_lock);
-       write_eflags(eflags);
+       imen_unlock();
        
        /* we only deal with vectored INTs here */
        if (apic_int_type(apic, pin) != 0)
@@ -260,13 +258,10 @@ io_apic_setup_intpin(int apic, int pin)
                printf("IOAPIC #%d intpin %d -> irq %d\n",
                       apic, pin, irq);
        vector = NRSVIDT + irq;                 /* IDT vec */
-       eflags = read_eflags();
-       __asm __volatile("cli" : : : "memory");
-       s_lock(&imen_lock);
+       imen_lock();
        io_apic_write(apic, select, flags | vector);
        io_apic_write(apic, select + 1, target);
-       s_unlock(&imen_lock);
-       write_eflags(eflags);
+       imen_unlock();
 }
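
The shape of this conversion, paraphrasing the two hunks above
(imen_lock()/imen_unlock() are the wrappers added in spinlock.s later
in this commit):

        /* old, repeated at each call site: */
        eflags = read_eflags();
        __asm __volatile("cli" : : : "memory");
        s_lock(&imen_lock);
        io_apic_write(apic, select, flags | vector);
        s_unlock(&imen_lock);
        write_eflags(eflags);

        /* new; the interrupt disable lives inside the wrapper: */
        imen_lock();
        io_apic_write(apic, select, flags | vector);
        imen_unlock();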
 
 int
index 7797a77..8c581ad 100644 (file)
@@ -32,7 +32,7 @@
  *             multiprocessor systems.
  *
  * $FreeBSD: src/sys/i386/i386/mpboot.s,v 1.13.2.3 2000/09/07 01:18:26 tegge Exp $
- * $DragonFly: src/sys/i386/i386/Attic/mpboot.s,v 1.3 2003/07/01 20:30:40 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/mpboot.s,v 1.4 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <machine/asmacros.h>          /* miscellaneous asm macros */
  * it follows the very early stages of AP boot by placing values in CMOS ram.
  * it NORMALLY will never be needed and thus the primitive method for enabling.
  *
-#define CHECK_POINTS
  */
 
+#define CHECK_POINTS
 #if defined(CHECK_POINTS) && !defined(PC98)
 
 #define CMOS_REG       (0x70)
 #define CMOS_DATA      (0x71)
 
 #define CHECKPOINT(A,D)                \
-       movb    $(A),%al ;      \
+       movb    $A,%al ;        \
        outb    %al,$CMOS_REG ; \
-       movb    $(D),%al ;      \
+       movb    D,%al ;         \
        outb    %al,$CMOS_DATA
 
 #else
 
 
 /*
- * the APs enter here from their trampoline code (bootMP, below)
+ * The APs enter here from their trampoline code (bootMP, below)
+ * NOTE: %fs is not set up until the call to init_secondary()!
  */
        .p2align 4
 
 NON_GPROF_ENTRY(MPentry)
-       CHECKPOINT(0x36, 3)
+       CHECKPOINT(0x36, $3)
        /* Now enable paging mode */
        movl    IdlePTD-KERNBASE, %eax
        movl    %eax,%cr3       
        movl    %cr0,%eax
        orl     $CR0_PE|CR0_PG,%eax             /* enable paging */
        movl    %eax,%cr0                       /* let the games begin! */
-       movl    bootSTK,%esp                    /* boot stack end loc. */
 
+       movl    bootSTK,%esp                    /* boot stack end loc. */
        pushl   $mp_begin                       /* jump to high mem */
-       ret
+       NON_GPROF_RET
 
        /*
         * Wait for the booting CPU to signal startup
         */
 mp_begin:      /* now running relocated at KERNBASE */
-       CHECKPOINT(0x37, 4)
+       CHECKPOINT(0x37, $4)
        call    init_secondary                  /* load i386 tables */
-       CHECKPOINT(0x38, 5)
+       CHECKPOINT(0x38, $5)
 
        /*
         * If the [BSP] CPU has support for VME, turn it on.
@@ -108,47 +109,23 @@ mp_begin: /* now running relocated at KERNBASE */
        andl    $~APIC_SVR_SWEN, %eax           /* clear software enable bit */
        movl    %eax, lapic_svr
 
-       /* signal our startup to the BSP */
+       /* data returned to BSP */
        movl    lapic_ver, %eax                 /* our version reg contents */
        movl    %eax, cpu_apic_versions         /* into [ 0 ] */
-       incl    mp_ncpus                        /* signal BSP */
-
-       CHECKPOINT(0x39, 6)
 
-       /* wait till we can get into the kernel */
-       call    boot_get_mplock
+       CHECKPOINT(0x39, $6)
 
-       /* Now, let's prepare for some REAL WORK :-) */
-       call    ap_init
-
-       call    rel_mplock
-       wbinvd                          /* Avoid livelock */
-2:     
-       cmpl    $0, CNAME(smp_started)  /* Wait for last AP to be ready */
-       jz      2b
-       call    get_mplock
-       
-       /* let her rip! (loads new stack) */
-       jmp     cpu_switch
-
-NON_GPROF_ENTRY(wait_ap)
-       pushl   %ebp
-       movl    %esp, %ebp
-       call    rel_mplock
-       wbinvd                          /* Avoid livelock */
-       movl    %eax, 8(%ebp)
-1:             
-       cmpl    $0, CNAME(smp_started)
-       jnz     2f
-       decl    %eax
-       cmpl    $0, %eax
-       jge     1b
-2:
-       call    get_mplock
-       movl    %ebp, %esp
-       popl    %ebp
+       /*
+        * Execute the context restore function for the idlethread which
+        * has conveniently been set as curthread.  Remember, %eax must
+        * contain the target thread.  Our BSP/AP synchronization occurs
+        * in ap_init().  We do not need to mess with the BGL for this
+        * because LWKT threads are self-contained on each cpu (or, at least,
+        * the idlethread is!).
+        */
+       movl    PCPU(curthread),%eax
+       movl    TD_SP(%eax),%esp
        ret
-       
 
 /*
  * This is the embedded trampoline or bootstrap that is
@@ -167,7 +144,7 @@ BOOTMP1:
 NON_GPROF_ENTRY(bootMP)
        .code16         
        cli
-       CHECKPOINT(0x34, 1)
+       CHECKPOINT(0x34, $1)
        /* First guarantee a 'clean slate' */
        xorl    %eax, %eax
        movl    %eax, %ebx
@@ -203,7 +180,7 @@ NON_GPROF_ENTRY(bootMP)
 
        .code32         
 protmode:
-       CHECKPOINT(0x35, 2)
+       CHECKPOINT(0x35, $2)
 
        /*
         * we are NOW running for the first time with %eip
index eda75de..207c3d2 100644 (file)
@@ -7,7 +7,7 @@
  * ----------------------------------------------------------------------------
  *
  * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
- * $DragonFly: src/sys/i386/i386/Attic/mplock.s,v 1.3 2003/07/01 20:30:40 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/mplock.s,v 1.4 2003/07/06 21:23:48 dillon Exp $
  *
  * Functions for locking between CPUs in a SMP system.
  *
 #include <machine/smptests.h>          /** GRAB_LOPRIO */
 #include <machine/apic.h>
 
-#define GLPROFILE_NOT
-
-#ifdef CHEAP_TPR
-
-/* we assumme that the 'reserved bits' can be written with zeros */
-
-#else /* CHEAP_TPR */
-
-#error HEADS UP: this code needs work
-/*
- * The APIC doc says that reserved bits must be written with whatever
- * value they currently contain, ie you should:        read, modify, write,
- * instead of just writing new values to the TPR register.  Current
- * silicon seems happy with just writing.  If the behaviour of the
- * silicon changes, all code that access the lapic_tpr must be modified.
- * The last version to contain such code was:
- *   Id: mplock.s,v 1.17 1997/08/10 20:59:07 fsmp Exp
- */
-
-#endif /* CHEAP_TPR */
-
-#ifdef GRAB_LOPRIO
-/*
- * Claim LOWest PRIOrity, ie. attempt to grab ALL INTerrupts.
- */
-
-/* after 1st acquire of lock we grab all hardware INTs */
-#define GRAB_HWI       movl    $ALLHWI_LEVEL, lapic_tpr
-
-/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
-#define ARB_HWI                movl    $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
-
-#else /* GRAB_LOPRIO */
-
-#define GRAB_HWI       /* nop */
-#define ARB_HWI                /* nop */
-
-#endif /* GRAB_LOPRIO */
+#include "assym.s"
 
+       .data
+       ALIGN_DATA
+#ifdef SMP
+       .globl  mp_lock
+mp_lock:
+       .long   -1                      /* initialized to not held */
+#endif
 
        .text
-
-#ifdef SMP 
-
-/***********************************************************************
- *  void MPgetlock_edx(unsigned int *lock : %edx)
- *  ----------------------------------
- *  Destroys   %eax, %ecx.  %edx must hold lock argument.
- *
- *  Grabs hardware interrupts on first aquire.
- *
- *  NOTE: Serialization is not required if we already hold the lock, since
- *  we already hold the lock, nor do we need a locked instruction if we 
- *  already hold the lock.
- */
-
-NON_GPROF_ENTRY(MPgetlock_edx)
+       SUPERALIGN_TEXT
+
+       /*
+        * Note on cmpxchgl: exchanges %ecx with mem if mem matches %eax.
+        * Z=1 (jz) on success.
+        */
+NON_GPROF_ENTRY(cpu_get_initial_mplock)
+       movl    PCPU(curthread),%ecx
+       movl    $1,TD_MPCOUNT(%ecx)     /* curthread has mpcount of 1 */
+       movl    $0,mp_lock              /* owned by cpu 0 */
+       NON_GPROF_RET
+
+       /*
+        * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
+        * only adjusts mp_lock.  It does not touch td_mpcount, and it
+        * must be called from inside a critical section.
+        */
+NON_GPROF_ENTRY(cpu_try_mplock)
+       movl    PCPU(cpuid),%ecx
+       movl    $-1,%eax
+       cmpxchgl %ecx,mp_lock           /* ecx<->mem if eax matches */
+       jnz     1f
+       movl    $1,%eax
+       NON_GPROF_RET
 1:
-       movl    (%edx), %eax            /* Get current contents of lock */
-       movl    %eax, %ecx
-       andl    $CPU_FIELD,%ecx
-       cmpl    cpu_lockid, %ecx        /* Do we already own the lock? */
-       jne     2f
-       incl    %eax                    /* yes, just bump the count */
-       movl    %eax, (%edx)            /* serialization not required */
-       ret
-2:
-       movl    $FREE_LOCK, %eax        /* lock must be free */
-       movl    cpu_lockid, %ecx
-       incl    %ecx
-       lock
-       cmpxchg %ecx, (%edx)            /* attempt to replace %eax<->%ecx */
-#ifdef GLPROFILE
-       jne     3f
-       incl    gethits2
-#else
-       jne     1b
-#endif /* GLPROFILE */
-       GRAB_HWI                        /* 1st acquire, grab hw INTs */
-       ret
-#ifdef GLPROFILE
-3:
-       incl    gethits3
-       jmp     1b
-#endif
-
-/***********************************************************************
- *  int MPtrylock(unsigned int *lock)
- *  ---------------------------------
- *  Destroys   %eax, %ecx and %edx.
- *  Returns    1 if lock was successfull
- */
+       movl    $0,%eax
+       NON_GPROF_RET
 
-NON_GPROF_ENTRY(MPtrylock)
-       movl    4(%esp), %edx           /* Get the address of the lock */
-
-       movl    $FREE_LOCK, %eax        /* Assume it's free */
-       movl    cpu_lockid, %ecx        /* - get pre-shifted logical cpu id */
-       incl    %ecx                    /* - new count is one */
-       lock
-       cmpxchg %ecx, (%edx)            /* - try it atomically */
-       jne     1f                      /* ...do not collect $200 */
-#ifdef GLPROFILE
-       incl    tryhits2
-#endif /* GLPROFILE */
-       GRAB_HWI                        /* 1st acquire, grab hw INTs */
-       movl    $1, %eax
-       ret
+NON_GPROF_ENTRY(get_mplock)
+       movl    PCPU(curthread),%edx
+       cmpl    $0,TD_MPCOUNT(%edx)
+       je      1f
+       incl    TD_MPCOUNT(%edx)        /* already have it, just ++mpcount */
+       NON_GPROF_RET
 1:
-       movl    (%edx), %eax            /* Try to see if we have it already */
-       andl    $COUNT_FIELD, %eax      /* - get count */
-       movl    cpu_lockid, %ecx        /* - get pre-shifted logical cpu id */
-       orl     %ecx, %eax              /* - combine them */
-       movl    %eax, %ecx
-       incl    %ecx                    /* - new count is one more */
-       lock
-       cmpxchg %ecx, (%edx)            /* - try it atomically */
-       jne     2f                      /* - miss */
-#ifdef GLPROFILE
-       incl    tryhits
-#endif /* GLPROFILE */
-       movl    $1, %eax
-       ret
-2:
-#ifdef GLPROFILE
-       incl    tryhits3
-#endif /* GLPROFILE */
-       movl    $0, %eax
-       ret
-
-
-/***********************************************************************
- *  void MPrellock_edx(unsigned int *lock : %edx)
- *  ----------------------------------
- *  Destroys   %ecx, argument must be in %edx
- *
- *  SERIALIZATION NOTE!
- *
- *  After a lot of arguing, it turns out that there is no problem with
- *  not having a synchronizing instruction in the MP unlock code.  There
- *  are two things to keep in mind:  First, Intel guarentees that writes
- *  are ordered amoungst themselves.  Second, the P6 is allowed to reorder
- *  reads around writes.  Third, the P6 maintains cache consistency (snoops
- *  the bus).  The second is not an issue since the one read we do is the 
- *  basis for the conditional which determines whether the write will be 
- *  made or not.
- *
- *  Therefore, no synchronizing instruction is required on unlock.  There are
- *  three performance cases:  First, if a single cpu is getting and releasing
- *  the lock the removal of the synchronizing instruction saves approx
- *  200 nS (testing w/ duel cpu PIII 450).  Second, if one cpu is contending
- *  for the lock while the other holds it, the removal of the synchronizing
- *  instruction results in a 700nS LOSS in performance.  Third, if two cpu's
- *  are switching off ownership of the MP lock but not contending for it (the
- *  most common case), this results in a 400nS IMPROVEMENT in performance.
- *
- *  Since our goal is to reduce lock contention in the first place, we have
- *  decided to remove the synchronizing instruction from the unlock code.
- */
-
-NON_GPROF_ENTRY(MPrellock_edx)
-       movl    (%edx), %ecx            /* - get the value */
-       decl    %ecx                    /* - new count is one less */
-       testl   $COUNT_FIELD, %ecx      /* - Unless it's zero... */
+       pushfl
+       cli
+       movl    $1,TD_MPCOUNT(%edx)
+       movl    PCPU(cpuid),%ecx
+       movl    $-1,%eax
+       cmpxchgl %ecx,mp_lock           /* ecx<->mem & JZ if eax matches */
        jnz     2f
-       ARB_HWI                         /* last release, arbitrate hw INTs */
-       movl    $FREE_LOCK, %ecx        /* - In which case we release it */
-#if 0
-       lock
-       addl    $0,0(%esp)              /* see note above */
-#endif
+       popfl                           /* success */
+       NON_GPROF_RET
 2:
-       movl    %ecx, (%edx)
-       ret
-
-/***********************************************************************
- *  void get_mplock()
- *  -----------------
- *  All registers preserved
- *
- *  Stack (after call to _MPgetlock):
- *     
- *     edx              4(%esp)
- *     ecx              8(%esp)
- *     eax             12(%esp)
- *
- * Requirements:  Interrupts should be enabled on call so we can take
- *               IPI's and FAST INTs while we are waiting for the lock
- *               (else the system may not be able to halt).
- *
- *               XXX there are still places where get_mplock() is called
- *               with interrupts disabled, so we have to temporarily reenable
- *               interrupts.
- *
- * Side effects:  The current cpu will be given ownership of the
- *               hardware interrupts when it first aquires the lock.
- *
- * Costs:        Initial aquisition requires the use of a costly locked
- *               instruction, but recursive aquisition is cheap.  Release
- *               is very cheap.
- */
+       movl    PCPU(cpuid),%eax        /* failure */
+       cmpl    %eax,mp_lock
+       je      badmp_get
+       popfl
+       jmp     lwkt_switch             /* will be correct on return */
 
-NON_GPROF_ENTRY(get_mplock)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       movl    $mp_lock, %edx
-       pushfl  
-       testl   $(1<<9), (%esp)
-       jz     2f           
-       call    MPgetlock_edx
-       addl    $4,%esp
+NON_GPROF_ENTRY(try_mplock)
+       movl    PCPU(curthread),%edx
+       cmpl    $0,TD_MPCOUNT(%edx)
+       je      1f
+       incl    TD_MPCOUNT(%edx)        /* already have it, just ++mpcount */
+       movl    $1,%eax
+       NON_GPROF_RET
 1:
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       ret
+       pushfl
+       cli
+       movl    PCPU(cpuid),%ecx
+       movl    $-1,%eax
+       cmpxchgl %ecx,mp_lock           /* ecx<->mem & JZ if eax matches */
+       jnz     2f
+       movl    $1,TD_MPCOUNT(%edx)
+       popfl                           /* success */
+       movl    $1,%eax
+       NON_GPROF_RET
 2:
-       sti
-       call    MPgetlock_edx
+       movl    PCPU(cpuid),%eax        /* failure */
+       cmpl    %eax,mp_lock
+       je      badmp_get
        popfl
-       jmp     1b
+       movl    $0,%eax
+       NON_GPROF_RET
 
-/*
- * Special version of get_mplock that is used during bootstrap when we can't
- * yet enable interrupts of any sort since the APIC isn't online yet.  We
- * do an endrun around MPgetlock_edx to avoid enabling interrupts.
- *
- * XXX FIXME.. - APIC should be online from the start to simplify IPI's.
- */
-NON_GPROF_ENTRY(boot_get_mplock)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-#ifdef GRAB_LOPRIO     
+NON_GPROF_ENTRY(rel_mplock)
+       movl    PCPU(curthread),%edx
+       cmpl    $1,TD_MPCOUNT(%edx)
+       je      1f
+       subl    $1,TD_MPCOUNT(%edx)
+       NON_GPROF_RET
+1:
        pushfl
-       pushl   lapic_tpr
        cli
-#endif
-       
-       movl    $mp_lock, %edx
-       call    MPgetlock_edx
-
-#ifdef GRAB_LOPRIO     
-       popl    lapic_tpr
+       movl    $0,TD_MPCOUNT(%edx)
+       movl    $MP_FREE_LOCK,mp_lock
        popfl
-#endif
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       ret
-
-/***********************************************************************
- *  void try_mplock()
- *  -----------------
- *  reg %eax == 1 if success
- */
-
-NON_GPROF_ENTRY(try_mplock)
-       pushl   %ecx
-       pushl   %edx
-       pushl   $mp_lock
-       call    MPtrylock
-       add     $4, %esp
-       popl    %edx
-       popl    %ecx
-       ret
-
-/***********************************************************************
- *  void rel_mplock()
- *  -----------------
- *  All registers preserved
- */
-
-NON_GPROF_ENTRY(rel_mplock)
-       pushl   %ecx
-       pushl   %edx
-       movl    $mp_lock,%edx
-       call    MPrellock_edx
-       popl    %edx
-       popl    %ecx
-       ret
+       NON_GPROF_RET
 
-#endif
+badmp_get:
+       pushl   $bmpsw1
+       call    panic
+badmp_rel:
+       pushl   $bmpsw2
+       call    panic
 
-/***********************************************************************
- * 
- */
        .data
-       .p2align 2                      /* xx_lock aligned on int boundary */
 
-#ifdef SMP
+bmpsw1:
+       .asciz  "try/get_mplock(): already have lock!"
 
-       .globl mp_lock
-mp_lock:       .long   0               
+bmpsw2:
+       .asciz  "rel_mplock(): not holding lock!"
 
-#ifdef GLPROFILE
-       .globl  gethits
-gethits:
-       .long   0
-gethits2:
-       .long   0
-gethits3:
-       .long   0
+#if 0
+/* after 1st acquire of lock we grab all hardware INTs */
+#ifdef GRAB_LOPRIO
+#define GRAB_HWI       movl    $ALLHWI_LEVEL, lapic_tpr
 
-       .globl  tryhits
-tryhits:
-       .long   0
-tryhits2:
-       .long   0
-tryhits3:
-       .long   0
+/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
+#define ARB_HWI                movl    $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
+#endif
+#endif
 
-msg:
-       .asciz  "lock hits: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n"
-#endif /* GLPROFILE */
-#endif /* SMP */
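
The new BGL drops the old cpu_lockid/count-in-one-word encoding:
mp_lock is now just the owning cpu id (-1 when free, per the .long -1
initializer) and the recursion count lives in the per-thread
td_mpcount.  A rough C equivalent of the primitives above, with
atomic_cmpset_int() standing in for the cmpxchgl and the eflags and
sanity-check handling omitted:

        static int
        cpu_try_mplock_sketch(void)
        {
                /* atomically: if (mp_lock == -1) mp_lock = cpuid; */
                return (atomic_cmpset_int(&mp_lock, MP_FREE_LOCK,
                    mycpu->gd_cpuid));
        }

        void
        get_mplock_sketch(void)
        {
                struct thread *td = curthread;

                if (td->td_mpcount++ != 0)
                        return;         /* recursion: the count is enough */
                if (cpu_try_mplock_sketch() == 0)
                        lwkt_switch();  /* scheduler reacquires before resuming */
        }

        void
        rel_mplock_sketch(void)
        {
                struct thread *td = curthread;

                if (--td->td_mpcount == 0)
                        mp_lock = MP_FREE_LOCK; /* plain store releases */
        }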
index 9b1bbd5..77a19a3 100644 (file)
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/perfmon.c,v 1.21 1999/09/25 18:24:04 phk Exp $
- * $DragonFly: src/sys/i386/i386/Attic/perfmon.c,v 1.3 2003/06/23 17:55:38 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/perfmon.c,v 1.4 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -124,11 +124,11 @@ perfmon_setup(int pmc, unsigned int control)
 
        perfmon_inuse |= (1 << pmc);
        control &= ~(PMCF_SYS_FLAGS << 16);
-       disable_intr();
+       mpintr_lock();  /* doesn't have to be mpintr_lock YYY */
        ctl_shadow[pmc] = control;
        writectl(pmc);
        wrmsr(msr_pmc[pmc], pmc_shadow[pmc] = 0);
-       enable_intr();
+       mpintr_unlock();
        return 0;
 }
 
@@ -167,11 +167,11 @@ perfmon_start(int pmc)
                return EINVAL;
 
        if (perfmon_inuse & (1 << pmc)) {
-               disable_intr();
+               mpintr_lock();  /* doesn't have to be mpintr YYY */
                ctl_shadow[pmc] |= (PMCF_EN << 16);
                wrmsr(msr_pmc[pmc], pmc_shadow[pmc]);
                writectl(pmc);
-               enable_intr();
+               mpintr_unlock();
                return 0;
        }
        return EBUSY;
@@ -184,11 +184,11 @@ perfmon_stop(int pmc)
                return EINVAL;
 
        if (perfmon_inuse & (1 << pmc)) {
-               disable_intr();
+               mpintr_lock();
                pmc_shadow[pmc] = rdmsr(msr_pmc[pmc]) & 0xffffffffffULL;
                ctl_shadow[pmc] &= ~(PMCF_EN << 16);
                writectl(pmc);
-               enable_intr();
+               mpintr_unlock();
                return 0;
        }
        return EBUSY;
index 3210ed5..3074043 100644 (file)
@@ -40,7 +40,7 @@
  *
  *     from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
  * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
- * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.15 2003/07/04 00:32:24 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.16 2003/07/06 21:23:48 dillon Exp $
  */
 
 /*
@@ -373,25 +373,27 @@ pmap_bootstrap(firstaddr, loadaddr)
                ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
                pdir4mb = ptditmp;
 
-               if (ncpus == 1) {
-                       /*
-                        * Enable the PSE mode.
-                        */
-                       load_cr4(rcr4() | CR4_PSE);
+#ifndef SMP
+               /*
+                * Enable the PSE mode.  If we are SMP we can't do this
+                * now because the APs will not be able to use it when
+                * they boot up.
+                */
+               load_cr4(rcr4() | CR4_PSE);
 
-                       /*
-                        * We can do the mapping here for the single processor
-                        * case.  We simply ignore the old page table page from
-                        * now on.
-                        */
-                       /*
-                        * For SMP, we still need 4K pages to bootstrap APs,
-                        * PSE will be enabled as soon as all APs are up.
-                        */
-                       PTD[KPTDI] = (pd_entry_t) ptditmp;
-                       kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
-                       invltlb();
-               }
+               /*
+                * We can do the mapping here for the single processor
+                * case.  We simply ignore the old page table page from
+                * now on.
+                */
+               /*
+                * For SMP, we still need 4K pages to bootstrap APs,
+                * PSE will be enabled as soon as all APs are up.
+                */
+               PTD[KPTDI] = (pd_entry_t) ptditmp;
+               kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
+               invltlb();
+#endif
        }
 #endif
 #ifdef APIC_IO
@@ -827,6 +829,9 @@ pmap_init_proc(struct proc *p, struct thread *td)
        p->p_thread = td;
        td->td_proc = p;
        td->td_switch = cpu_heavy_switch;
+#ifdef SMP
+       td->td_mpcount = 1;
+#endif
        bzero(p->p_addr, sizeof(*p->p_addr));
 }
 
@@ -1405,21 +1410,20 @@ pmap_reference(pmap)
  ***************************************************/
 
 /*
- * free the pv_entry back to the free list
+ * free the pv_entry back to the free list.  This function may be
+ * called from an interrupt.
  */
 static PMAP_INLINE void
 free_pv_entry(pv)
        pv_entry_t pv;
 {
        pv_entry_count--;
-       zfreei(pvzone, pv);
+       zfree(pvzone, pv);
 }
 
 /*
  * get a new pv_entry, allocating a block from the system
- * when needed.
- * the memory allocation is performed bypassing the malloc code
- * because of the possibility of allocations at interrupt time.
+ * when needed.  This function may be called from an interrupt.
  */
 static pv_entry_t
 get_pv_entry(void)
@@ -1431,7 +1435,7 @@ get_pv_entry(void)
                pmap_pagedaemon_waken = 1;
                wakeup (&vm_pages_needed);
        }
-       return zalloci(pvzone);
+       return zalloc(pvzone);
 }
 
 /*
diff --git a/sys/i386/i386/simplelock.s b/sys/i386/i386/simplelock.s
deleted file mode 100644 (file)
index 92c23d6..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-/*-
- * Copyright (c) 1997, by Steve Passe
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the developer may NOT be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: src/sys/i386/i386/simplelock.s,v 1.11.2.2 2003/02/04 20:55:28 jhb Exp $
- * $DragonFly: src/sys/i386/i386/Attic/simplelock.s,v 1.3 2003/07/01 20:30:40 dillon Exp $
- */
-
-/*
- * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
- */
-
-#include <machine/asmacros.h>                  /* miscellaneous macros */
-#include <i386/isa/intr_machdep.h>
-#include <machine/psl.h>
-       
-#include <machine/smptests.h>                  /** FAST_HI */
-
-/*
- * The following impliments the primitives described in i386/i386/param.h
- * necessary for the Lite2 lock manager system.
- * The major difference is that the "volatility" of the lock datum has been
- * pushed down from the various functions to lock_data itself.
- */
-
-/*
- * The simple-lock routines are the primitives out of which the lock
- * package is built. The machine-dependent code must implement an
- * atomic test_and_set operation that indivisibly sets the simple lock
- * to non-zero and returns its old value. It also assumes that the
- * setting of the lock to zero below is indivisible. Simple locks may
- * only be used for exclusive locks.
- * 
- * struct simplelock {
- *     volatile int    lock_data;
- * };
- */
-
-/*
- * void
- * s_lock_init(struct simplelock *lkp)
- * {
- *     lkp->lock_data = 0;
- * }
- */
-ENTRY(s_lock_init)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $0, (%eax)
-       ret
-
-
-/*
- * void
- * s_lock(struct simplelock *lkp)
- * {
- *     while (test_and_set(&lkp->lock_data))
- *             continue;
- * }
- *
- * Note:
- *     If the acquire fails we do a loop of reads waiting for the lock to
- *     become free instead of continually beating on the lock with xchgl.
- *     The theory here is that the CPU will stay within its cache until
- *     a write by the other CPU updates it, instead of continually updating
- *     the local cache (and thus causing external bus writes) with repeated
- *     writes to the lock.
- */
-#ifndef SL_DEBUG
-
-ENTRY(s_lock)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $1, %ecx
-setlock:
-       xchgl   %ecx, (%eax)
-       testl   %ecx, %ecx
-       jz      gotit                   /* it was clear, return */
-wait:
-       pause
-       cmpl    $0, (%eax)              /* wait to empty */
-       jne     wait                    /* still set... */
-       jmp     setlock                 /* empty again, try once more */
-gotit:
-       ret
-
-#else /* SL_DEBUG */
-
-ENTRY(s_lock)
-       movl    4(%esp), %edx           /* get the address of the lock */
-setlock:
-       movl    _cpu_lockid, %ecx       /* add cpu id portion */
-       incl    %ecx                    /* add lock portion */
-       movl    $0, %eax
-       lock
-       cmpxchgl %ecx, (%edx)
-       jz      gotit                   /* it was clear, return */
-       pushl   %eax                    /* save what we xchanged */
-       decl    %eax                    /* remove lock portion */
-       cmpl    _cpu_lockid, %eax       /* do we hold it? */
-       je      bad_slock               /* yes, thats not good... */
-       addl    $4, %esp                /* clear the stack */
-wait:
-       pause
-       cmpl    $0, (%edx)              /* wait to empty */
-       jne     wait                    /* still set... */
-       jmp     setlock                 /* empty again, try once more */
-gotit:
-       ret
-
-       ALIGN_TEXT
-bad_slock:
-       /* %eax (current lock) is already on the stack */
-       pushl   %edx
-       pushl   cpuid
-       pushl   $bsl1
-       call    panic
-
-bsl1:  .asciz  "rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
-
-#endif /* SL_DEBUG */
-
-
-/*
- * int
- * s_lock_try(struct simplelock *lkp)
- * {
- *     return (!test_and_set(&lkp->lock_data));
- * }
- */
-#ifndef SL_DEBUG
-
-ENTRY(s_lock_try)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $1, %ecx
-
-       xchgl   %ecx, (%eax)
-       testl   %ecx, %ecx
-       setz    %al                     /* 1 if previous value was 0 */
-       movzbl  %al, %eax               /* convert to an int */
-
-       ret
-
-#else /* SL_DEBUG */
-
-ENTRY(s_lock_try)
-       movl    4(%esp), %edx           /* get the address of the lock */
-       movl    cpu_lockid, %ecx        /* add cpu id portion */
-       incl    %ecx                    /* add lock portion */
-
-       xorl    %eax, %eax
-       lock
-       cmpxchgl %ecx, (%edx)
-       setz    %al                     /* 1 if previous value was 0 */
-       movzbl  %al, %eax               /* convert to an int */
-
-       ret
-
-#endif /* SL_DEBUG */
-
-
-/*
- * void
- * s_unlock(struct simplelock *lkp)
- * {
- *     lkp->lock_data = 0;
- * }
- */
-ENTRY(s_unlock)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $0, (%eax)
-       ret
-
-#if 0
-
-/*
- *     XXX CRUFTY SS_LOCK IMPLEMENTATION REMOVED XXX
- *
- * These versions of simple_lock block interrupts,
- * making it suitable for regions accessed by both top and bottom levels.
- * This is done by saving the current value of the cpu flags in a per-cpu
- * global, and disabling interrupts when the lock is taken.  When the
- * lock is released, interrupts might be enabled, depending upon the saved
- * cpu flags.
- * Because of this, it must ONLY be used for SHORT, deterministic paths!
- *
- * Note:
- * It would appear to be "bad behaviour" to blindly store a value in
- * ss_eflags, as this could destroy the previous contents.  But since ss_eflags
- * is a per-cpu variable, and its fatal to attempt to acquire a simplelock
- * that you already hold, we get away with it.  This needs to be cleaned
- * up someday...
- */
-
-/*
- * void ss_lock(struct simplelock *lkp)
- */
-#ifndef SL_DEBUG
-
-ENTRY(ss_lock)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $1, %ecx                /* value for a held lock */
-ssetlock:
-       pushfl
-       cli
-       xchgl   %ecx, (%eax)            /* compete */
-       testl   %ecx, %ecx
-       jz      sgotit                  /* it was clear, return */
-       popfl                           /* previous value while waiting */
-swait:
-       pause
-       cmpl    $0, (%eax)              /* wait to empty */
-       jne     swait                   /* still set... */
-       jmp     ssetlock                /* empty again, try once more */
-sgotit:
-       popl    ss_eflags               /* save the old eflags */
-       ret
-
-#else /* SL_DEBUG */
-
-ENTRY(ss_lock)
-       movl    4(%esp), %edx           /* get the address of the lock */
-ssetlock:
-       movl    cpu_lockid, %ecx        /* add cpu id portion */
-       incl    %ecx                    /* add lock portion */
-       pushfl
-       cli
-       movl    $0, %eax
-       lock
-       cmpxchgl %ecx, (%edx)           /* compete */
-       jz      sgotit                  /* it was clear, return */
-       pushl   %eax                    /* save what we xchanged */
-       decl    %eax                    /* remove lock portion */
-       cmpl    cpu_lockid, %eax        /* do we hold it? */
-       je      sbad_slock              /* yes, thats not good... */
-       addl    $4, %esp                /* clear the stack */
-       popfl
-swait:
-       pause
-       cmpl    $0, (%edx)              /* wait to empty */
-       jne     swait                   /* still set... */
-       jmp     ssetlock                /* empty again, try once more */
-sgotit:
-       popl    ss_eflags               /* save the old task priority */
-sgotit2:
-       ret
-
-       ALIGN_TEXT
-sbad_slock:
-       /* %eax (current lock) is already on the stack */
-       pushl   %edx
-       pushl   cpuid
-       pushl   $sbsl1
-       call    panic
-
-sbsl1: .asciz  "rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
-
-#endif /* SL_DEBUG */
-
-/*
- * void ss_unlock(struct simplelock *lkp)
- */
-ENTRY(ss_unlock)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $0, (%eax)              /* clear the simple lock */
-       testl   $PSL_I, ss_eflags
-       jz      ss_unlock2
-       sti
-ss_unlock2:    
-       ret
-
-#endif
-
-/* 
- * These versions of simple_lock does not contain calls to profiling code.
- * Thus they can be called from the profiling code. 
- */
-               
-/*
- * void s_lock_np(struct simplelock *lkp)
- */
-NON_GPROF_ENTRY(s_lock_np)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $1, %ecx
-1:
-       xchgl   %ecx, (%eax)
-       testl   %ecx, %ecx
-       jz      3f
-2:
-       pause
-       cmpl    $0, (%eax)              /* wait to empty */
-       jne     2b                      /* still set... */
-       jmp     1b                      /* empty again, try once more */
-3:
-       NON_GPROF_RET
-
-/*
- * void s_unlock_np(struct simplelock *lkp)
- */
-NON_GPROF_ENTRY(s_unlock_np)
-       movl    4(%esp), %eax           /* get the address of the lock */
-       movl    $0, (%eax)
-       NON_GPROF_RET
diff --git a/sys/i386/i386/spinlock.s b/sys/i386/i386/spinlock.s
new file mode 100644 (file)
index 0000000..e8191ba
--- /dev/null
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2003, by Matthew dillon <dillon@backplane.com> All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. The name of the developer may NOT be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/i386/simplelock.s,v 1.11.2.2 2003/02/04 20:55:28 jhb Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/spinlock.s,v 1.1 2003/07/06 21:23:48 dillon Exp $
+ */
+
+#include <machine/asmacros.h>                  /* miscellaneous macros */
+#include <machine/lock.h>
+
+/*
+ * The spinlock routines may only be used for low level debugging, like
+ * kernel printfs, and when no other option is available such as situations
+ * relating to hardware interrupt masks.  Spinlock routines should not be
+ * used in interrupt service routines or in any situation other than the
+ * ones named above.
+ *
+ * NOTE: for UP the spinlock routines still disable/restore interrupts
+ */
+ENTRY(spin_lock)
+       movl    4(%esp),%edx
+       SPIN_LOCK((%edx))               /* note: %eax, %ecx tromped */
+       ret
+
+ENTRY(spin_unlock)
+       movl    4(%esp),%edx
+       SPIN_UNLOCK((%edx))             /* note: %eax, %ecx tromped */
+       ret
+
+NON_GPROF_ENTRY(spin_lock_np)
+       movl    4(%esp),%edx
+       SPIN_LOCK((%edx))               /* note: %eax, %ecx tromped */
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(spin_unlock_np)
+       movl    4(%esp), %edx           /* get the address of the lock */
+       SPIN_UNLOCK((%edx))
+       NON_GPROF_RET
+
+/*
+ * Auxiliary convenience routines.  Note that these functions disable and
+ * restore interrupts as well as, on SMP, performing the actual spin
+ * locking.
+ */
+NON_GPROF_ENTRY(imen_lock)
+       SPIN_LOCK(imen_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(imen_unlock)
+       SPIN_UNLOCK(imen_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(intr_lock)
+       SPIN_LOCK(intr_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(intr_unlock)
+       SPIN_UNLOCK(intr_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(mpintr_lock)
+       SPIN_LOCK(mpintr_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(mpintr_unlock)
+       SPIN_UNLOCK(mpintr_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(clock_lock)
+       SPIN_LOCK(clock_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(clock_unlock)
+       SPIN_UNLOCK(clock_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(com_lock)
+       SPIN_LOCK(com_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(com_unlock)
+       SPIN_UNLOCK(com_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(cons_lock)
+       SPIN_LOCK(cons_spinlock)
+       NON_GPROF_RET
+
+NON_GPROF_ENTRY(cons_unlock)
+       SPIN_UNLOCK(cons_spinlock)
+       NON_GPROF_RET
+
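
Typical use of one of these wrappers, as a sketch (the register poke
is invented for illustration; com_lock()/com_unlock() are the routines
defined above):

        com_lock();             /* spins on SMP and disables interrupts */
        outb(sio_iobase + 1, 0);        /* hypothetical sio register */
        com_unlock();           /* restores the saved interrupt state */
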
index 2e84637..4ea5c72 100644 (file)
@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
- * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.20 2003/07/05 05:54:00 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.21 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include "npx.h"
@@ -194,11 +194,13 @@ ENTRY(cpu_exit_switch)
         * any waiters.
         */
        orl     $TDF_EXITED,TD_FLAGS(%ecx)
+#if 0                  /* YYY MP lock may not be held by new target */
        pushl   %eax
        pushl   %ecx    /* wakeup(oldthread) */
        call    wakeup
        addl    $4,%esp
        popl    %eax    /* note: next thread expects curthread in %eax */
+#endif
 
        /*
         * Restore the next thread's state and resume it.  Note: the
@@ -318,20 +320,6 @@ ENTRY(cpu_heavy_restore)
        movl    PCB_EIP(%edx),%eax
        movl    %eax,(%esp)
 
-       /*
-        * SMP ickyness to direct interrupts.
-        */
-
-#ifdef SMP
-#ifdef GRAB_LOPRIO                             /* hold LOPRIO for INTs */
-#ifdef CHEAP_TPR
-       movl    $0, lapic_tpr
-#else
-       andl    $~APIC_TPR_PRIO, lapic_tpr
-#endif /** CHEAP_TPR */
-#endif /** GRAB_LOPRIO */
-#endif /* SMP */
-
        /*
         * Restore the user LDT if we have one
         */
index 932ab07..7c07619 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)trap.c        7.4 (Berkeley) 5/13/91
  * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $
- * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.17 2003/07/03 21:22:38 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/trap.c,v 1.18 2003/07/06 21:23:48 dillon Exp $
  */
 
 /*
@@ -162,7 +162,9 @@ SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
  * point of view of the userland scheduler unless we actually have to
  * switch.
  *
- * usertdsw is called from within a critical section.
+ * usertdsw is called from within a critical section, but the BGL will
+ * have already been released by lwkt_switch() so only call MP safe functions
+ * that don't block!
  */
 static void
 usertdsw(struct thread *ntd)
@@ -205,9 +207,8 @@ userenter(void)
        td->td_switch = usertdsw;
 }
 
-static int
-userret(struct proc *p, struct trapframe *frame,
-       u_quad_t oticks, int have_mplock)
+static void
+userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
 {
        int sig, s;
        struct thread *td = curthread;
@@ -217,10 +218,6 @@ userret(struct proc *p, struct trapframe *frame,
         */
        crit_enter();
        while ((sig = CURSIG(p)) != 0) {
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                crit_exit();
                postsig(sig);
                crit_enter();
@@ -257,10 +254,6 @@ userret(struct proc *p, struct trapframe *frame,
         */
        if (resched_wanted()) {
                uio_yield();
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                while ((sig = CURSIG(p)) != 0)
                        postsig(sig);
        }
@@ -269,10 +262,6 @@ userret(struct proc *p, struct trapframe *frame,
         * Charge system time if profiling.
         */
        if (p->p_flag & P_PROFIL) {
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                addupc_task(p, frame->tf_eip, 
                    (u_int)(curthread->td_sticks - oticks) * psratio);
        }
@@ -290,8 +279,6 @@ userret(struct proc *p, struct trapframe *frame,
        }
        splx(s);
        KKASSERT(mycpu->gd_uprocscheduled == 1);
-
-       return(have_mplock);
 }
 
 #ifdef DEVICE_POLLING
@@ -315,11 +302,13 @@ trap(frame)
        int i = 0, ucode = 0, type, code;
        vm_offset_t eva;
 
+       get_mplock();
+
 #ifdef DDB
        if (db_active) {
                eva = (frame.tf_trapno == T_PAGEFLT ? rcr2() : 0);
                trap_fatal(&frame, eva);
-               return;
+               goto out2;
        }
 #endif
 
@@ -342,7 +331,7 @@ trap(frame)
                         */
                        printf("kernel trap %d with interrupts disabled\n",
                            type);
-               enable_intr();
+               cpu_enable_intr();
        }
 
        eva = 0;
@@ -359,7 +348,7 @@ trap(frame)
                 * correct.
                 */
                eva = rcr2();
-               enable_intr();
+               cpu_enable_intr();
        }
 
 #ifdef DEVICE_POLLING
@@ -377,12 +366,13 @@ restart:
                if (frame.tf_eflags & PSL_VM &&
                    (type == T_PROTFLT || type == T_STKFLT)) {
                        i = vm86_emulate((struct vm86frame *)&frame);
-                       if (i != 0)
+                       if (i != 0) {
                                /*
                                 * returns to original process
                                 */
                                vm86_trap((struct vm86frame *)&frame);
-                       return;
+                       }
+                       goto out2;
                }
                switch (type) {
                        /*
@@ -392,7 +382,7 @@ restart:
                case T_PROTFLT:
                case T_SEGNPFLT:
                        trap_fatal(&frame, eva);
-                       return;
+                       goto out2;
                case T_TRCTRAP:
                        type = T_BPTFLT;        /* kernel breakpoint */
                        /* FALL THROUGH */
@@ -494,7 +484,7 @@ restart:
                                        kdb_trap (type, 0, &frame);
                                }
 #endif /* DDB */
-                               return;
+                               goto out2;
                        } else if (panic_on_nmi)
                                panic("NMI indicates hardware failure");
                        break;
@@ -525,7 +515,7 @@ restart:
                        i = (*pmath_emulate)(&frame);
                        if (i == 0) {
                                if (!(frame.tf_eflags & PSL_T))
-                                       return;
+                                       goto out2;
                                frame.tf_eflags &= ~PSL_T;
                                i = SIGTRAP;
                        }
@@ -549,7 +539,7 @@ kernel_trap:
                switch (type) {
                case T_PAGEFLT:                 /* page fault */
                        (void) trap_pfault(&frame, FALSE, eva);
-                       return;
+                       goto out2;
 
                case T_DNA:
 #if NNPX > 0
@@ -559,7 +549,7 @@ kernel_trap:
                         * registered such use.
                         */
                        if (npxdna())
-                               return;
+                               goto out2;
 #endif
                        break;
 
@@ -579,7 +569,7 @@ kernel_trap:
        do {                                                            \
                if (frame.tf_eip == (int)where) {                       \
                        frame.tf_eip = (int)whereto;                    \
-                       return;                                         \
+                       goto out2;                                      \
                }                                                       \
        } while (0)
 
@@ -596,7 +586,7 @@ kernel_trap:
                                if (frame.tf_eip == (int)cpu_switch_load_gs) {
                                        curthread->td_pcb->pcb_gs = 0;
                                        psignal(p, SIGBUS);
-                                       return;
+                                       goto out2;
                                }
                                MAYBE_DORETI_FAULT(doreti_iret,
                                                   doreti_iret_fault);
@@ -608,7 +598,7 @@ kernel_trap:
                                                   doreti_popl_fs_fault);
                                if (curthread->td_pcb->pcb_onfault) {
                                        frame.tf_eip = (int)curthread->td_pcb->pcb_onfault;
-                                       return;
+                                       goto out2;
                                }
                        }
                        break;
@@ -625,7 +615,7 @@ kernel_trap:
                         */
                        if (frame.tf_eflags & PSL_NT) {
                                frame.tf_eflags &= ~PSL_NT;
-                               return;
+                               goto out2;
                        }
                        break;
 
@@ -637,7 +627,7 @@ kernel_trap:
                                 * silently until the syscall handler has
                                 * saved the flags.
                                 */
-                               return;
+                               goto out2;
                        }
                        if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
                                /*
@@ -645,7 +635,7 @@ kernel_trap:
                                 * flags.  Stop single stepping it.
                                 */
                                frame.tf_eflags &= ~PSL_T;
-                               return;
+                               goto out2;
                        }
                         /*
                          * Ignore debug register trace traps due to
@@ -663,7 +653,7 @@ kernel_trap:
                                  * processor doesn't
                                  */
                                 load_dr6(rdr6() & 0xfffffff0);
-                                return;
+                                goto out2;
                         }
                        /*
                         * Fall through (TRCTRAP kernel mode, kernel address)
@@ -675,7 +665,7 @@ kernel_trap:
                         */
 #ifdef DDB
                        if (kdb_trap (type, 0, &frame))
-                               return;
+                               goto out2;
 #endif
                        break;
 
@@ -695,7 +685,8 @@ kernel_trap:
                      sysbeep(TIMER_FREQ/880, hz);
                      lastalert = time_second;
                    }
-                 return;
+                   /* YYY mp count */
+                 goto out2;
                }
 #else /* !POWERFAIL_NMI */
                        /* machine/parity/power fail/"kitchen sink" faults */
@@ -710,16 +701,16 @@ kernel_trap:
                                        kdb_trap (type, 0, &frame);
                                }
 #endif /* DDB */
-                               return;
+                               goto out2;
                        } else if (panic_on_nmi == 0)
-                               return;
+                               goto out2;
                        /* FALL THROUGH */
 #endif /* POWERFAIL_NMI */
 #endif /* NISA > 0 */
                }
 
                trap_fatal(&frame, eva);
-               return;
+               goto out2;
        }
 
        /* Translate fault for emulators (e.g. Linux) */
@@ -739,7 +730,13 @@ kernel_trap:
 #endif
 
 out:
-       userret(p, &frame, sticks, 1);
+#ifdef SMP
+        if (ISPL(frame.tf_cs) == SEL_UPL)
+               KASSERT(curthread->td_mpcount == 1, ("badmpcount trap from %p", (void *)frame.tf_eip));
+#endif
+       userret(p, &frame, sticks);
+out2:
+       rel_mplock();
 }
 
 #ifdef notyet
@@ -979,7 +976,7 @@ trap_fatal(frame, eva)
 #ifdef SMP
        /* three separate prints in case of a trap on an unmapped page */
        printf("mp_lock = %08x; ", mp_lock);
-       printf("cpuid = %d; ", cpuid);
+       printf("cpuid = %d; ", mycpu->gd_cpuid);
        printf("lapic.id = %08x\n", lapic.id);
 #endif
        if (type == T_PAGEFLT) {
@@ -1089,7 +1086,7 @@ dblfault_handler()
 #ifdef SMP
        /* three separate prints in case of a trap on an unmapped page */
        printf("mp_lock = %08x; ", mp_lock);
-       printf("cpuid = %d; ", cpuid);
+       printf("cpuid = %d; ", mycpu->gd_cpuid);
        printf("lapic.id = %08x\n", lapic.id);
 #endif
        panic("double fault");
@@ -1164,7 +1161,6 @@ syscall2(frame)
        int error;
        int narg;
        int args[8];
-       int have_mplock = 0;
        u_int code;
 
 #ifdef DIAGNOSTIC
@@ -1175,10 +1171,14 @@ syscall2(frame)
        }
 #endif
 
+#ifdef SMP
+       KASSERT(curthread->td_mpcount == 0, ("badmpcount syscall from %p", (void *)frame.tf_eip));
+       get_mplock();
+#endif
        /*
         * access non-atomic field from critical section.  p_sticks is
         * updated by the clock interrupt.  Also use this opportunity
-        * to raise our LWKT priority.
+        * to lazy-raise our LWKT priority.
         */
        crit_enter();
        userenter();
@@ -1194,9 +1194,7 @@ syscall2(frame)
                /*
                 * The prep code is not MP aware.
                 */
-               get_mplock();
                (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
-               rel_mplock();
        } else {
                /*
                 * Need to check if this is a 32 bit or 64 bit syscall.
@@ -1233,8 +1231,6 @@ syscall2(frame)
         */
        if (params && (i = narg * sizeof(int)) &&
            (error = copyin(params, (caddr_t)args, (u_int)i))) {
-               get_mplock();
-               have_mplock = 1;
 #ifdef KTRACE
                if (KTRPOINT(td, KTR_SYSCALL))
                        ktrsyscall(p->p_tracep, code, narg, args);
@@ -1242,6 +1238,7 @@ syscall2(frame)
                goto bad;
        }
 
+#if 0
        /*
         * Try to run the syscall without the MP lock if the syscall
         * is MP safe.  We have to obtain the MP lock no matter what if 
@@ -1251,13 +1248,10 @@ syscall2(frame)
                get_mplock();
                have_mplock = 1;
        }
+#endif
 
 #ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSCALL)) {
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                ktrsyscall(p->p_tracep, code, narg, args);
        }
 #endif
@@ -1311,10 +1305,6 @@ bad:
         * Traced syscall.  trapsignal() is not MP aware.
         */
        if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                frame.tf_eflags &= ~PSL_T;
                trapsignal(p, SIGTRAP, 0);
        }
@@ -1322,14 +1312,10 @@ bad:
        /*
         * Handle reschedule and other end-of-syscall issues
         */
-       have_mplock = userret(p, &frame, sticks, have_mplock);
+       userret(p, &frame, sticks);
 
 #ifdef KTRACE
        if (KTRPOINT(td, KTR_SYSRET)) {
-               if (have_mplock == 0) {
-                       get_mplock();
-                       have_mplock = 1;
-               }
                ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
        }
 #endif
@@ -1341,17 +1327,20 @@ bad:
         */
        STOPEVENT(p, S_SCX, code);
 
+#ifdef SMP
        /*
         * Release the MP lock if we had to get it
         */
-       if (have_mplock)
-               rel_mplock();
+       KASSERT(curthread->td_mpcount == 1, ("badmpcount syscall from %p", (void *)frame.tf_eip));
+       rel_mplock();
+#endif
 }
 
 /*
  * Simplified back end of syscall(), used when returning from fork()
- * directly into user mode.  MP lock is held on entry and should be 
- * held on return.
+ * directly into user mode.  MP lock is held on entry and should be
+ * released on return.  This code will return back into the fork
+ * trampoline code which then runs doreti.
  */
 void
 fork_return(p, frame)
@@ -1362,9 +1351,14 @@ fork_return(p, frame)
        frame.tf_eflags &= ~PSL_C;      /* success */
        frame.tf_edx = 1;
 
-       userret(p, &frame, 0, 1);
+       userret(p, &frame, 0);
 #ifdef KTRACE
        if (KTRPOINT(p->p_thread, KTR_SYSRET))
                ktrsysret(p->p_tracep, SYS_fork, 0, 0);
 #endif
+#ifdef SMP
+       KKASSERT(curthread->td_mpcount == 1);
+       rel_mplock();
+#endif
 }
+
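Both trap() and syscall2() now take the MP lock unconditionally on kernel
entry and funnel every exit through the out2: label, where rel_mplock()
drops the reference; the KASSERTs on td_mpcount verify the expected hold
depth.  The discipline is a recursive lock keyed on the owning cpu id.
A minimal user-space model of that discipline -- illustrative only, the
model_* names are hypothetical and the real primitives live in mplock.s:

    #include <assert.h>
    #include <stdatomic.h>

    #define MP_FREE_LOCK 0xffffffffu        /* value of lock when free */

    static atomic_uint mp_lock_model = MP_FREE_LOCK; /* owner cpu id */
    static _Thread_local int td_mpcount_model;       /* per-thread holds */

    static void
    model_get_mplock(unsigned int cpuid)
    {
        /* only the 0->1 transition contends for the global lock */
        if (td_mpcount_model++ == 0) {
            unsigned int expected = MP_FREE_LOCK;

            while (!atomic_compare_exchange_weak(&mp_lock_model,
                &expected, cpuid))
                expected = MP_FREE_LOCK;    /* spin until free */
        }
    }

    static void
    model_rel_mplock(void)
    {
        assert(td_mpcount_model > 0);
        if (--td_mpcount_model == 0)        /* 1->0 releases for real */
            atomic_store(&mp_lock_model, MP_FREE_LOCK);
    }
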
index a096995..133181e 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
- * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.5 2003/06/25 03:55:53 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/vm86.c,v 1.6 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -544,6 +544,9 @@ vm86_prepcall(struct vm86frame vmf)
 /*
  * vm86 trap handler; determines whether routine succeeded or not.
  * Called while in vm86 space, returns to calling process.
+ *
+ * An MP lock ref is held on entry from trap() and must be released prior
+ * to returning to the VM86 call.
  */
 void
 vm86_trap(struct vm86frame *vmf)
@@ -560,6 +563,7 @@ vm86_trap(struct vm86frame *vmf)
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;
 
+       rel_mplock();
        vm86_biosret(vmf);
 }
 
@@ -569,6 +573,8 @@ vm86_intcall(int intnum, struct vm86frame *vmf)
        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);
 
+       ASSERT_MP_LOCK_HELD();
+
        vmf->vmf_trapno = intnum;
        return (vm86_bioscall(vmf));
 }
@@ -589,6 +595,8 @@ vm86_datacall(intnum, vmf, vmc)
        u_int page;
        int i, entry, retval;
 
+       ASSERT_MP_LOCK_HELD();
+
        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num; 
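vm86_intcall() and vm86_datacall() now assert that the caller already
holds the MP lock, while vm86_trap() consumes the reference taken in
trap() before returning to the VM86 call (which is why the explicit
MP_LOCK sequence disappears from vm86_bioscall below).  A hypothetical
caller therefore brackets the BIOS call itself; a sketch, with the
vm86frame setup elided:

    static int
    call_bios_example(struct vm86frame *vmf)
    {
        int error;

        get_mplock();                       /* ASSERT_MP_LOCK_HELD() ok */
        error = vm86_intcall(0x10, vmf);    /* e.g. a video BIOS call */
        rel_mplock();
        return (error);
    }
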
index ab089fb..3edd8a1 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/i386/vm86bios.s,v 1.15.2.1 2000/05/16 06:58:07 dillon Exp $
- * $DragonFly: src/sys/i386/i386/Attic/vm86bios.s,v 1.8 2003/07/01 20:30:40 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/vm86bios.s,v 1.9 2003/07/06 21:23:48 dillon Exp $
  */
 
 #include <machine/asmacros.h>          /* miscellaneous asm macros */
@@ -63,12 +63,6 @@ ENTRY(vm86_bioscall)
        pushl   %edi
        pushl   %gs
 
-#ifdef SMP     
-       pushl   %edx
-       MP_LOCK                         /* Get global lock */
-       popl    %edx
-#endif
-
 #if NNPX > 0
        movl    PCPU(curthread),%ecx
        cmpl    %ecx,PCPU(npxthread)    /* do we need to save fp? */
index 6bb9c1f..7a8c823 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1996, by Peter Wemm and Steve Passe
- * All rights reserved.
+ * Copyright (c) 1996, by Peter Wemm and Steve Passe, All rights reserved.
+ * Copyright (c) 2003, by Matthew Dillon, All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/apic.h,v 1.14.2.2 2003/03/21 21:46:15 jhb Exp $
- * $DragonFly: src/sys/i386/include/Attic/apic.h,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/apic.h,v 1.3 2003/07/06 21:23:49 dillon Exp $
  */
 
 #ifndef _MACHINE_APIC_H_
 #define _MACHINE_APIC_H_
 
 /*
- * Local && I/O APIC definitions.
- */
-
-/*
- * Pentium P54C+ Build-in APIC
- * (Advanced programmable Interrupt Controller)
- * 
- * Base Address of Build-in APIC in memory location
- * is 0xfee00000.
- * 
- * Map of APIC REgisters:
- * 
- * Offset (hex)    Description                     Read/Write state
- * 000             Reserved
- * 010             Reserved
- * 020 ID          Local APIC ID                   R/W
- * 030 VER         Local APIC Version              R
- * 040             Reserved
- * 050             Reserved
- * 060             Reserved
- * 070             Reserved
- * 080             Task Priority Register          R/W
- * 090             Arbitration Priority Register   R
- * 0A0             Processor Priority Register     R
- * 0B0             EOI Register                    W
- * 0C0 RRR         Remote read                     R
- * 0D0             Logical Destination             R/W
- * 0E0             Destination Format Register     0..27 R;  28..31 R/W
- * 0F0 SVR         Spurious Interrupt Vector Reg.  0..3  R;  4..9   R/W
- * 100             ISR  000-031                    R
- * 110             ISR  032-063                    R
- * 120             ISR  064-095                    R
- * 130             ISR  095-128                    R
- * 140             ISR  128-159                    R
- * 150             ISR  160-191                    R
- * 160             ISR  192-223                    R
- * 170             ISR  224-255                    R
- * 180             TMR  000-031                    R
- * 190             TMR  032-063                    R
- * 1A0             TMR  064-095                    R
- * 1B0             TMR  095-128                    R
- * 1C0             TMR  128-159                    R
- * 1D0             TMR  160-191                    R
- * 1E0             TMR  192-223                    R
- * 1F0             TMR  224-255                    R
- * 200             IRR  000-031                    R
- * 210             IRR  032-063                    R
- * 220             IRR  064-095                    R
- * 230             IRR  095-128                    R
- * 240             IRR  128-159                    R
- * 250             IRR  160-191                    R
- * 260             IRR  192-223                    R
- * 270             IRR  224-255                    R
- * 280             Error Status Register           R
- * 290             Reserved
- * 2A0             Reserved
- * 2B0             Reserved
- * 2C0             Reserved
- * 2D0             Reserved
- * 2E0             Reserved
- * 2F0             Reserved
- * 300 ICR_LOW     Interrupt Command Reg. (0-31)   R/W
- * 310 ICR_HI      Interrupt Command Reg. (32-63)  R/W
- * 320             Local Vector Table (Timer)      R/W
- * 330             Reserved
- * 340             Reserved
- * 350 LVT1        Local Vector Table (LINT0)      R/W
- * 360 LVT2        Local Vector Table (LINT1)      R/W
- * 370 LVT3        Local Vector Table (ERROR)      R/W
- * 380             Initial Count Reg. for Timer    R/W
- * 390             Current Count of Timer          R
- * 3A0             Reserved
- * 3B0             Reserved
- * 3C0             Reserved
- * 3D0             Reserved
- * 3E0             Timer Divide Configuration Reg. R/W
- * 3F0             Reserved
- */
-
-
-/******************************************************************************
- * global defines, etc.
- */
-
-
-/******************************************************************************
- * LOCAL APIC structure
+ * Local && I/O APIC definitions for Pentium P54C+ Built-in APIC.
+ *
+ * A per-cpu APIC resides in memory location 0xFEE00000.
+ *
+ *               31 ... 24   23 ... 16   15 ... 8     7 ... 0
+ *             +-----------+-----------+-----------+-----------+
+ * 0000        |           |           |           |           |
+ * 0010        |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *             +-----------+-----------+-----------+-----------+
+ * 0020 ID     |     | ID  |           |           |           | RW
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                 The physical APIC ID is used with physical interrupt
+ *                 delivery modes.
+ *
+ *             +-----------+-----------+-----------+-----------+
+ * 0030 VER    |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 0040        |           |           |           |           |
+ * 0050        |           |           |           |           |
+ * 0060        |           |           |           |           |
+ * 0070        |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 0080 TPR    |           |           |           | PRIO SUBC |
+ * 0090 APR    |           |           |           |           |
+ * 00A0 PPR    |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                 The Task Priority Register provides a priority threshold
+ *                 mechanism for interrupting the processor.  Only interrupts
+ *                 with a higher priority than that specified in the TPR will
+ *                 be served.   Other interrupts are recorded and serviced
+ *                 as soon as the TPR value decreases enough to allow that
+ *                 (unless EOId by another APIC).
+ *
+ *                 PRIO (7:4).  Main priority.  If 15 the APIC will not
+ *                              accept any interrupts.
+ *                 SUBC (3:0)   Sub priority.  See APR/PPR.
+ *
+ *
+ *                 The Processor Priority Register determines whether a
+ *                 pending interrupt can be dispensed to the processor.  ISRV
+ *                 Is the vector of the highest priority ISR bit set or
+ *                 is the vector of the highest priority ISR bit set or
+ *
+ *                 IF TPR[7:4] >= ISRV[7:4]
+ *                     PPR[7:0] = TPR[7:0]
+ *                 ELSE
+ *                     PPR[7:0] = ISRV[7:4].000
+ *                     
+ *                 The Arbitration Priority Register holds the current
+ *                 lowest priority of the processor, a value used during
+ *                 lowest-priority arbitration.
+ *
+ *                 IF (TPR[7:4] >= IRRV[7:4] AND TPR[7:4] > ISRV[7:4])
+ *                     APR[7:0] = TPR[7:0]
+ *                 ELSE
+ *                     APR[7:4] = max((TPR[7:4]&ISRV[7:4]),IRRV[7:4]).000
+ *                 
+ *             +-----------+-----------+-----------+-----------+
+ * 00B0 EOI    |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 00C0        |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 00D0 LDR    |LOG APICID |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 00E0 DFR    |MODEL|     |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                 The logical APIC ID is used with logical interrupt
+ *                 delivery modes.  Interpretation of logical destination
+ *                 information depends on the MODEL bits in the Destination
+ *                 Format Register.
+ *
+ *                 MODEL=1111 FLAT MODEL - The MDA is interpreted as
+ *                                         a decoded address.  By setting
+ *                                         one bit in the LDR for each
+ *                                         local apic, 8 APICs can coexist.
+ *
+ *                 MODEL=0000 CLUSTER MODEL - 
+ *
+ *               31 ... 24   23 ... 16   15 ... 8     7 ... 0
+ *             +-----------+-----------+-----------+-----------+
+ * 00F0 SVR    |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ * 0100-0170 ISR|           |           |           |           |
+ * 0180-01F0 TMR|           |           |           |           |
+ * 0200-0270 IRR|           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                 These registers represent 256 bits, one bit for each
+ *                 possible interrupt.  Interrupts 0-15 are reserved so
+ *                 bits 0-15 are also reserved.
+ *
+ *                 TMR - Trigger mode register.  Upon acceptance of an int
+ *                       the corresponding bit is cleared for edge-trig and
+ *                       set for level-trig.  If the TMR bit is set (level),
+ *                       the local APIC sends an EOI to all I/O APICs as
+ *                       a result of software issuing an EOI command.
+ *                       
+ *                 IRR - Interrupt Request Register.  Contains active
+ *                       interrupt requests that have been accepted but not
+ *                       yet dispensed by the current local APIC.  The bit is
+ *                       cleared and the corresponding ISR bit is set when
+ *                       the INTA cycle is issued.
+ *
+ *                 ISR - Interrupt In-Service register.  Interrupt has been
+ *                       delivered but not yet fully serviced.  Cleared when
+ *                       an EOI is issued from the processor.  An EOI will
+ *                       also send an EOI to all I/O APICs if TMR was set.
+ *
+ *             +-----------+-----------+-----------+-----------+
+ * 0280 ESR    |           |           |           |           |
+ * 0290-02F0    |           |           |           |           |
+ *             +--FEDCBA98-+--76543210-+--FEDCBA98-+-----------+
+ * 0300        ICR_LO  |           |      XX   |  TL SDMMM | vector    |
+ * 0310        ICR_HI  | DEST FIELD|           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                 The interrupt command register:
+ *
+ *                     XX:     Destination Shorthand field:
+ *
+ *                             00      Use Destination field
+ *                             01      Self only.  Dest field ignored.
+ *                             10      All including self (uses a 
+ *                                     destination field of 0x0F)
+ *                             11      All excluding self (uses a
+ *                                     destination field of 0x0F)
+ *
+ *                     T:      1 = Level, 0 = Edge Trigger mode, used for
+ *                             the INIT level de-assert delivery mode only.
+ *                             Not sure.
+ *
+ *                     L:      0 = De-Assert, 1 = Assert.  Not sure what this
+ *                             is.  For INIT mode use 0, for all other modes
+ *                             use 1.
+ *
+ *                     S:      1 = Send Pending.  Interrupt has been injected
+ *                             but APIC has not yet accepted it.
+ *
+ *                     D:      0=physical 1=logical.  In physical mode
+ *                             only 24-27 of DEST FIELD is used from ICR_HI.
+ *
+ *                     MMM:    000 Fixed. Deliver to all processors according
+ *                                 to the ICR.  Always treated as edge trig.
+ *
+ *                             001 Lowest Priority.  Deliver to just the
+ *                                 processor running at the lowest priority.
+ *
+ *                             010 SMI.  The vector must be 00B.  Only edge
+ *                                 triggered is allowed.  The vector field
+ *                                 must be programmed to zero (huh?).
+ *
+ *                             011 <reserved>
+ *
+ *                             100 NMI.  Deliver as an NMI to all processors
+ *                                 listed in the destination field.  The
+ *                                 vector is ignored.  Always treated as
+ *                                 edge triggered.
+ *
+ *                             101 INIT.  Deliver as an INIT signal to all
+ *                                 processors (like FIXED).  Vector is ignored
+ *                                 and it is always edge-triggered.
+ *
+ *                             110 Start Up.  Sends a special message between
+ *                                 cpus.  The vector contains a start-up
+ *                                 address for MP boot protocol.
+ *                                 Always edge triggered.  Note: a startup
+ *                                 int is not automatically retried in case of
+ *                                 failure.
+ *
+ *                             111 <reserved>
+ *
+ *             +-----------+--------10-+--FEDCBA98-+-----------+
+ * 0320        LTIMER  |           |        TM |  ---S---- | vector    |
+ * 0330                |           |           |           |           |
+ *             +-----------+--------10-+--FEDCBA98-+-----------+
+ * 0340        LVPCINT |           |        -M |  ---S-MMM | vector    |
+ * 0350        LVINT0  |           |        -M |  LRPS-MMM | vector    |
+ * 0360 LVINT1 |           |        -M |  LRPS-MMM | vector    |
+ * 0370        LVERROR |           |        -M |  -------- | vector    |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *                     T:      1 = periodic, 0 = one-shot
+ *
+ *                     M:      1 = masked
+ *
+ *                     L:      1 = level, 0 = edge
+ *
+ *                     R:      For level triggered only, set to 1 when a
+ *                             level int is accepted, cleared by EOI.
+ *
+ *                     P:      Pin Polarity 0 = Active High, 1 = Active Low
+ *
+ *                     S:      1 = Send Pending.  Interrupt has been injected
+ *                             but APIC has not yet accepted it.
+ *
+ *                     MMM     000 = Fixed     deliver to cpu according to LVT
+ *
+ *                     MMM     100 = NMI       deliver as an NMI.  Always edge
+ *
+ *                     MMM     111 = ExtInt    deliver from 8259, routes INTA
+ *                                             bus cycle to external
+ *                                             controller.  Controller is 
+ *                                             expected to supply vector.
+ *                                             Always level.
+ *
+ *             +-----------+-----------+-----------+-----------+
+ * 0380        ICR     |           |           |           |           |
+ * 0390        CCR     |           |           |           |           |
+ * 03A0                |           |           |           |           |
+ * 03B0                |           |           |           |           |
+ * 03C0                |           |           |           |           |
+ * 03D0                |           |           |           |           |
+ * 03E0 DCR    |           |           |           |           |
+ *             +-----------+-----------+-----------+-----------+
+ *
+ *
+ *     NOTE ON EOI: Upon receiving an EOI the APIC clears the highest priority
+ *     interrupt in the ISR and selects the next highest priority interrupt
+ *     for posting to the CPU.  If the interrupt being EOId was level
+ *     triggered the APIC will send an EOI to all I/O APICs.  For the moment
+ *     you can write garbage to the EOI register but for future compatibility
+ *     0 should be written.
+ *
  */
 
 #ifndef LOCORE
 struct LAPIC {
        /* reserved */          PAD4;
        /* reserved */          PAD4;
-       u_int32_t id;           PAD3;
-       u_int32_t version;      PAD3;
+       u_int32_t id;           PAD3;   /* 0020 R/W */
+       u_int32_t version;      PAD3;   /* 0030 RO */
        /* reserved */          PAD4;
        /* reserved */          PAD4;
        /* reserved */          PAD4;
@@ -220,11 +361,6 @@ typedef struct IOAPIC ioapic_t;
 #define ALLHWI_LEVEL           0x00000000      /* TPR of CPU grabbing INTs */
 #endif /** GRAB_LOPRIO */
 
-/* XXX these 2 don't really belong here... */
-#define COUNT_FIELD            0x00ffffff      /* count portion of the lock */
-#define CPU_FIELD              0xff000000      /* cpu portion of the lock */
-#define FREE_LOCK              0xffffffff      /* value of lock when free */
-
 /*
  * XXX This code assumes that the reserved field of the
  *      local APIC TPR can be written with all 0s.
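The TPR/PPR/APR rules spelled out in the new header comment reduce to a
few lines of C.  The sketch below simply transcribes the comment's
formulas (tpr, isrv and irrv are the 8-bit register values; this is an
illustration, not kernel code):

    #include <stdint.h>

    /* the priority class is the high nibble of an 8-bit priority */
    static inline uint8_t prio_class(uint8_t v) { return (v >> 4); }

    /* Processor Priority Register, per the IF/ELSE rule above */
    static uint8_t
    compute_ppr(uint8_t tpr, uint8_t isrv)
    {
        if (prio_class(tpr) >= prio_class(isrv))
            return (tpr);
        return (isrv & 0xf0);               /* ISRV[7:4].000 */
    }

    /* Arbitration Priority Register, used for lowest-prio delivery */
    static uint8_t
    compute_apr(uint8_t tpr, uint8_t isrv, uint8_t irrv)
    {
        uint8_t a, b;

        if (prio_class(tpr) >= prio_class(irrv) &&
            prio_class(tpr) > prio_class(isrv))
            return (tpr);
        a = prio_class(tpr) & prio_class(isrv);  /* as written above */
        b = prio_class(irrv);
        return ((a > b ? a : b) << 4);           /* .000 sub-priority */
    }
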
index b28d441..7899b21 100644 (file)
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
- * $DragonFly: src/sys/i386/include/Attic/cpufunc.h,v 1.4 2003/06/29 03:28:43 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/cpufunc.h,v 1.5 2003/07/06 21:23:49 dillon Exp $
  */
 
 /*
@@ -122,15 +122,6 @@ btrl(u_int *mask, int bit)
        return(result);
 }
 
-static __inline void
-disable_intr(void)
-{
-       __asm __volatile("cli" : : : "memory");
-#ifdef SMP
-       MPINTR_LOCK();
-#endif
-}
-
 static __inline void
 do_cpuid(u_int ax, u_int *p)
 {
@@ -140,11 +131,14 @@ do_cpuid(u_int ax, u_int *p)
 }
 
 static __inline void
-enable_intr(void)
+cpu_disable_intr(void)
+{
+       __asm __volatile("cli" : : : "memory");
+}
+
+static __inline void
+cpu_enable_intr(void)
 {
-#ifdef SMP
-       MPINTR_UNLOCK();
-#endif
        __asm __volatile("sti");
 }
 
@@ -286,7 +280,9 @@ invd(void)
  * will cause the invl*() functions to be equivalent to the cpu_invl*()
  * functions.
  */
-#ifndef SMP
+#ifdef SMP
+void smp_invltlb(void);
+#else
 #define smp_invltlb()
 #endif
 
@@ -630,9 +626,9 @@ load_dr7(u_int sel)
 int    breakpoint      __P((void));
 u_int  bsfl            __P((u_int mask));
 u_int  bsrl            __P((u_int mask));
-void   disable_intr    __P((void));
+void   cpu_disable_intr __P((void));
 void   do_cpuid        __P((u_int ax, u_int *p));
-void   enable_intr     __P((void));
+void   cpu_enable_intr __P((void));
 u_char inb             __P((u_int port));
 u_int  inl             __P((u_int port));
 void   insb            __P((u_int port, void *addr, size_t cnt));
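With the MPINTR_LOCK side effects gone, cpu_disable_intr() and
cpu_enable_intr() are bare cli/sti.  Code that can be entered with
interrupts already disabled wants a save/restore pair instead; a sketch
of that pattern in the same inline-asm style (the intr_save/intr_restore
names are hypothetical, not part of this commit):

    static __inline u_int
    intr_save(void)
    {
        u_int ef;

        /* capture eflags, then disable interrupts */
        __asm __volatile("pushfl; popl %0; cli" : "=r" (ef) : : "memory");
        return (ef);
    }

    static __inline void
    intr_restore(u_int ef)
    {
        /* restore eflags, re-enabling interrupts iff they were enabled */
        __asm __volatile("pushl %0; popfl" : : "r" (ef) : "memory", "cc");
    }
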
index ba28263..a8e0070 100644 (file)
@@ -1,6 +1,5 @@
 /*
- * Copyright (c) 1997, by Steve Passe
- * All rights reserved.
+ * Copyright (c) 2003, Matthew Dillon, All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
- * $DragonFly: src/sys/i386/include/Attic/lock.h,v 1.2 2003/06/17 04:28:35 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/lock.h,v 1.3 2003/07/06 21:23:49 dillon Exp $
  */
 
-
 #ifndef _MACHINE_LOCK_H_
 #define _MACHINE_LOCK_H_
 
+#ifndef _MACHINE_PSL_H_
+#include <machine/psl.h>
+#endif
 
-#ifdef LOCORE
-
+/*
+ * MP_FREE_LOCK is used by both assembly and C under SMP.
+ */
 #ifdef SMP
+#define MP_FREE_LOCK           0xffffffff      /* value of lock when free */
+#endif
 
-#define        MPLOCKED        lock ;
+#ifdef LOCORE
 
 /*
- * Some handy macros to allow logical organization.
+ * Spinlock assembly support.  Note: eax and ecx may be clobbered; no
+ * other register will be.  These routines are sometimes
+ * called with (%edx) as the mem argument.
+ *
+ * Under UP the spinlock routines still serve to disable/restore 
+ * interrupts.
  */
 
-#define MP_LOCK                call    _get_mplock
 
-#define MP_TRYLOCK                                                     \
-       pushl   $_mp_lock ;                     /* GIANT_LOCK */        \
-       call    _MPtrylock ;                    /* try to get lock */   \
-       add     $4, %esp
+#ifdef SMP
 
-#define MP_RELLOCK                                                     \
-       movl    $_mp_lock,%edx ;                /* GIANT_LOCK */        \
-       call    _MPrellock_edx
+#define SPIN_INIT(mem)                                         \
+       movl    $0,mem ;                                        \
+
+#define SPIN_INIT_NOREG(mem)                                   \
+       SPIN_INIT(mem) ;                                        \
+
+#define SPIN_LOCK(mem)                                         \
+       pushfl ;                                                \
+       popl    %ecx ;          /* flags */                     \
+       cli ;                                                   \
+       orl     $PSL_C,%ecx ;   /* make sure non-zero */        \
+7: ;                                                           \
+       movl    $0,%eax ;       /* expected contents of lock */ \
+       cmpxchgl %ecx,mem ;     /* Z=1 (jz) on success */       \
+       jz      8f ;                                            \
+       jmp     7b ;                                            \
+8: ;                                                           \
+
+#define SPIN_LOCK_PUSH_REGS                                    \
+       subl    $8,%esp ;                                       \
+       movl    %ecx,(%esp) ;                                   \
+       movl    %eax,4(%esp) ;                                  \
+
+#define SPIN_LOCK_POP_REGS                                     \
+       movl    (%esp),%ecx ;                                   \
+       movl    4(%esp),%eax ;                                  \
+       addl    $8,%esp ;                                       \
+
+#define SPIN_LOCK_FRAME_SIZE   8
+
+#define SPIN_LOCK_NOREG(mem)                                   \
+       SPIN_LOCK_PUSH_REGS ;                                   \
+       SPIN_LOCK(mem) ;                                        \
+       SPIN_LOCK_POP_REGS ;                                    \
+
+#define SPIN_UNLOCK(mem)                                       \
+       pushl   mem ;                                           \
+       movl    $0,mem ;                                        \
+       popfl ;                                                 \
+
+#define SPIN_UNLOCK_PUSH_REGS
+#define SPIN_UNLOCK_POP_REGS
+#define SPIN_UNLOCK_FRAME_SIZE 0
+
+#define SPIN_UNLOCK_NOREG(mem)                                 \
+       SPIN_UNLOCK(mem) ;                                      \
 
-/*
- * Protects the IO APIC and apic_imen as a critical region.
- */
-#define IMASK_LOCK                                                     \
-       pushl   $_imen_lock ;                   /* address of lock */   \
-       call    _s_lock ;                       /* MP-safe */           \
-       addl    $4, %esp
+#else
 
-#define IMASK_UNLOCK                                                   \
-       movl    $0, _imen_lock
+#define SPIN_LOCK(mem)                                         \
+       pushfl ;                                                \
+       cli ;                                                   \
+       orl     $PSL_C,(%esp) ;                                 \
+       popl    mem ;                                           \
 
-#else  /* SMP */
+#define SPIN_LOCK_PUSH_REGS
+#define SPIN_LOCK_POP_REGS
+#define SPIN_LOCK_FRAME_SIZE   0
 
-#define        MPLOCKED                                /* NOP */
+#define SPIN_UNLOCK(mem)                                       \
+       pushl   mem ;                                           \
+       movl    $0,mem ;                                        \
+       popfl ;                                                 \
 
-#define MP_LOCK                                        /* NOP */
+#define SPIN_UNLOCK_PUSH_REGS
+#define SPIN_UNLOCK_POP_REGS
+#define SPIN_UNLOCK_FRAME_SIZE 0
 
-#endif /* SMP */
+#endif /* SMP */
 
-#else /* LOCORE */
+#else  /* LOCORE */
 
-#ifdef SMP
+/*
+ * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
+ * to disable/restore interrupts even if it doesn't spin.
+ */
+struct spinlock {
+       volatile int    opaque;
+};
 
-#include <machine/smptests.h>                  /** xxx_LOCK */
+typedef struct spinlock *spinlock_t;
+
+void   mpintr_lock(void);      /* disables int / spinlock combo */
+void   mpintr_unlock(void);
+void   com_lock(void);         /* disables int / spinlock combo */
+void   com_unlock(void);
+void   imen_lock(void);        /* disables int / spinlock combo */
+void   imen_unlock(void);
+void   clock_lock(void);       /* disables int / spinlock combo */
+void   clock_unlock(void);
+void   cons_lock(void);        /* disables int / spinlock combo */
+void   cons_unlock(void);
+
+extern struct spinlock smp_rv_spinlock;
+
+void   spin_lock(spinlock_t lock);
+void   spin_lock_np(spinlock_t lock);
+void   spin_unlock(spinlock_t lock);
+void   spin_unlock_np(spinlock_t lock);
+#if 0
+void   spin_lock_init(spinlock_t lock);
+#endif
 
 /*
- * Locks regions protected in UP kernel via cli/sti.
+ * Inline version of spinlock routines -- overrides assembly.  Only unlock
+ * and init here please.
  */
-#ifdef USE_MPINTRLOCK
-#define MPINTR_LOCK()  s_lock(&mpintr_lock)
-#define MPINTR_UNLOCK()        s_unlock(&mpintr_lock)
-#else
-#define MPINTR_LOCK()
-#define MPINTR_UNLOCK()
-#endif /* USE_MPINTRLOCK */
+static __inline void
+spin_lock_init(spinlock_t lock)
+{
+       lock->opaque = 0;
+}
 
 /*
- * sio/cy lock.
- * XXX should rc (RISCom/8) use this?
- */
-#ifdef USE_COMLOCK
-#define COM_LOCK()     s_lock(&com_lock)
-#define COM_UNLOCK()   s_unlock(&com_lock)
-#define COM_DISABLE_INTR() \
-               { __asm __volatile("cli" : : : "memory"); COM_LOCK(); }
-#define COM_ENABLE_INTR() \
-               { COM_UNLOCK(); __asm __volatile("sti"); }
-#else
-#define COM_LOCK()
-#define COM_UNLOCK()
-#define COM_DISABLE_INTR()     disable_intr()
-#define COM_ENABLE_INTR()      enable_intr()
-#endif /* USE_COMLOCK */
-
-/* 
- * Clock hardware/struct lock.
- * XXX pcaudio and friends still need this lock installed.
+ * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
+ * but we leave a few functions intact as macros for convenience.
  */
-#ifdef USE_CLOCKLOCK
-#define CLOCK_LOCK()   s_lock(&clock_lock)
-#define CLOCK_UNLOCK() s_unlock(&clock_lock)
-#define CLOCK_DISABLE_INTR() \
-               { __asm __volatile("cli" : : : "memory"); CLOCK_LOCK(); }
-#define CLOCK_ENABLE_INTR() \
-               { CLOCK_UNLOCK(); __asm __volatile("sti"); }
-#else
-#define CLOCK_LOCK()
-#define CLOCK_UNLOCK()
-#define CLOCK_DISABLE_INTR()   disable_intr()
-#define CLOCK_ENABLE_INTR()    enable_intr()
-#endif /* USE_CLOCKLOCK */
-
-#else /* SMP */
-
-#define MPINTR_LOCK()
-#define MPINTR_UNLOCK()
-
-#define COM_LOCK()
-#define COM_UNLOCK()
-#define CLOCK_LOCK()
-#define CLOCK_UNLOCK()
+#ifdef SMP
 
-#endif /* SMP */
+void   get_mplock(void);
+int    try_mplock(void);
+void   rel_mplock(void);
+int    cpu_try_mplock(void);
+#if 0
+void   cpu_rel_mplock(void);
+#endif
+void   cpu_get_initial_mplock(void);
 
-/*
- * Simple spin lock.
- * It is an error to hold one of these locks while a process is sleeping.
- */
-struct simplelock {
-       volatile int    lock_data;
-};
+extern u_int   mp_lock;
 
-/* functions in simplelock.s */
-void   s_lock_init             __P((struct simplelock *));
-void   s_lock                  __P((struct simplelock *));
-int    s_lock_try              __P((struct simplelock *));
-void   ss_lock                 __P((struct simplelock *));
-void   ss_unlock               __P((struct simplelock *));
-void   s_lock_np               __P((struct simplelock *));
-void   s_unlock_np             __P((struct simplelock *));
+#define MP_LOCK_HELD()   (mp_lock == mycpu->gd_cpuid)
+#define ASSERT_MP_LOCK_HELD()   KKASSERT(MP_LOCK_HELD())
 
-/* inline simplelock functions */
 static __inline void
-s_unlock(struct simplelock *lkp)
+cpu_rel_mplock(void)
 {
-       lkp->lock_data = 0;
+       mp_lock = MP_FREE_LOCK;
 }
 
-/* global data in mp_machdep.c */
-extern struct simplelock       imen_lock;
-extern struct simplelock       cpl_lock;
-extern struct simplelock       fast_intr_lock;
-extern struct simplelock       intr_lock;
-extern struct simplelock       clock_lock;
-extern struct simplelock       com_lock;
-extern struct simplelock       mpintr_lock;
-extern struct simplelock       mcount_lock;
-
-#if !defined(SIMPLELOCK_DEBUG) && MAXCPU > 1
-/*
- * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
- */
-#define        simple_lock_init(alp)   s_lock_init(alp)
-#define        simple_lock(alp)        s_lock(alp)
-#define        simple_lock_try(alp)    s_lock_try(alp)
-#define        simple_unlock(alp)      s_unlock(alp)
-
-#endif /* !SIMPLELOCK_DEBUG && MAXCPU > 1 */
+#else
 
-#endif /* LOCORE */
+#define get_mplock()
+#define try_mplock()   1
+#define rel_mplock()
+#define ASSERT_MP_LOCK_HELD()
 
-#endif /* !_MACHINE_LOCK_H_ */
+#endif /* SMP */
+#endif /* LOCORE */
+#endif /* !_MACHINE_LOCK_H_ */
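The SPIN_LOCK/SPIN_UNLOCK assembly doubles as an interrupt disable: the
holder's saved eflags, with PSL_C forced on so the stored word is never
zero, become the lock value, and SPIN_UNLOCK restores them via popfl.
A C model of the same idea, illustrative only (the model_* names are
hypothetical; real users go through the asm macros or spin_lock() and
spin_unlock()):

    #include <stdatomic.h>

    #define MODEL_PSL_C 0x00000001u     /* carry bit; keeps word non-zero */

    static unsigned int
    model_read_eflags_cli(void)
    {
        unsigned int ef;

        __asm __volatile("pushfl; popl %0; cli" : "=r" (ef) : : "memory");
        return (ef);
    }

    static void
    model_write_eflags(unsigned int ef)
    {
        __asm __volatile("pushl %0; popfl" : : "r" (ef) : "memory", "cc");
    }

    static void
    model_spin_lock(atomic_uint *mem)
    {
        unsigned int flags = model_read_eflags_cli() | MODEL_PSL_C;
        unsigned int expected;

        do {
            expected = 0;               /* 0 means unlocked */
        } while (!atomic_compare_exchange_weak(mem, &expected, flags));
    }

    static void
    model_spin_unlock(atomic_uint *mem)
    {
        /* clear the lock, then restore the saved interrupt state */
        model_write_eflags(atomic_exchange(mem, 0));
    }
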
index 78056f4..ad3d893 100644 (file)
@@ -7,7 +7,7 @@
  * ----------------------------------------------------------------------------
  *
  * $FreeBSD: src/sys/i386/include/smp.h,v 1.50.2.5 2001/02/13 22:32:45 tegge Exp $
- * $DragonFly: src/sys/i386/include/Attic/smp.h,v 1.2 2003/06/17 04:28:36 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/smp.h,v 1.3 2003/07/06 21:23:49 dillon Exp $
  *
  */
 
@@ -47,23 +47,6 @@ extern int                   bootMP_size;
 /* functions in mpboot.s */
 void   bootMP                  __P((void));
 
-/* global data in mplock.s */
-extern u_int                   mp_lock;
-extern u_int                   isr_lock;
-#ifdef RECURSIVE_MPINTRLOCK
-extern u_int                   mpintr_lock;
-#endif /*  RECURSIVE_MPINTRLOCK */
-
-/* functions in mplock.s */
-void   get_mplock              __P((void));
-void   rel_mplock              __P((void));
-int            try_mplock              __P((void));
-#ifdef RECURSIVE_MPINTRLOCK
-void   get_mpintrlock          __P((void));
-void   rel_mpintrlock          __P((void));
-int            try_mpintrlock          __P((void));
-#endif /*  RECURSIVE_MPINTRLOCK */
-
 /* global data in apic_vector.s */
 extern volatile u_int          stopped_cpus;
 extern volatile u_int          started_cpus;
@@ -80,7 +63,6 @@ void  io_apic_write           __P((int, int, u_int));
 
 /* global data in mp_machdep.c */
 extern int                     bsp_apic_ready;
-extern int                     mp_ncpus;
 extern int                     mp_naps;
 extern int                     mp_nbusses;
 extern int                     mp_napics;
@@ -126,8 +108,8 @@ void        assign_apic_irq         __P((int apic, int intpin, int irq));
 void   revoke_apic_irq         __P((int irq));
 void   bsp_apic_configure      __P((void));
 void   init_secondary          __P((void));
-void   smp_invltlb             __P((void));
 int    stop_cpus               __P((u_int));
+void   ap_init                 __P((void));
 int    restart_cpus            __P((u_int));
 #ifdef BETTER_CLOCK 
 void   forward_statclock       __P((int pscnt));
@@ -178,20 +160,6 @@ extern volatile int                smp_idle_loops;
 #endif /* !LOCORE */
 #else  /* !SMP && !APIC_IO */
 
-/*
- * Create dummy MP lock empties
- */
-
-static __inline void
-get_mplock(void)
-{
-}
-
-static __inline void
-rel_mplock(void)
-{
-}
-
 #endif
 
 #endif /* _KERNEL */
index c97ed4f..19fe591 100644 (file)
@@ -23,7 +23,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/include/smptests.h,v 1.33.2.1 2000/05/16 06:58:10 dillon Exp $
- * $DragonFly: src/sys/i386/include/Attic/smptests.h,v 1.2 2003/06/17 04:28:36 dillon Exp $
+ * $DragonFly: src/sys/i386/include/Attic/smptests.h,v 1.3 2003/07/06 21:23:49 dillon Exp $
  */
 
 #ifndef _MACHINE_SMPTESTS_H_
 #define PUSHDOWN_LEVEL_3_NOT
 #define PUSHDOWN_LEVEL_4_NOT
 
-/*
- * Debug version of simple_lock.  This will store the CPU id of the
- * holding CPU along with the lock.  When a CPU fails to get the lock
- * it compares its own id to the holder id.  If they are the same it
- * panic()s, as simple locks are binary, and this would cause a deadlock.
- *
- */
-#define SL_DEBUG
-
-
 /*
  * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
  * Allow the mp_lock() routines to handle FAST interrupts while spinning.
 #define GIANT_LOCK
 
 #ifdef APIC_IO
-/*
- * Enable extra counters for some selected locations in the interrupt handlers.
- * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or 
- * APIC_INTR_DIAGNOSTIC.
- */
-#undef APIC_INTR_DIAGNOSTIC
-
-/*
- * Add extra tracking of a specific interrupt. Look in apic_vector.s, 
- * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
- * APIC_INTR_DIAGNOSTIC must be defined for this to work.
- */
-#ifdef APIC_INTR_DIAGNOSTIC
-#define APIC_INTR_DIAGNOSTIC_IRQ 17
-#endif
 
 /*
  * Don't assume that slow interrupt handler X is called from vector
index 1bd6c2d..05c7d27 100644 (file)
@@ -1,6 +1,6 @@
 /*-
- * Copyright (c) 1997, by Steve Passe
- * All rights reserved.
+ * Copyright (c) 1997, by Steve Passe,  All rights reserved.
+ * Copyright (c) 2003, by Matthew Dillon,  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/i386/isa/apic_ipl.s,v 1.27.2.2 2000/09/30 02:49:35 ps Exp $
- * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.6 2003/07/01 20:31:38 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/apic_ipl.s,v 1.7 2003/07/06 21:23:49 dillon Exp $
  */
 
-#if 0
-
        .data
        ALIGN_DATA
 
-/*
- * Routines used by splz_unpend to build an interrupt frame from a
- * trap frame.  The _vec[] routines build the proper frame on the stack,
- * then call one of _Xintr0 thru _XintrNN.
- *
- * used by:
- *   i386/isa/apic_ipl.s (this file):  splz_unpend JUMPs to HWIs.
- *   i386/isa/clock.c:                 setup _vec[clock] to point at _vec8254.
- */
-       .globl _vec
-_vec:
-       .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
-       .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
-       .long   vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23
+       /*
+        * Interrupt mask for APIC interrupts, defaults to all hardware
+        * interrupts turned off.
+        */
 
-/*
- * Note:
- *     This is the UP equivilant of _imen.
- *     It is OPAQUE, and must NOT be accessed directly.
- *     It MUST be accessed along with the IO APIC as a 'critical region'.
- *     Accessed by:
- *             INTREN()
- *             INTRDIS()
- *             MAYBE_MASK_IRQ
- *             MAYBE_UNMASK_IRQ
- *             imen_dump()
- */
        .p2align 2                              /* MUST be 32bit aligned */
-       .globl _apic_imen
-_apic_imen:
-       .long   HWI_MASK
 
+       .globl apic_imen
+apic_imen:
+       .long   HWI_MASK
 
-/*
- * 
- */
        .text
        SUPERALIGN_TEXT
 
-/*
- * splz() -    dispatch pending interrupts after cpl reduced
- *
- * Interrupt priority mechanism
- *     -- soft splXX masks with group mechanism (cpl)
- *     -- h/w masks for currently active or unused interrupts (imen)
- *     -- ipending = active interrupts currently masked by cpl
- */
-
-ENTRY(splz)
-       /*
-        * The caller has restored cpl and checked that (ipending & ~cpl)
-        * is nonzero.  However, since ipending can change at any time
-        * (by an interrupt or, with SMP, by another cpu), we have to
-        * repeat the check.  At the moment we must own the MP lock in
-        * the SMP case because the interruput handlers require it.  We
-        * loop until no unmasked pending interrupts remain.  
-        *
-        * No new unmaksed pending interrupts will be added during the
-        * loop because, being unmasked, the interrupt code will be able
-        * to execute the interrupts.
-        *
-        * Interrupts come in two flavors:  Hardware interrupts and software
-        * interrupts.  We have to detect the type of interrupt (based on the
-        * position of the interrupt bit) and call the appropriate dispatch
-        * routine.
-        * 
-        * NOTE: "bsfl %ecx,%ecx" is undefined when %ecx is 0 so we can't
-        * rely on the secondary btrl tests.
-        */
-       pushl   %ebx
-       movl    _curthread,%ebx
-       movl    TD_CPL(%ebx),%eax
-splz_next:
-       /*
-        * We don't need any locking here.  (ipending & ~cpl) cannot grow 
-        * while we're looking at it - any interrupt will shrink it to 0.
-        */
-       movl    $0,_reqpri
-       movl    %eax,%ecx
-       notl    %ecx                    /* set bit = unmasked level */
-       andl    _ipending,%ecx          /* set bit = unmasked pending INT */
-       jne     splz_unpend
-       popl    %ebx
-       ret
-
-       ALIGN_TEXT
-splz_unpend:
-       bsfl    %ecx,%ecx
-       lock
-       btrl    %ecx,_ipending
-       jnc     splz_next
-       cmpl    $NHWI,%ecx
-       jae     splz_swi
        /*
-        * We would prefer to call the intr handler directly here but that
-        * doesn't work for badly behaved handlers that want the interrupt
-        * frame.  Also, there's a problem determining the unit number.
-        * We should change the interface so that the unit number is not
-        * determined at config time.
-        *
-        * The vec[] routines build the proper frame on the stack so
-        * the interrupt will eventually return to the caller or splz,
-        * then calls one of _Xintr0 thru _XintrNN.
+        * Functions to enable and disable a hardware interrupt.  Generally
+        * called with only one bit set in the mask but can handle multiple
+        * bits to present the same API as the ICU.
         */
-       popl    %ebx
-       jmp     *_vec(,%ecx,4)
-
-       ALIGN_TEXT
-splz_swi:
-       pushl   %eax                    /* save cpl across call */
-       orl     imasks(,%ecx,4),%eax
-       movl    %eax,TD_CPL(%ebx) /* set cpl for SWI */
-       call    *_ihandlers(,%ecx,4)
-       popl    %eax
-       movl    %eax,TD_CPL(%ebx) /* restore cpl and loop */
-       jmp     splz_next
-
-/*
- * Fake clock interrupt(s) so that they appear to come from our caller instead
- * of from here, so that system profiling works.
- * XXX do this more generally (for all vectors; look up the C entry point).
- * XXX frame bogusness stops us from just jumping to the C entry point.
- * We have to clear iactive since this is an unpend call, and it will be
- * set from the time of the original INT.
- */
-
-/*
- * The 'generic' vector stubs.
- */
-
-#define BUILD_VEC(irq_num)                                             \
-       ALIGN_TEXT ;                                                    \
-__CONCAT(vec,irq_num): ;                                               \
-       popl    %eax ;                                                  \
-       pushfl ;                                                        \
-       pushl   $KCSEL ;                                                \
-       pushl   %eax ;                                                  \
-       cli ;                                                           \
-       lock ;                                  /* MP-safe */           \
-       andl    $~IRQ_BIT(irq_num), iactive ;   /* lazy masking */      \
-       MEXITCOUNT ;                                                    \
-       APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ;      \
-       jmp     __CONCAT(_Xintr,irq_num)
 
-
-       BUILD_VEC(0)
-       BUILD_VEC(1)
-       BUILD_VEC(2)
-       BUILD_VEC(3)
-       BUILD_VEC(4)
-       BUILD_VEC(5)
-       BUILD_VEC(6)
-       BUILD_VEC(7)
-       BUILD_VEC(8)
-       BUILD_VEC(9)
-       BUILD_VEC(10)
-       BUILD_VEC(11)
-       BUILD_VEC(12)
-       BUILD_VEC(13)
-       BUILD_VEC(14)
-       BUILD_VEC(15)
-       BUILD_VEC(16)                   /* 8 additional INTs in IO APIC */
-       BUILD_VEC(17)
-       BUILD_VEC(18)
-       BUILD_VEC(19)
-       BUILD_VEC(20)
-       BUILD_VEC(21)
-       BUILD_VEC(22)
-       BUILD_VEC(23)
-
-
-/******************************************************************************
- * XXX FIXME: figure out where these belong.
- */
-
-/* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
-#define QUALIFY_MASKS_NOT
-
-#ifdef QUALIFY_MASKS
-#define QUALIFY_MASK           \
-       btrl    %ecx, %eax ;    \
-       andl    %eax, %eax ;    \
-       jz      1f ;            \
-       pushl   $bad_mask ;     \
-       call    _panic ;        \
-1:
-
-bad_mask:      .asciz  "bad mask"
-#else
-#define QUALIFY_MASK
-#endif
-
-/*
- * (soon to be) MP-safe function to clear ONE INT mask bit.
- * The passed arg is a 32bit u_int MASK.
- * It sets the associated bit in _apic_imen.
- * It sets the mask bit of the associated IO APIC register.
- */
-ENTRY(INTREN)
-       pushfl                          /* save state of EI flag */
-       cli                             /* prevent recursion */
+ENTRY(INTRDIS)
        IMASK_LOCK                      /* enter critical reg */
-
-       movl    8(%esp), %eax           /* mask into %eax */
-       bsfl    %eax, %ecx              /* get pin index */
-       btrl    %ecx, apic_imen         /* update apic_imen */
-
-       QUALIFY_MASK
-
+       movl    4(%esp),%eax
+1:
+       bsfl    %eax,%ecx
+       jz      2f
+       btrl    %ecx,%eax
+       btsl    %ecx, apic_imen
        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
-       jz      1f
-
-       movl    %ecx, (%edx)            /* write the target register index */
-       movl    16(%edx), %eax          /* read the target register data */
-       andl    $~IOART_INTMASK, %eax   /* clear mask bit */
-       movl    %eax, 16(%edx)          /* write the APIC register data */
-1:     
+       jz      2f
+       movl    %ecx, (%edx)            /* target register index */
+       orl     $IOART_INTMASK,16(%edx) /* set intmask in target apic reg */
+       jmp     1b
+2:
        IMASK_UNLOCK                    /* exit critical reg */
-       popfl                           /* restore old state of EI flag */
        ret
 
-/*
- * (soon to be) MP-safe function to set ONE INT mask bit.
- * The passed arg is a 32bit u_int MASK.
- * It clears the associated bit in apic_imen.
- * It clears the mask bit of the associated IO APIC register.
- */
-ENTRY(INTRDIS)
-       pushfl                          /* save state of EI flag */
-       cli                             /* prevent recursion */
+ENTRY(INTREN)
        IMASK_LOCK                      /* enter critical reg */
-
-       movl    8(%esp), %eax           /* mask into %eax */
+       movl    4(%esp), %eax           /* mask into %eax */
+1:
        bsfl    %eax, %ecx              /* get pin index */
-       btsl    %ecx, apic_imen         /* update _apic_imen */
-
-       QUALIFY_MASK
-
+       jz      2f
+       btrl    %ecx,%eax
+       btrl    %ecx, apic_imen         /* update apic_imen */
        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
-       jz      1f
-
+       jz      2f
        movl    %ecx, (%edx)            /* write the target register index */
-       movl    16(%edx), %eax          /* read the target register data */
-       orl     $IOART_INTMASK, %eax    /* set mask bit */
-       movl    %eax, 16(%edx)          /* write the APIC register data */
-1:     
+       andl    $~IOART_INTMASK, 16(%edx) /* clear mask bit */
+       jmp     1b
+2:     
        IMASK_UNLOCK                    /* exit critical reg */
-       popfl                           /* restore old state of EI flag */
-       ret
-
-
-/******************************************************************************
- *
- */
-
-
-/*
- * void write_ioapic_mask(int apic, u_int mask); 
- */
-
-#define _INT_MASK      0x00010000
-#define _PIN_MASK      0x00ffffff
-
-#define _OLD_ESI         0(%esp)
-#define _OLD_EBX         4(%esp)
-#define _RETADDR         8(%esp)
-#define _APIC           12(%esp)
-#define _MASK           16(%esp)
-
-       ALIGN_TEXT
-write_ioapic_mask:
-       pushl %ebx                      /* scratch */
-       pushl %esi                      /* scratch */
-
-       movl    apic_imen, %ebx
-       xorl    _MASK, %ebx             /* %ebx = _apic_imen ^ mask */
-       andl    $_PIN_MASK, %ebx        /* %ebx = _apic_imen & 0x00ffffff */
-       jz      all_done                /* no change, return */
-
-       movl    _APIC, %esi             /* APIC # */
-       movl    ioapic, %ecx
-       movl    (%ecx,%esi,4), %esi     /* %esi holds APIC base address */
-
-next_loop:                             /* %ebx = diffs, %esi = APIC base */
-       bsfl    %ebx, %ecx              /* %ecx = index if 1st/next set bit */
-       jz      all_done
-
-       btrl    %ecx, %ebx              /* clear this bit in diffs */
-       leal    16(,%ecx,2), %edx       /* calculate register index */
-
-       movl    %edx, (%esi)            /* write the target register index */
-       movl    16(%esi), %eax          /* read the target register data */
-
-       btl     %ecx, _MASK             /* test for mask or unmask */
-       jnc     clear                   /* bit is clear */
-       orl     $_INT_MASK, %eax        /* set mask bit */
-       jmp     write
-clear: andl    $~_INT_MASK, %eax       /* clear mask bit */
-
-write: movl    %eax, 16(%esi)          /* write the APIC register data */
-
-       jmp     next_loop               /* try another pass */
-
-all_done:
-       popl    %esi
-       popl    %ebx
-       ret
-
-#undef _OLD_ESI
-#undef _OLD_EBX
-#undef _RETADDR
-#undef _APIC
-#undef _MASK
-
-#undef _PIN_MASK
-#undef _INT_MASK
-
-#ifdef oldcode
-
-_INTREN:
-       movl apic_imen, %eax
-       notl %eax                       /* mask = ~mask */
-       andl apic_imen, %eax            /* %eax = _apic_imen & ~mask */
-
-       pushl %eax                      /* new (future) _apic_imen value */
-       pushl $0                        /* APIC# arg */
-       call write_ioapic_mask          /* modify the APIC registers */
-
-       addl $4, %esp                   /* remove APIC# arg from stack */
-       popl apic_imen                  /* _apic_imen |= mask */
-       ret
-
-_INTRDIS:
-       movl _apic_imen, %eax
-       orl 4(%esp), %eax               /* %eax = _apic_imen | mask */
-
-       pushl %eax                      /* new (future) _apic_imen value */
-       pushl $0                        /* APIC# arg */
-       call write_ioapic_mask          /* modify the APIC registers */
-
-       addl $4, %esp                   /* remove APIC# arg from stack */
-       popl apic_imen                  /* _apic_imen |= mask */
-       ret
-
-#endif /* oldcode */
-
-
-#ifdef ready
-
-/*
- * u_int read_io_apic_mask(int apic); 
- */
-       ALIGN_TEXT
-read_io_apic_mask:
        ret
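+
+/*
+ * Roughly what the INTREN/INTRDIS loops above do, as a C sketch only
+ * (the entry layout is inferred from the 16 byte stride and the 8/12
+ * byte offsets used above, not from a real declaration):
+ *
+ *     while ((pin = bsf(mask)) found) {
+ *             mask &= ~(1 << pin);
+ *             clear (INTREN) or set (INTRDIS) pin in apic_imen;
+ *             if (int_to_apicintpin[pin] has no apic address)
+ *                     break;
+ *             select the pin's redirection register, then clear
+ *             (INTREN) or set (INTRDIS) IOART_INTMASK in it;
+ *     }
+ */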
 
-/*
- * Set INT mask bit for each bit set in 'mask'.
- * Ignore INT mask bit for all others.
- *
- * void set_io_apic_mask(apic, u_int32_t bits); 
- */
-       ALIGN_TEXT
-set_io_apic_mask:
-       ret
-
-/*
- * void set_ioapic_maskbit(int apic, int bit); 
- */
-       ALIGN_TEXT
-set_ioapic_maskbit:
-       ret
-
-/*
- * Clear INT mask bit for each bit set in 'mask'.
- * Ignore INT mask bit for all others.
- *
- * void clr_io_apic_mask(int apic, u_int32_t bits); 
- */
-       ALIGN_TEXT
-clr_io_apic_mask:
-       ret
-
-/*
- * void clr_ioapic_maskbit(int apic, int bit); 
- */
-       ALIGN_TEXT
-clr_ioapic_maskbit:
-       ret
-
-#endif /** ready */
-
 /******************************************************************************
  * 
  */
@@ -465,4 +125,3 @@ ENTRY(apic_eoi)
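+       /* write the local apic EOI register (offset 0xb0) */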
        movl    $0, lapic+0xb0
        ret
 
-#endif
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 6cd3736..55a4af9 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -1,62 +1,23 @@
 /*
  *     from: vector.s, 386BSD 0.1 unknown origin
  * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
- * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.7 2003/07/01 20:31:38 dillon Exp $
+ * $DragonFly: src/sys/i386/isa/Attic/apic_vector.s,v 1.8 2003/07/06 21:23:49 dillon Exp $
  */
 
 
 #include <machine/apic.h>
 #include <machine/smp.h>
-
 #include "i386/isa/intr_machdep.h"
 
 /* convert an absolute IRQ# into a bitmask */
-#define IRQ_BIT(irq_num)       (1 << (irq_num))
+#define IRQ_LBIT(irq_num)      (1 << (irq_num))
 
 /* make an index into the IO APIC from the IRQ# */
 #define REDTBL_IDX(irq_num)    (0x10 + ((irq_num) * 2))
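+/* e.g. IRQ_LBIT(3) == 0x08; REDTBL_IDX(3) == 0x16, pin 3's low dword */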
 
-
-/*
- * Macros for interrupt interrupt entry, call to handler, and exit.
- */
-
-#define        FAST_INTR(irq_num, vec_name)                                    \
-       .text ;                                                         \
-       SUPERALIGN_TEXT ;                                               \
-IDTVEC(vec_name) ;                                                     \
-       pushl   %eax ;          /* save only call-used registers */     \
-       pushl   %ecx ;                                                  \
-       pushl   %edx ;