spinlocks - Rename API to spin_{try,un,}lock
author     Alex Hornung <ahornung@gmail.com>
           Mon, 30 Aug 2010 10:10:17 +0000 (11:10 +0100)
committer  Alex Hornung <ahornung@gmail.com>
           Mon, 30 Aug 2010 10:33:51 +0000 (11:33 +0100)
* Rename the API to spin_lock, spin_unlock and spin_trylock instead of
  spin_lock_wr, spin_unlock_wr and spin_trylock_wr now that we only have
  exclusive spinlocks (a short usage sketch follows the file list below).

* 99% of this patch was generated by a Coccinelle semantic patch,
  roughly of the form sketched below

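A minimal SmPL sketch of the kind of Coccinelle semantic patch that performs
this rename; the actual semantic patch is not included in the commit, so the
exact form here is an illustrative assumption:

    // rename exclusive spinlock calls to the new, shorter names
    @@
    expression E;
    @@
    - spin_lock_wr(E)
    + spin_lock(E)

    @@
    expression E;
    @@
    - spin_unlock_wr(E)
    + spin_unlock(E)

    @@
    expression E;
    @@
    - spin_trylock_wr(E)
    + spin_trylock(E)

A patch of this form would be applied tree-wide with an invocation along the
lines of "spatch -sp_file rename_spinlocks.cocci -in_place -dir sys" (the
file name and exact spatch options are assumptions, not taken from the
commit).
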
66 files changed:
sys/bus/cam/cam_xpt.c
sys/bus/pci/i386/pci_cfgreg.c
sys/bus/pci/x86_64/pci_cfgreg.c
sys/crypto/aesni/aesni.c
sys/crypto/via/padlock.c
sys/dev/acpica5/Osd/OsdSynch.c
sys/dev/crypto/glxsb/glxsb.c
sys/dev/disk/nata/ata-all.c
sys/dev/disk/nata/ata-chipset.c
sys/dev/disk/nata/ata-queue.c
sys/dev/disk/nata/ata-raid.c
sys/dev/disk/nata/ata-usb.c
sys/dev/disk/nata/atapi-cam.c
sys/dev/misc/syscons/syscons.c
sys/dev/netif/e1000/e1000_osdep.h
sys/dev/netif/e1000/if_em.h
sys/dev/netif/e1000/if_igb.h
sys/dev/raid/twa/tw_osl.h
sys/dev/raid/twa/tw_osl_freebsd.c
sys/dev/raid/twa/tw_osl_inline.h
sys/emulation/linux/linux_futex.c
sys/kern/kern_ccms.c
sys/kern/kern_condvar.c
sys/kern/kern_descrip.c
sys/kern/kern_environment.c
sys/kern/kern_ktr.c
sys/kern/kern_lock.c
sys/kern/kern_lockf.c
sys/kern/kern_nrandom.c
sys/kern/kern_objcache.c
sys/kern/kern_plimit.c
sys/kern/kern_resource.c
sys/kern/kern_sensors.c
sys/kern/kern_spinlock.c
sys/kern/kern_syslink.c
sys/kern/kern_sysref.c
sys/kern/kern_wdog.c
sys/kern/lwkt_msgport.c
sys/kern/subr_prf.c
sys/kern/subr_taskqueue.c
sys/kern/sys_process.c
sys/kern/uipc_usrreq.c
sys/kern/usched_bsd4.c
sys/kern/usched_dummy.c
sys/kern/vfs_bio.c
sys/kern/vfs_cache.c
sys/kern/vfs_journal.c
sys/kern/vfs_lock.c
sys/kern/vfs_mount.c
sys/kern/vfs_subr.c
sys/kern/vfs_syscalls.c
sys/kern/vfs_vm.c
sys/netproto/smb/smb_subr.h
sys/opencrypto/cryptosoft.c
sys/platform/pc32/i386/busdma_machdep.c
sys/platform/pc64/x86_64/busdma_machdep.c
sys/platform/vkernel/platform/busdma_machdep.c
sys/platform/vkernel64/platform/busdma_machdep.c
sys/sys/spinlock2.h
sys/vfs/devfs/devfs_core.c
sys/vfs/ntfs/ntfs_subr.c
sys/vfs/procfs/procfs_vnops.c
sys/vfs/tmpfs/tmpfs_subr.c
sys/vm/swap_pager.c
sys/vm/vm_pager.c
sys/vm/vm_zone.c

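After the rename, call sites follow the pattern below (a minimal sketch with
a hypothetical foo_spin; spin_init(), spin_uninit() and the boolean-returning
spin_trylock() are untouched by this commit):

    #include <sys/spinlock.h>
    #include <sys/spinlock2.h>

    static struct spinlock foo_spin;

    static void
    foo_example(void)
    {
            spin_init(&foo_spin);

            /* exclusive acquire/release, formerly spin_lock_wr()/spin_unlock_wr() */
            spin_lock(&foo_spin);
            /* ... critical section ... */
            spin_unlock(&foo_spin);

            /* non-blocking attempt, formerly spin_trylock_wr() */
            if (spin_trylock(&foo_spin)) {
                    /* ... lock held ... */
                    spin_unlock(&foo_spin);
            }

            spin_uninit(&foo_spin);
    }
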
index 56bf705..ffc91dc 100644 (file)
@@ -988,7 +988,7 @@ xptopen(struct dev_open_args *ap)
         * We don't allow nonblocking access.
         */
        if ((ap->a_oflags & O_NONBLOCK) != 0) {
-               kprintf("%s: can't' do nonblocking access\n", devtoname(dev));
+               kprintf("%s: can't do nonblocking access\n", devtoname(dev));
                return(ENODEV);
        }
 
@@ -4882,19 +4882,19 @@ xpt_done(union ccb *done_ccb)
                sim = done_ccb->ccb_h.path->bus->sim;
                switch (done_ccb->ccb_h.path->periph->type) {
                case CAM_PERIPH_BIO:
-                       spin_lock_wr(&sim->sim_spin);
+                       spin_lock(&sim->sim_spin);
                        TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
                                          sim_links.tqe);
                        done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
-                       spin_unlock_wr(&sim->sim_spin);
+                       spin_unlock(&sim->sim_spin);
                        if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
-                               spin_lock_wr(&cam_simq_spin);
+                               spin_lock(&cam_simq_spin);
                                if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
                                        TAILQ_INSERT_TAIL(&cam_simq, sim,
                                                          links);
                                        sim->flags |= CAM_SIM_ON_DONEQ;
                                }
-                               spin_unlock_wr(&cam_simq_spin);
+                               spin_unlock(&cam_simq_spin);
                        }
                        if ((done_ccb->ccb_h.flags & CAM_POLLED) == 0)
                                setsoftcambio();
@@ -5508,7 +5508,7 @@ typedef struct {
        probe_flags     flags;
        MD5_CTX         context;
        u_int8_t        digest[16];
-}probe_softc;
+} probe_softc;
 
 static void
 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
@@ -7235,10 +7235,10 @@ camisr(void *dummy)
        cam_simq_t queue;
        struct cam_sim *sim;
 
-       spin_lock_wr(&cam_simq_spin);
+       spin_lock(&cam_simq_spin);
        TAILQ_INIT(&queue);
        TAILQ_CONCAT(&queue, &cam_simq, links);
-       spin_unlock_wr(&cam_simq_spin);
+       spin_unlock(&cam_simq_spin);
 
        while ((sim = TAILQ_FIRST(&queue)) != NULL) {
                TAILQ_REMOVE(&queue, sim, links);
@@ -7255,10 +7255,10 @@ camisr_runqueue(struct cam_sim *sim)
        struct  ccb_hdr *ccb_h;
        int     runq;
 
-       spin_lock_wr(&sim->sim_spin);
+       spin_lock(&sim->sim_spin);
        while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
                TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
-               spin_unlock_wr(&sim->sim_spin);
+               spin_unlock(&sim->sim_spin);
                ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
 
                CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
@@ -7350,9 +7350,9 @@ camisr_runqueue(struct cam_sim *sim)
 
                /* Call the peripheral driver's callback */
                (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
-               spin_lock_wr(&sim->sim_spin);
+               spin_lock(&sim->sim_spin);
        }
-       spin_unlock_wr(&sim->sim_spin);
+       spin_unlock(&sim->sim_spin);
 }
 
 /*
index 94aad48..1f72301 100644 (file)
@@ -53,8 +53,8 @@
 
 #if defined(__DragonFly__)
 #define mtx_init(a, b, c, d) spin_init(a)
-#define mtx_lock_spin(a) spin_lock_wr(a)
-#define mtx_unlock_spin(a) spin_unlock_wr(a)
+#define mtx_lock_spin(a) spin_lock(a)
+#define mtx_unlock_spin(a) spin_unlock(a)
 #endif
 
 #define PRVERB(a) do {                                                 \
index 77d24c5..28d19b6 100644 (file)
@@ -53,8 +53,8 @@
 
 #if defined(__DragonFly__)
 #define mtx_init(a, b, c, d) spin_init(a)
-#define mtx_lock_spin(a) spin_lock_wr(a)
-#define mtx_unlock_spin(a) spin_unlock_wr(a)
+#define mtx_lock_spin(a) spin_lock(a)
+#define mtx_unlock_spin(a) spin_unlock(a)
 #endif
 
 #define PRVERB(a) do {                                                 \
index 010450f..208d0ae 100644 (file)
@@ -107,10 +107,10 @@ aesni_detach(device_t dev)
        struct aesni_session *ses;
 
        sc = device_get_softc(dev);
-       spin_lock_wr(&sc->lock);
+       spin_lock(&sc->lock);
        TAILQ_FOREACH(ses, &sc->sessions, next) {
                if (ses->used) {
-                       spin_unlock_wr(&sc->lock);
+                       spin_unlock(&sc->lock);
                        device_printf(dev,
                            "Cannot detach, sessions still active.\n");
                        return (EBUSY);
@@ -120,7 +120,7 @@ aesni_detach(device_t dev)
                TAILQ_REMOVE(&sc->sessions, ses, next);
                kfree(ses, M_AESNI);
        }
-       spin_unlock_wr(&sc->lock);
+       spin_unlock(&sc->lock);
        spin_uninit(&sc->lock);
        crypto_unregister_all(sc->cid);
        return (0);
@@ -154,7 +154,7 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        if (encini == NULL)
                return (EINVAL);
 
-       spin_lock_wr(&sc->lock);
+       spin_lock(&sc->lock);
        /*
         * Free sessions goes first, so if first session is used, we need to
         * allocate one.
@@ -163,7 +163,7 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        if (ses == NULL || ses->used) {
                ses = kmalloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO);
                if (ses == NULL) {
-                       spin_unlock_wr(&sc->lock);
+                       spin_unlock(&sc->lock);
                        return (ENOMEM);
                }
                KASSERT(((uintptr_t)ses) % 0x10 == 0,
@@ -174,13 +174,13 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        }
        ses->used = 1;
        TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
-       spin_unlock_wr(&sc->lock);
+       spin_unlock(&sc->lock);
 
        error = aesni_cipher_setup(ses, encini);
        if (error != 0) {
-               spin_lock_wr(&sc->lock);
+               spin_lock(&sc->lock);
                aesni_freesession_locked(sc, ses);
-               spin_unlock_wr(&sc->lock);
+               spin_unlock(&sc->lock);
                return (error);
        }
 
@@ -209,17 +209,17 @@ aesni_freesession(device_t dev, uint64_t tid)
 
        sc = device_get_softc(dev);
        sid = ((uint32_t)tid) & 0xffffffff;
-       spin_lock_wr(&sc->lock);
+       spin_lock(&sc->lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
                if (ses->id == sid)
                        break;
        }
        if (ses == NULL) {
-               spin_unlock_wr(&sc->lock);
+               spin_unlock(&sc->lock);
                return (EINVAL);
        }
        aesni_freesession_locked(sc, ses);
-       spin_unlock_wr(&sc->lock);
+       spin_unlock(&sc->lock);
        return (0);
 }
 
@@ -261,12 +261,12 @@ aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
                goto out;
        }
 
-       spin_lock_wr(&sc->lock); /* XXX: was rd lock */
+       spin_lock(&sc->lock); /* XXX: was rd lock */
        TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
                if (ses->id == (crp->crp_sid & 0xffffffff))
                        break;
        }
-       spin_unlock_wr(&sc->lock); /* XXX: was rd lock */
+       spin_unlock(&sc->lock); /* XXX: was rd lock */
        if (ses == NULL) {
                error = EINVAL;
                goto out;
index b2fb9d4..7ee0dbe 100644 (file)
@@ -146,10 +146,10 @@ padlock_detach(device_t dev)
        struct padlock_softc *sc = device_get_softc(dev);
        struct padlock_session *ses;
 
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH(ses, &sc->sc_sessions, ses_next) {
                if (ses->ses_used) {
-                       spin_unlock_wr(&sc->sc_sessions_lock);
+                       spin_unlock(&sc->sc_sessions_lock);
                        device_printf(dev,
                            "Cannot detach, sessions still active.\n");
                        return (EBUSY);
@@ -159,7 +159,7 @@ padlock_detach(device_t dev)
                TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
                kfree(ses->ses_freeaddr, M_PADLOCK);
        }
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
        spin_uninit(&sc->sc_sessions_lock);
        crypto_unregister_all(sc->sc_cid);
        return (0);
@@ -211,7 +211,7 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        /*
         * Let's look for a free session structure.
         */
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        /*
         * Free sessions goes first, so if first session is used, we need to
         * allocate one.
@@ -220,7 +220,7 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        if (ses == NULL || ses->ses_used) {
                ses = kmalloc(sizeof(*ses) + 16, M_PADLOCK, M_NOWAIT | M_ZERO);
                if (ses == NULL) {
-                       spin_unlock_wr(&sc->sc_sessions_lock);
+                       spin_unlock(&sc->sc_sessions_lock);
                        return (ENOMEM);
                }
                /* Check if 'ses' is 16-byte aligned. If not, align it. */
@@ -237,7 +237,7 @@ padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        }
        ses->ses_used = 1;
        TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next);
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
 
        error = padlock_cipher_setup(ses, encini);
        if (error != 0) {
@@ -264,7 +264,7 @@ padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses,
        uint32_t sid = ses->ses_id;
 
        if (!locked)
-               spin_lock_wr(&sc->sc_sessions_lock);
+               spin_lock(&sc->sc_sessions_lock);
        TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
        padlock_hash_free(ses);
        bzero(ses, sizeof(*ses));
@@ -272,7 +272,7 @@ padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses,
        ses->ses_id = sid;
        TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next);
        if (!locked)
-               spin_unlock_wr(&sc->sc_sessions_lock);
+               spin_unlock(&sc->sc_sessions_lock);
 }
 
 static int
@@ -282,18 +282,18 @@ padlock_freesession(device_t dev, uint64_t tid)
        struct padlock_session *ses;
        uint32_t sid = ((uint32_t)tid) & 0xffffffff;
 
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, padlock_sessions_head,
            ses_next) {
                if (ses->ses_id == sid)
                        break;
        }
        if (ses == NULL) {
-               spin_unlock_wr(&sc->sc_sessions_lock);
+               spin_unlock(&sc->sc_sessions_lock);
                return (EINVAL);
        }
        padlock_freesession_one(sc, ses, 1);
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
        return (0);
 }
 
@@ -347,13 +347,13 @@ padlock_process(device_t dev, struct cryptop *crp, int hint __unused)
                goto out;
        }
 
-       spin_lock_wr(&sc->sc_sessions_lock); /* XXX: was rd lock */
+       spin_lock(&sc->sc_sessions_lock); /* XXX: was rd lock */
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, padlock_sessions_head,
            ses_next) {
                if (ses->ses_id == (crp->crp_sid & 0xffffffff))
                        break;
        }
-       spin_unlock_wr(&sc->sc_sessions_lock); /* XXX: was rd lock */
+       spin_unlock(&sc->sc_sessions_lock); /* XXX: was rd lock */
        if (ses == NULL) {
                error = EINVAL;
                goto out;
index 03edf31..c2d9ee5 100644 (file)
@@ -50,8 +50,8 @@ ACPI_MODULE_NAME("SYNCH")
 
 MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
 
-#define AS_LOCK(as)            spin_lock_wr(&(as)->as_spin)
-#define AS_UNLOCK(as)          spin_unlock_wr(&(as)->as_spin)
+#define AS_LOCK(as)            spin_lock(&(as)->as_spin)
+#define AS_UNLOCK(as)          spin_unlock(&(as)->as_spin)
 #define AS_LOCK_DECL
 
 /*
@@ -375,7 +375,7 @@ _AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
 AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
 #endif
 {
-    spin_lock_wr(&Spin->lock);
+    spin_lock(&Spin->lock);
 
 #ifdef ACPI_DEBUG_LOCKS
     if (Spin->owner) {
@@ -408,7 +408,7 @@ AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
     Spin->func = "";
     Spin->line = 0;
 #endif
-    spin_unlock_wr(&Spin->lock);
+    spin_unlock(&Spin->lock);
 }
 
 /* Section 5.2.9.1:  global lock acquire/release functions */
index e07e04c..98f3e7b 100644 (file)
@@ -354,10 +354,10 @@ glxsb_detach(device_t dev)
        struct glxsb_softc *sc = device_get_softc(dev);
        struct glxsb_session *ses;
 
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH(ses, &sc->sc_sessions, ses_next) {
                if (ses->ses_used) {
-                       spin_unlock_wr(&sc->sc_sessions_lock);
+                       spin_unlock(&sc->sc_sessions_lock);
                        device_printf(dev,
                                "cannot detach, sessions still active.\n");
                        return (EBUSY);
@@ -368,7 +368,7 @@ glxsb_detach(device_t dev)
                TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
                kfree(ses, M_GLXSB);
        }
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
        crypto_unregister_all(sc->sc_cid);
 #if 0
        /* XXX: need implementation of callout_drain or workaround */
@@ -590,12 +590,12 @@ glxsb_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
         * allocate one.
         */
 
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        ses = TAILQ_FIRST(&sc->sc_sessions);
        if (ses == NULL || ses->ses_used) {
                ses = kmalloc(sizeof(*ses), M_GLXSB, M_NOWAIT | M_ZERO);
                if (ses == NULL) {
-                       spin_unlock_wr(&sc->sc_sessions_lock);
+                       spin_unlock(&sc->sc_sessions_lock);
                        return (ENOMEM);
                }
                ses->ses_id = sc->sc_sid++;
@@ -604,7 +604,7 @@ glxsb_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
        }
        ses->ses_used = 1;
        TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next);
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
 
        if (encini->cri_alg == CRYPTO_AES_CBC) {
                if (encini->cri_klen != 128) {
@@ -641,13 +641,13 @@ glxsb_crypto_freesession(device_t dev, uint64_t tid)
        if (sc == NULL)
                return (EINVAL);
 
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
                if (ses->ses_id == sid)
                        break;
        }
        if (ses == NULL) {
-               spin_unlock_wr(&sc->sc_sessions_lock);
+               spin_unlock(&sc->sc_sessions_lock);
                return (EINVAL);
        }
        TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
@@ -656,7 +656,7 @@ glxsb_crypto_freesession(device_t dev, uint64_t tid)
        ses->ses_used = 0;
        ses->ses_id = sid;
        TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next);
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
 
        return (0);
 }
@@ -864,9 +864,9 @@ glxsb_crypto_task(void *arg, int pending)
                        goto out;
        }
 out:
-       spin_lock_wr(&sc->sc_task_mtx);
+       spin_lock(&sc->sc_task_mtx);
        sc->sc_task_count--;
-       spin_unlock_wr(&sc->sc_task_mtx);
+       spin_unlock(&sc->sc_task_mtx);
 
        crp->crp_etype = error;
        crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
@@ -927,20 +927,20 @@ glxsb_crypto_process(device_t dev, struct cryptop *crp, int hint)
        }
 
        sid = crp->crp_sid & 0xffffffff;
-       spin_lock_wr(&sc->sc_sessions_lock);
+       spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
                if (ses->ses_id == sid)
                        break;
        }
-       spin_unlock_wr(&sc->sc_sessions_lock);
+       spin_unlock(&sc->sc_sessions_lock);
        if (ses == NULL || !ses->ses_used) {
                error = EINVAL;
                goto fail;
        }
 
-       spin_lock_wr(&sc->sc_task_mtx);
+       spin_lock(&sc->sc_task_mtx);
        if (sc->sc_task_count != 0) {
-               spin_unlock_wr(&sc->sc_task_mtx);
+               spin_unlock(&sc->sc_task_mtx);
                return (ERESTART);
        }
        sc->sc_task_count++;
@@ -949,7 +949,7 @@ glxsb_crypto_process(device_t dev, struct cryptop *crp, int hint)
        sc->sc_to.to_enccrd = enccrd;
        sc->sc_to.to_crp = crp;
        sc->sc_to.to_ses = ses;
-       spin_unlock_wr(&sc->sc_task_mtx);
+       spin_unlock(&sc->sc_task_mtx);
        /* XXX: thread taskqueues ? */
        taskqueue_enqueue(sc->sc_tq, &sc->sc_cryptotask);
        return(0);
index ce3dacf..41d49d9 100644 (file)
@@ -164,9 +164,9 @@ ata_detach(device_t dev)
        return ENXIO;
 
     /* grap the channel lock so no new requests gets launched */
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->state_mtx);
     ch->state |= ATA_STALL_QUEUE;
-    spin_unlock_wr(&ch->state_mtx);
+    spin_unlock(&ch->state_mtx);
 
     /* detach & delete all children */
     if (!device_get_children(dev, &children, &nchildren)) {
@@ -205,14 +205,14 @@ ata_reinit(device_t dev)
        tsleep(&dev, 0, "atarini", 1);
 
     /* catch eventual request in ch->running */
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->state_mtx);
     if ((request = ch->running))
        callout_stop(&request->callout);
     ch->running = NULL;
 
     /* unconditionally grap the channel lock */
     ch->state |= ATA_STALL_QUEUE;
-    spin_unlock_wr(&ch->state_mtx);
+    spin_unlock(&ch->state_mtx);
 
     /* reset the controller HW, the channel and device(s) */
     ATA_RESET(dev);
@@ -258,9 +258,9 @@ ata_reinit(device_t dev)
     }
 
     /* we're done release the channel for new work */
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->state_mtx);
     ch->state = ATA_IDLE;
-    spin_unlock_wr(&ch->state_mtx);
+    spin_unlock(&ch->state_mtx);
     ATA_LOCKING(dev, ATA_LF_UNLOCK);
 
     if (bootverbose)
@@ -282,13 +282,13 @@ ata_suspend(device_t dev)
 
     /* wait for the channel to be IDLE or detached before suspending */
     while (ch->r_irq) {
-       spin_lock_wr(&ch->state_mtx);
+       spin_lock(&ch->state_mtx);
        if (ch->state == ATA_IDLE) {
            ch->state = ATA_ACTIVE;
-           spin_unlock_wr(&ch->state_mtx);
+           spin_unlock(&ch->state_mtx);
            break;
        }
-       spin_unlock_wr(&ch->state_mtx);
+       spin_unlock(&ch->state_mtx);
        tsleep(ch, 0, "atasusp", hz/10);
     }
     ATA_LOCKING(dev, ATA_LF_UNLOCK);
@@ -319,7 +319,7 @@ ata_interrupt(void *data)
     struct ata_channel *ch = (struct ata_channel *)data;
     struct ata_request *request;
 
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->state_mtx);
     do {
        /*
         * Ignore interrupt if its not for us.  This may also have the
@@ -356,13 +356,13 @@ ata_interrupt(void *data)
            ch->running = NULL;
            if (ch->state == ATA_ACTIVE)
                ch->state = ATA_IDLE;
-           spin_unlock_wr(&ch->state_mtx);
+           spin_unlock(&ch->state_mtx);
            ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
            ata_finish(request);
            return 1;
        }
     } while (0);
-    spin_unlock_wr(&ch->state_mtx);
+    spin_unlock(&ch->state_mtx);
     return 0;
 }
 
index e775db6..6607d6f 100644 (file)
@@ -309,9 +309,9 @@ ata_sata_phy_event(void *context, int dummy)
                    device_delete_child(tp->dev, children[i]);
            kfree(children, M_TEMP);
        }    
-       spin_lock_wr(&ch->state_mtx);
+       spin_lock(&ch->state_mtx);
        ch->state = ATA_IDLE;
-       spin_unlock_wr(&ch->state_mtx);
+       spin_unlock(&ch->state_mtx);
        if (bootverbose)
            device_printf(tp->dev, "DISCONNECTED\n");
     }
@@ -3993,14 +3993,14 @@ ata_promise_mio_reset(device_t dev)
                  ~0x00003f9f) | (ch->unit + 1));
 
        /* softreset HOST module */ /* XXX SOS what about other outstandings */
-       spin_lock_wr(&hpktp->mtx);
+       spin_lock(&hpktp->mtx);
        ATA_OUTL(ctlr->r_res2, 0xc012c,
                 (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f) | (1 << 11));
        DELAY(10);
        ATA_OUTL(ctlr->r_res2, 0xc012c,
                 (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f));
        hpktp->busy = 0;
-       spin_unlock_wr(&hpktp->mtx);
+       spin_unlock(&hpktp->mtx);
        ata_generic_reset(dev);
        break;
 
@@ -4292,7 +4292,7 @@ ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
 {
     struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev);
 
-    spin_lock_wr(&hpktp->mtx);
+    spin_lock(&hpktp->mtx);
     if (hpktp->busy) {
        struct host_packet *hp = 
            kmalloc(sizeof(struct host_packet), M_TEMP, M_INTWAIT | M_ZERO);
@@ -4303,7 +4303,7 @@ ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
        hpktp->busy = 1;
        ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt);
     }
-    spin_unlock_wr(&hpktp->mtx);
+    spin_unlock(&hpktp->mtx);
 }
 
 static void
@@ -4312,7 +4312,7 @@ ata_promise_next_hpkt(struct ata_pci_controller *ctlr)
     struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev);
     struct host_packet *hp;
 
-    spin_lock_wr(&hpktp->mtx);
+    spin_lock(&hpktp->mtx);
     if ((hp = TAILQ_FIRST(&hpktp->queue))) {
        TAILQ_REMOVE(&hpktp->queue, hp, chain);
        ATA_OUTL(ctlr->r_res2, 0x000c0100, hp->addr);
@@ -4320,7 +4320,7 @@ ata_promise_next_hpkt(struct ata_pci_controller *ctlr)
     }
     else
        hpktp->busy = 0;
-    spin_unlock_wr(&hpktp->mtx);
+    spin_unlock(&hpktp->mtx);
 }
 
 
@@ -5791,7 +5791,7 @@ ata_serialize(device_t dev, int flags)
     else
        serial = device_get_ivars(ctlr->dev);
 
-    spin_lock_wr(&serial->locked_mtx);
+    spin_lock(&serial->locked_mtx);
     switch (flags) {
     case ATA_LF_LOCK:
        if (serial->locked_ch == -1)
@@ -5806,7 +5806,7 @@ ata_serialize(device_t dev, int flags)
            if (serial->restart_ch != -1) {
                if ((ch = ctlr->interrupt[serial->restart_ch].argument)) {
                    serial->restart_ch = -1;
-                   spin_unlock_wr(&serial->locked_mtx);
+                   spin_unlock(&serial->locked_mtx);
                    ata_start(ch->dev);
                    return -1;
                }
@@ -5818,7 +5818,7 @@ ata_serialize(device_t dev, int flags)
        break;
     }
     res = serial->locked_ch;
-    spin_unlock_wr(&serial->locked_mtx);
+    spin_unlock(&serial->locked_mtx);
     return res;
 }
 
index 8b8ffe6..8aec1d3 100644 (file)
@@ -66,12 +66,12 @@ ata_drop_requests(device_t dev)
     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
     struct ata_request *request, *tmp;
 
-    spin_lock_wr(&ch->queue_mtx);
+    spin_lock(&ch->queue_mtx);
     TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
        TAILQ_REMOVE(&ch->ata_queue, request, chain);
        request->result = ENXIO;
     }
-    spin_unlock_wr(&ch->queue_mtx);
+    spin_unlock(&ch->queue_mtx);
 }
 
 void
@@ -96,22 +96,22 @@ ata_queue_request(struct ata_request *request)
 
     /* in ATA_STALL_QUEUE state we call HW directly */
     if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
-       spin_lock_wr(&ch->state_mtx);
+       spin_lock(&ch->state_mtx);
        ch->running = request;
        if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
            ch->running = NULL;
            if (!request->callback) 
                spin_uninit(&request->done);
-           spin_unlock_wr(&ch->state_mtx);
+           spin_unlock(&ch->state_mtx);
            return;
        }
        /* interlock against interrupt */
        request->flags |= ATA_R_HWCMDQUEUED;
-       spin_unlock_wr(&ch->state_mtx);
+       spin_unlock(&ch->state_mtx);
     }
     /* otherwise put request on the locked queue at the specified location */
     else  {
-       spin_lock_wr(&ch->queue_mtx);
+       spin_lock(&ch->queue_mtx);
        if (request->flags & ATA_R_AT_HEAD) {
            TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain);
        } else if (request->flags & ATA_R_ORDERED) {
@@ -120,7 +120,7 @@ ata_queue_request(struct ata_request *request)
            TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
            ch->transition = NULL;
        }
-       spin_unlock_wr(&ch->queue_mtx);
+       spin_unlock(&ch->queue_mtx);
        ATA_DEBUG_RQ(request, "queued");
        ata_start(ch->dev);
     }
@@ -134,12 +134,12 @@ ata_queue_request(struct ata_request *request)
        ATA_DEBUG_RQ(request, "wait for completion");
        if (!dumping) {
            /* interlock against wakeup */
-           spin_lock_wr(&request->done);
+           spin_lock(&request->done);
            /* check if the request was completed already */
            if (!(request->flags & ATA_R_COMPLETED))
                ssleep(request, &request->done, 0, "ATA request completion "
                       "wait", request->timeout * hz * 4);
-           spin_unlock_wr(&request->done);
+           spin_unlock(&request->done);
            /* check if the request was completed while sleeping */
            if (!(request->flags & ATA_R_COMPLETED)) {
                /* apparently not */
@@ -213,7 +213,7 @@ ata_start(device_t dev)
     int dependencies = 0;
 
     /* if we have a request on the queue try to get it running */
-    spin_lock_wr(&ch->queue_mtx);
+    spin_lock(&ch->queue_mtx);
     if ((request = TAILQ_FIRST(&ch->ata_queue))) {
 
        /* we need the locking function to get the lock for this channel */
@@ -221,16 +221,16 @@ ata_start(device_t dev)
 
            /* check for composite dependencies */
            if ((cptr = request->composite)) {
-               spin_lock_wr(&cptr->lock);
+               spin_lock(&cptr->lock);
                if ((request->flags & ATA_R_WRITE) &&
                    (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) {
                    dependencies = 1;
                }
-               spin_unlock_wr(&cptr->lock);
+               spin_unlock(&cptr->lock);
            }
 
            /* check we are in the right state and has no dependencies */
-           spin_lock_wr(&ch->state_mtx);
+           spin_lock(&ch->state_mtx);
            if (ch->state == ATA_IDLE && !dependencies) {
                ATA_DEBUG_RQ(request, "starting");
 
@@ -243,8 +243,8 @@ ata_start(device_t dev)
                if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
                    ch->running = NULL;
                    ch->state = ATA_IDLE;
-                   spin_unlock_wr(&ch->state_mtx);
-                   spin_unlock_wr(&ch->queue_mtx);
+                   spin_unlock(&ch->state_mtx);
+                   spin_unlock(&ch->queue_mtx);
                    ATA_LOCKING(dev, ATA_LF_UNLOCK);
                    ata_finish(request);
                    return;
@@ -254,17 +254,17 @@ ata_start(device_t dev)
                request->flags |= ATA_R_HWCMDQUEUED;
 
                if (dumping) {
-                   spin_unlock_wr(&ch->state_mtx);
-                   spin_unlock_wr(&ch->queue_mtx);
+                   spin_unlock(&ch->state_mtx);
+                   spin_unlock(&ch->queue_mtx);
                    while (!ata_interrupt(ch))
                        DELAY(10);
                    return;
                }       
            }
-           spin_unlock_wr(&ch->state_mtx);
+           spin_unlock(&ch->state_mtx);
        }
     }
-    spin_unlock_wr(&ch->queue_mtx);
+    spin_unlock(&ch->queue_mtx);
 }
 
 void
@@ -483,7 +483,7 @@ ata_completed(void *context, int dummy)
     if ((composite = request->composite)) {
        int index = 0;
 
-       spin_lock_wr(&composite->lock);
+       spin_lock(&composite->lock);
 
        /* update whats done */
        if (request->flags & ATA_R_READ)
@@ -498,7 +498,7 @@ ata_completed(void *context, int dummy)
            index = composite->wr_needed & ~composite->wr_done;
        }
 
-       spin_unlock_wr(&composite->lock);
+       spin_unlock(&composite->lock);
 
        /* if we have any ready candidates kick them off */
        if (index) {
@@ -515,9 +515,9 @@ ata_completed(void *context, int dummy)
     if (request->callback)
        (request->callback)(request);
     else {
-       spin_lock_wr(&request->done);
+       spin_lock(&request->done);
        request->flags |= ATA_R_COMPLETED;
-       spin_unlock_wr(&request->done);
+       spin_unlock(&request->done);
        wakeup_one(request);
     }
 
@@ -532,7 +532,7 @@ ata_timeout(struct ata_request *request)
     struct ata_channel *ch = device_get_softc(request->parent);
 
     /* acquire state_mtx, softclock_handler() doesn't do this for us */
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->state_mtx);
 
     /*request->flags |= ATA_R_DEBUG;*/
     ATA_DEBUG_RQ(request, "timeout");
@@ -545,12 +545,12 @@ ata_timeout(struct ata_request *request)
      */
     if (ch->state == ATA_ACTIVE) {
        request->flags |= ATA_R_TIMEOUT;
-       spin_unlock_wr(&ch->state_mtx);
+       spin_unlock(&ch->state_mtx);
        ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
        ata_finish(request);
     }
     else {
-       spin_unlock_wr(&ch->state_mtx);
+       spin_unlock(&ch->state_mtx);
     }
 }
 
@@ -563,8 +563,8 @@ ata_fail_requests(device_t dev)
     TAILQ_INIT(&fail_requests);
 
     /* grap all channel locks to avoid races */
-    spin_lock_wr(&ch->queue_mtx);
-    spin_lock_wr(&ch->state_mtx);
+    spin_lock(&ch->queue_mtx);
+    spin_lock(&ch->state_mtx);
 
     /* do we have any running request to care about ? */
     if ((request = ch->running) && (!dev || request->dev == dev)) {
@@ -585,8 +585,8 @@ ata_fail_requests(device_t dev)
        }
     }
 
-    spin_unlock_wr(&ch->state_mtx);
-    spin_unlock_wr(&ch->queue_mtx);
+    spin_unlock(&ch->state_mtx);
+    spin_unlock(&ch->queue_mtx);
    
     /* finish up all requests collected above */
     TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) {
index 9907500..d648319 100644 (file)
@@ -699,7 +699,7 @@ ata_raid_done(struct ata_request *request)
 
                /* is this a rebuild composite */
                if ((composite = request->composite)) {
-                   spin_lock_wr(&composite->lock);
+                   spin_lock(&composite->lock);
                
                    /* handle the read part of a rebuild composite */
                    if (request->flags & ATA_R_READ) {
@@ -735,7 +735,7 @@ ata_raid_done(struct ata_request *request)
                                finished = 1;
                        }
                    }
-                   spin_unlock_wr(&composite->lock);
+                   spin_unlock(&composite->lock);
                }
 
                /* if read failed retry on the mirror */
@@ -756,7 +756,7 @@ ata_raid_done(struct ata_request *request)
            else if (bbp->b_cmd == BUF_CMD_WRITE) {
                /* do we have a mirror or rebuild to deal with ? */
                if ((composite = request->composite)) {
-                   spin_lock_wr(&composite->lock);
+                   spin_lock(&composite->lock);
                    if (composite->wr_done & (1 << mirror)) {
                        if (request->result) {
                            if (composite->request[mirror]->result) {
@@ -779,7 +779,7 @@ ata_raid_done(struct ata_request *request)
                        if (!composite->residual)
                            finished = 1;
                    }
-                   spin_unlock_wr(&composite->lock);
+                   spin_unlock(&composite->lock);
                }
                /* no mirror we are done */
                else {
@@ -912,7 +912,7 @@ ata_raid_config_changed(struct ar_softc *rdp, int writeback)
 {
     int disk, count, status;
 
-    spin_lock_wr(&rdp->lock);
+    spin_lock(&rdp->lock);
     /* set default all working mode */
     status = rdp->status;
     rdp->status &= ~AR_S_DEGRADED;
@@ -988,7 +988,7 @@ ata_raid_config_changed(struct ar_softc *rdp, int writeback)
            writeback = 1;
        }
     }
-    spin_unlock_wr(&rdp->lock);
+    spin_unlock(&rdp->lock);
     if (writeback)
        ata_raid_write_metadata(rdp);
 
index 06482f8..5ec5200 100644 (file)
@@ -935,7 +935,7 @@ ata_usbchannel_locking(device_t dev, int flags)
     int res = -1;
 
 
-    spin_lock_wr(&sc->locked_mtx);
+    spin_lock(&sc->locked_mtx);
     switch (flags) {
     case ATA_LF_LOCK:
        if (sc->locked_ch == NULL)
@@ -950,7 +950,7 @@ ata_usbchannel_locking(device_t dev, int flags)
            if (sc->restart_ch) {
                ch = sc->restart_ch;
                sc->restart_ch = NULL;
-               spin_unlock_wr(&sc->locked_mtx);
+               spin_unlock(&sc->locked_mtx);
                ata_start(ch->dev);
                return res;
            }
@@ -962,7 +962,7 @@ ata_usbchannel_locking(device_t dev, int flags)
     }
     if (sc->locked_ch)
        res = sc->locked_ch->unit;
-    spin_unlock_wr(&sc->locked_mtx);
+    spin_unlock(&sc->locked_mtx);
     return res;
 }
 
index 54a0fc4..d019ba4 100644 (file)
@@ -251,9 +251,9 @@ atapi_cam_detach(device_t dev)
     get_mplock();
     xpt_freeze_simq(scp->sim, 1 /*count*/);
     rel_mplock();
-    spin_lock_wr(&scp->state_lock);
+    spin_lock(&scp->state_lock);
     scp->flags |= DETACHING;
-    spin_unlock_wr(&scp->state_lock);
+    spin_unlock(&scp->state_lock);
     free_softc(scp);
     return (0);
 }
@@ -283,7 +283,7 @@ reinit_bus(struct atapi_xpt_softc *scp, enum reinit_reason reason) {
        return;
     }
 
-    spin_lock_wr(&scp->state_lock);
+    spin_lock(&scp->state_lock);
     old_atadev[0] = scp->atadev[0];
     old_atadev[1] = scp->atadev[1];
     scp->atadev[0] = NULL;
@@ -303,7 +303,7 @@ reinit_bus(struct atapi_xpt_softc *scp, enum reinit_reason reason) {
     }
     dev_changed = (old_atadev[0] != scp->atadev[0])
            || (old_atadev[1] != scp->atadev[1]);
-    spin_unlock_wr(&scp->state_lock);
+    spin_unlock(&scp->state_lock);
     kfree(children, M_TEMP);
 
     switch (reason) {
@@ -375,11 +375,11 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
        cpi->protocol_version = SCSI_REV_2;
 
        if (softc->ata_ch && tid != CAM_TARGET_WILDCARD) {
-           spin_lock_wr(&softc->state_lock);
+           spin_lock(&softc->state_lock);
            if (softc->atadev[tid] == NULL) {
                ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                xpt_done(ccb);
-               spin_unlock_wr(&softc->state_lock);
+               spin_unlock(&softc->state_lock);
                return;
            }
            switch (softc->atadev[ccb_h->target_id]->mode) {
@@ -412,7 +412,7 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
            default:
                break;
            }
-           spin_unlock_wr(&softc->state_lock);
+           spin_unlock(&softc->state_lock);
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
@@ -473,18 +473,18 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
 
        CAM_DEBUG(ccb_h->path, CAM_DEBUG_SUBTRACE, ("XPT_SCSI_IO\n"));
 
-       spin_lock_wr(&softc->state_lock);
+       spin_lock(&softc->state_lock);
        if (softc->flags & DETACHING) {
            ccb->ccb_h.status = CAM_REQ_ABORTED;
            xpt_done(ccb);
-           spin_unlock_wr(&softc->state_lock);
+           spin_unlock(&softc->state_lock);
            return;
        }
 
        if (softc->atadev[tid] == NULL) {
            ccb->ccb_h.status = CAM_DEV_NOT_THERE;
            xpt_done(ccb);
-           spin_unlock_wr(&softc->state_lock);
+           spin_unlock(&softc->state_lock);
            return;
        }
 
@@ -492,7 +492,7 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
        if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
            kprintf("XPT_SCSI_IO received but already in progress?\n");
            xpt_done(ccb);
-           spin_unlock_wr(&softc->state_lock);
+           spin_unlock(&softc->state_lock);
            return;
        }
        if (lid > 0) {
@@ -625,7 +625,7 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
        TAILQ_INSERT_TAIL(&softc->pending_hcbs, hcb, chain);
        hcb->flags |= QUEUED;
        ccb_h->status |= CAM_SIM_QUEUED;
-       spin_unlock_wr(&softc->state_lock);
+       spin_unlock(&softc->state_lock);
 
        ata_queue_request(request);
        return;
@@ -644,7 +644,7 @@ action_oom:
        ata_free_request(request);
     if (hcb != NULL)
        free_hcb(hcb);
-    spin_unlock_wr(&softc->state_lock);
+    spin_unlock(&softc->state_lock);
     get_mplock();
     xpt_print_path(ccb_h->path);
     kprintf("out of memory, freezing queue.\n");
@@ -656,7 +656,7 @@ action_oom:
     return;
 
 action_invalid:
-    spin_unlock_wr(&softc->state_lock);
+    spin_unlock(&softc->state_lock);
     ccb_h->status = CAM_REQ_INVALID;
     xpt_done(ccb);
     return;
@@ -748,9 +748,9 @@ atapi_cb(struct ata_request *request)
        }
     }
 
-    spin_lock_wr(&scp->state_lock);
+    spin_lock(&scp->state_lock);
     free_hcb_and_ccb_done(hcb, rc);
-    spin_unlock_wr(&scp->state_lock);
+    spin_unlock(&scp->state_lock);
 
     ata_free_request(request);
 }
@@ -874,11 +874,11 @@ free_softc(struct atapi_xpt_softc *scp)
     struct atapi_hcb *hcb;
 
     if (scp != NULL) {
-       spin_lock_wr(&scp->state_lock);
+       spin_lock(&scp->state_lock);
        TAILQ_FOREACH(hcb, &scp->pending_hcbs, chain) {
            free_hcb_and_ccb_done(hcb, CAM_UNREC_HBA_ERROR);
        }
-       spin_unlock_wr(&scp->state_lock);
+       spin_unlock(&scp->state_lock);
        get_mplock();
        if (scp->path != NULL) {
            setup_async_cb(scp, 0);
index d136e73..236552d 100644 (file)
@@ -192,7 +192,7 @@ static int sc_allocate_keyboard(sc_softc_t *sc, int unit);
 static void
 syscons_lock(void)
 {
-       spin_lock_wr(&syscons_spin);
+       spin_lock(&syscons_spin);
 }
 
 /*
@@ -201,13 +201,13 @@ syscons_lock(void)
 static int
 syscons_lock_nonblock(void)
 {
-       return(spin_trylock_wr(&syscons_spin));
+       return(spin_trylock(&syscons_spin));
 }
 
 static void
 syscons_unlock(void)
 {
-       spin_unlock_wr(&syscons_spin);
+       spin_unlock(&syscons_spin);
 }
 
 /*
index 9969780..0bf4e45 100644 (file)
@@ -71,9 +71,9 @@
 #define E1000_MUTEX                     struct spinlock
 #define E1000_MUTEX_INIT(spin)          spin_init(spin)
 #define E1000_MUTEX_DESTROY(spin)       spin_uninit(spin)
-#define E1000_MUTEX_LOCK(spin)          spin_lock_wr(spin)
-#define E1000_MUTEX_TRYLOCK(spin)       spin_trylock_wr(spin)
-#define E1000_MUTEX_UNLOCK(spin)        spin_unlock_wr(spin)
+#define E1000_MUTEX_LOCK(spin)          spin_lock(spin)
+#define E1000_MUTEX_TRYLOCK(spin)       spin_trylock(spin)
+#define E1000_MUTEX_UNLOCK(spin)        spin_unlock(spin)
 
 typedef uint64_t       u64;
 typedef uint32_t       u32;
index 72a5ebf..f69cd01 100644 (file)
@@ -477,13 +477,13 @@ typedef struct _DESCRIPTOR_PAIR
 #define        EM_CORE_LOCK_DESTROY(_sc)       spin_uninit(&(_sc)->core_spin)
 #define        EM_TX_LOCK_DESTROY(_sc)         spin_uninit(&(_sc)->tx_spin)
 #define        EM_RX_LOCK_DESTROY(_sc)         spin_uninit(&(_sc)->rx_spin)
-#define        EM_CORE_LOCK(_sc)               spin_lock_wr(&(_sc)->core_spin)
-#define        EM_TX_LOCK(_sc)                 spin_lock_wr(&(_sc)->tx_spin)
-#define        EM_TX_TRYLOCK(_sc)              spin_trylock_wr(&(_sc)->tx_spin)
-#define        EM_RX_LOCK(_sc)                 spin_lock_wr(&(_sc)->rx_spin)
-#define        EM_CORE_UNLOCK(_sc)             spin_unlock_wr(&(_sc)->core_spin)
-#define        EM_TX_UNLOCK(_sc)               spin_unlock_wr(&(_sc)->tx_spin)
-#define        EM_RX_UNLOCK(_sc)               spin_unlock_wr(&(_sc)->rx_spin)
+#define        EM_CORE_LOCK(_sc)               spin_lock(&(_sc)->core_spin)
+#define        EM_TX_LOCK(_sc)                 spin_lock(&(_sc)->tx_spin)
+#define        EM_TX_TRYLOCK(_sc)              spin_trylock(&(_sc)->tx_spin)
+#define        EM_RX_LOCK(_sc)                 spin_lock(&(_sc)->rx_spin)
+#define        EM_CORE_UNLOCK(_sc)             spin_unlock(&(_sc)->core_spin)
+#define        EM_TX_UNLOCK(_sc)               spin_unlock(&(_sc)->tx_spin)
+#define        EM_RX_UNLOCK(_sc)               spin_unlock(&(_sc)->rx_spin)
 #define        EM_CORE_LOCK_ASSERT(_sc)
 #define        EM_TX_LOCK_ASSERT(_sc)
 
index ccc7861..d6108a3 100644 (file)
@@ -481,19 +481,19 @@ struct igb_rx_buf {
 
 #define        IGB_CORE_LOCK_INIT(_sc, _name)  spin_init(&(_sc)->core_spin)
 #define        IGB_CORE_LOCK_DESTROY(_sc)      spin_uninit(&(_sc)->core_spin)
-#define        IGB_CORE_LOCK(_sc)              spin_lock_wr(&(_sc)->core_spin)
-#define        IGB_CORE_UNLOCK(_sc)            spin_unlock_wr(&(_sc)->core_spin)
+#define        IGB_CORE_LOCK(_sc)              spin_lock(&(_sc)->core_spin)
+#define        IGB_CORE_UNLOCK(_sc)            spin_unlock(&(_sc)->core_spin)
 #define        IGB_CORE_LOCK_ASSERT(_sc)       
 
 #define        IGB_TX_LOCK_DESTROY(_sc)        spin_uninit(&(_sc)->tx_spin)
-#define        IGB_TX_LOCK(_sc)                spin_lock_wr(&(_sc)->tx_spin)
-#define        IGB_TX_UNLOCK(_sc)              spin_unlock_wr(&(_sc)->tx_spin)
-#define        IGB_TX_TRYLOCK(_sc)             spin_trylock_wr(&(_sc)->tx_spin)
+#define        IGB_TX_LOCK(_sc)                spin_lock(&(_sc)->tx_spin)
+#define        IGB_TX_UNLOCK(_sc)              spin_unlock(&(_sc)->tx_spin)
+#define        IGB_TX_TRYLOCK(_sc)             spin_trylock(&(_sc)->tx_spin)
 #define        IGB_TX_LOCK_ASSERT(_sc)         
 
 #define        IGB_RX_LOCK_DESTROY(_sc)        spin_uninit(&(_sc)->rx_spin)
-#define        IGB_RX_LOCK(_sc)                spin_lock_wr(&(_sc)->rx_spin)
-#define        IGB_RX_UNLOCK(_sc)              spin_unlock_wr(&(_sc)->rx_spin)
+#define        IGB_RX_LOCK(_sc)                spin_lock(&(_sc)->rx_spin)
+#define        IGB_RX_UNLOCK(_sc)              spin_unlock(&(_sc)->rx_spin)
 #define        IGB_TX_LOCK_ASSERT(_sc)         
 
 #endif /* _IGB_H_DEFINED_ */
index edc5580..144953a 100644 (file)
@@ -229,10 +229,10 @@ tw_osli_req_q_init(struct twa_softc *sc, TW_UINT8 q_type)
 static __inline        TW_VOID
 tw_osli_req_q_insert_head(struct tw_osli_req_context *req, TW_UINT8 q_type)
 {
-       spin_lock_wr(req->ctlr->q_lock);
+       spin_lock(req->ctlr->q_lock);
        TW_CL_Q_INSERT_HEAD(&(req->ctlr->req_q_head[q_type]), &(req->link));
        TW_OSLI_Q_INSERT(req->ctlr, q_type);
-       spin_unlock_wr(req->ctlr->q_lock);
+       spin_unlock(req->ctlr->q_lock);
 }
 
 
@@ -241,10 +241,10 @@ tw_osli_req_q_insert_head(struct tw_osli_req_context *req, TW_UINT8 q_type)
 static __inline        TW_VOID
 tw_osli_req_q_insert_tail(struct tw_osli_req_context *req, TW_UINT8 q_type)
 {
-       spin_lock_wr(req->ctlr->q_lock);
+       spin_lock(req->ctlr->q_lock);
        TW_CL_Q_INSERT_TAIL(&(req->ctlr->req_q_head[q_type]), &(req->link));
        TW_OSLI_Q_INSERT(req->ctlr, q_type);
-       spin_unlock_wr(req->ctlr->q_lock);
+       spin_unlock(req->ctlr->q_lock);
 }
 
 
@@ -256,7 +256,7 @@ tw_osli_req_q_remove_head(struct twa_softc *sc, TW_UINT8 q_type)
        struct tw_osli_req_context      *req = NULL;
        struct tw_cl_link               *link;
 
-       spin_lock_wr(sc->q_lock);
+       spin_lock(sc->q_lock);
        if ((link = TW_CL_Q_FIRST_ITEM(&(sc->req_q_head[q_type]))) !=
                TW_CL_NULL) {
                req = TW_CL_STRUCT_HEAD(link,
@@ -264,7 +264,7 @@ tw_osli_req_q_remove_head(struct twa_softc *sc, TW_UINT8 q_type)
                TW_CL_Q_REMOVE_ITEM(&(sc->req_q_head[q_type]), &(req->link));
                TW_OSLI_Q_REMOVE(sc, q_type);
        }
-       spin_unlock_wr(sc->q_lock);
+       spin_unlock(sc->q_lock);
        return(req);
 }
 
@@ -274,10 +274,10 @@ tw_osli_req_q_remove_head(struct twa_softc *sc, TW_UINT8 q_type)
 static __inline TW_VOID
 tw_osli_req_q_remove_item(struct tw_osli_req_context *req, TW_UINT8 q_type)
 {
-       spin_lock_wr(req->ctlr->q_lock);
+       spin_lock(req->ctlr->q_lock);
        TW_CL_Q_REMOVE_ITEM(&(req->ctlr->req_q_head[q_type]), &(req->link));
        TW_OSLI_Q_REMOVE(req->ctlr, q_type);
-       spin_unlock_wr(req->ctlr->q_lock);
+       spin_unlock(req->ctlr->q_lock);
 }
 
 
index 6e19c70..045531f 100644 (file)
@@ -848,11 +848,11 @@ twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
        lock = (struct spinlock *)lock_arg;
        switch (op) {
        case BUS_DMA_LOCK:
-               spin_lock_wr(lock);
+               spin_lock(lock);
                break;
 
        case BUS_DMA_UNLOCK:
-               spin_unlock_wr(lock);
+               spin_unlock(lock);
                break;
 
        default:
@@ -1376,12 +1376,12 @@ tw_osli_map_request(struct tw_osli_req_context *req)
                 */
                if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
                        /* Lock against multiple simultaneous ioctl calls. */
-                       spin_lock_wr(sc->io_lock);
+                       spin_lock(sc->io_lock);
                        error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
                                req->data, req->length,
                                twa_map_load_data_callback, req,
                                BUS_DMA_WAITOK);
-                       spin_unlock_wr(sc->io_lock);
+                       spin_unlock(sc->io_lock);
                } else {
                        /*
                         * There's only one CAM I/O thread running at a time.
@@ -1402,11 +1402,11 @@ tw_osli_map_request(struct tw_osli_req_context *req)
                                 * in ...tag_create should protect the access
                                 * of ...FLAGS_MAPPED from the callback.
                                 */
-                               spin_lock_wr(sc->io_lock);
+                               spin_lock(sc->io_lock);
                                if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
                                        req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
                                tw_osli_disallow_new_requests(sc, &(req->req_handle));
-                               spin_unlock_wr(sc->io_lock);
+                               spin_unlock(sc->io_lock);
                                error = 0;
                        } else {
                                /* Free alignment buffer if it was used. */
@@ -1463,7 +1463,7 @@ tw_osli_unmap_request(struct tw_osli_req_context *req)
        if (req->data != NULL) {
                if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
                        /* Lock against multiple simultaneous ioctl calls. */
-                       spin_lock_wr(sc->io_lock);
+                       spin_lock(sc->io_lock);
 
                        if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
                                bus_dmamap_sync(sc->ioctl_tag,
@@ -1484,7 +1484,7 @@ tw_osli_unmap_request(struct tw_osli_req_context *req)
 
                        bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
 
-                       spin_unlock_wr(sc->io_lock);
+                       spin_unlock(sc->io_lock);
                } else {
                        if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
                                bus_dmamap_sync(sc->dma_tag,
index fc4ac71..5330d89 100644 (file)
@@ -90,7 +90,7 @@
  * Return value:       None
  */
 #define tw_osl_get_lock(ctlr_handle, lock)     \
-       spin_lock_wr(lock)
+       spin_lock(lock)
 
 
 
  * Return value:       None
  */
 #define tw_osl_free_lock(ctlr_handle, lock)    \
-       spin_unlock_wr(lock)
+       spin_unlock(lock)
 
 
 
index 0f3db5c..639eb48 100644 (file)
@@ -87,8 +87,8 @@ struct futex {
 struct futex_list futex_list;
 
 #if 0
-#define FUTEX_LOCK(f)          spin_lock_wr(&(f)->f_lck)
-#define FUTEX_UNLOCK(f)                spin_unlock_wr(&(f)->f_lck)
+#define FUTEX_LOCK(f)          spin_lock(&(f)->f_lck)
+#define FUTEX_UNLOCK(f)                spin_unlock(&(f)->f_lck)
 #define FUTEX_INIT(f)          spin_init(&(f)->f_lck)
 #define        FUTEX_SLEEP(f, id, flag, wm, timo)      ssleep((id), &(f)->f_lck, (flag), (wm), (timo))
 #endif
index 0f70a91..6644506 100644 (file)
@@ -132,12 +132,12 @@ ccms_dataspace_destroy(ccms_dataspace_t ds)
 {
     ccms_cst_t cst;
 
-    spin_lock_wr(&ds->spin);
+    spin_lock(&ds->spin);
     RB_SCAN(ccms_rb_tree, &ds->tree, NULL,
            ccms_dataspace_destroy_match, ds);
     cst = ds->delayed_free;
     ds->delayed_free = NULL;
-    spin_unlock_wr(&ds->spin);
+    spin_unlock(&ds->spin);
     ccms_delayed_free(cst);
 }
 
@@ -186,7 +186,7 @@ ccms_lock_get(ccms_dataspace_t ds, ccms_lock_t lock)
     info.cst1 = objcache_get(ccms_oc, M_WAITOK);
     info.cst2 = objcache_get(ccms_oc, M_WAITOK);
 
-    spin_lock_wr(&ds->spin);
+    spin_lock(&ds->spin);
     RB_SCAN(ccms_rb_tree, &ds->tree, ccms_lock_scan_cmp,
            ccms_lock_get_match, &info);
 
@@ -207,7 +207,7 @@ ccms_lock_get(ccms_dataspace_t ds, ccms_lock_t lock)
     }
     cst = ds->delayed_free;
     ds->delayed_free = NULL;
-    spin_unlock_wr(&ds->spin);
+    spin_unlock(&ds->spin);
 
     /*
      * Cleanup
@@ -479,12 +479,12 @@ ccms_lock_put(ccms_dataspace_t ds, ccms_lock_t lock)
     info.cst1 = NULL;
     info.cst2 = NULL;
 
-    spin_lock_wr(&ds->spin);
+    spin_lock(&ds->spin);
     RB_SCAN(ccms_rb_tree, &ds->tree, ccms_lock_scan_cmp,
            ccms_lock_put_match, &info);
     cst = ds->delayed_free;
     ds->delayed_free = NULL;
-    spin_unlock_wr(&ds->spin);
+    spin_unlock(&ds->spin);
 
     ccms_delayed_free(cst);
     if (info.cst1)
index 06ec868..ae700a3 100644 (file)
@@ -23,10 +23,10 @@ _cv_timedwait(struct cv *c, struct lock *l, int timo, int wakesig)
        int flags = wakesig ? PCATCH : 0;
        int error;
 
-       spin_lock_wr(&c->cv_lock);
+       spin_lock(&c->cv_lock);
        tsleep_interlock(c, flags);
        c->cv_waiters++;
-       spin_unlock_wr(&c->cv_lock);
+       spin_unlock(&c->cv_lock);
        if (l != NULL)
                lockmgr(l, LK_RELEASE);
        error = tsleep(c, flags, c->cv_desc, timo);
@@ -39,7 +39,7 @@ _cv_timedwait(struct cv *c, struct lock *l, int timo, int wakesig)
 void
 _cv_signal(struct cv *c, int broadcast)
 {
-       spin_lock_wr(&c->cv_lock);
+       spin_lock(&c->cv_lock);
        if (c->cv_waiters == 0)
                goto out;
 
@@ -52,5 +52,5 @@ _cv_signal(struct cv *c, int broadcast)
        }
 
 out:
-       spin_unlock_wr(&c->cv_lock);
+       spin_unlock(&c->cv_lock);
 }
index 31e1a1f..545f4c6 100644 (file)
@@ -169,12 +169,12 @@ sys_getdtablesize(struct getdtablesize_args *uap)
        struct plimit *limit = p->p_limit;
        int dtsize;
 
-       spin_lock_wr(&limit->p_spin);
+       spin_lock(&limit->p_spin);
        if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                dtsize = INT_MAX;
        else
                dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
-       spin_unlock_wr(&limit->p_spin);
+       spin_unlock(&limit->p_spin);
 
        if (dtsize > maxfilesperproc)
                dtsize = maxfilesperproc;
@@ -495,14 +495,14 @@ retry:
        if (new < 0 || new > dtsize)
                return (EINVAL);
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                return (EBADF);
        }
        if (type == DUP_FIXED && old == new) {
                *res = new;
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                return (0);
        }
        fp = fdp->fd_files[old].fp;
@@ -521,11 +521,11 @@ retry:
         * setup for the next code block.
         */
        if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                error = fdalloc(p, new, &newfd);
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                if (error) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        return (error);
                }
@@ -534,7 +534,7 @@ retry:
                 */
                if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
                        fsetfd_locked(fdp, NULL, newfd);
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
@@ -543,7 +543,7 @@ retry:
                 */
                if (type != DUP_VARIABLE && new != newfd) {
                        fsetfd_locked(fdp, NULL, newfd);
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
@@ -553,7 +553,7 @@ retry:
                 */
                if (old == newfd) {
                        fsetfd_locked(fdp, NULL, newfd);
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
@@ -561,7 +561,7 @@ retry:
                delfp = NULL;
        } else {
                if (fdp->fd_files[new].reserved) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
                        tsleep(fdp, 0, "fdres", hz);
@@ -612,7 +612,7 @@ retry:
         */
        fsetfd_locked(fdp, fp, new);
        fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        fdrop(fp);
        *res = new;
 
@@ -626,15 +626,15 @@ retry:
                        knote_fdclose(delfp, fdp, new);
                closef(delfp, p);
                if (holdleaders) {
-                       spin_lock_wr(&fdp->fd_spin);
+                       spin_lock(&fdp->fd_spin);
                        fdp->fd_holdleaderscount--;
                        if (fdp->fd_holdleaderscount == 0 &&
                            fdp->fd_holdleaderswakeup != 0) {
                                fdp->fd_holdleaderswakeup = 0;
-                               spin_unlock_wr(&fdp->fd_spin);
+                               spin_unlock(&fdp->fd_spin);
                                wakeup(&fdp->fd_holdleaderscount);
                        } else {
-                               spin_unlock_wr(&fdp->fd_spin);
+                               spin_unlock(&fdp->fd_spin);
                        }
                }
        }
@@ -790,18 +790,18 @@ kern_closefrom(int fd)
         * reserved descriptors that have not yet been assigned.  
         * fd_lastfile can change as a side effect of kern_close().
         */
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        while (fd <= fdp->fd_lastfile) {
                if (fdp->fd_files[fd].fp != NULL) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        /* ok if this races another close */
                        if (kern_close(fd) == EINTR)
                                return (EINTR);
-                       spin_lock_wr(&fdp->fd_spin);
+                       spin_lock(&fdp->fd_spin);
                }
                ++fd;
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (0);
 }
 
@@ -832,9 +832,9 @@ kern_close(int fd)
        KKASSERT(p);
        fdp = p->p_fd;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                return (EBADF);
        }
        holdleaders = 0;
@@ -851,20 +851,20 @@ kern_close(int fd)
         * we now hold the fp reference that used to be owned by the descriptor
         * array.
         */
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        if (SLIST_FIRST(&fp->f_klist))
                knote_fdclose(fp, fdp, fd);
        error = closef(fp, p);
        if (holdleaders) {
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                fdp->fd_holdleaderscount--;
                if (fdp->fd_holdleaderscount == 0 &&
                    fdp->fd_holdleaderswakeup != 0) {
                        fdp->fd_holdleaderswakeup = 0;
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        wakeup(&fdp->fd_holdleaderscount);
                } else {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                }
        }
        return (error);
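
Note the discipline both close paths share: fd_holdleaderswakeup is tested and cleared while fd_spin is held, but wakeup() itself runs only after the unlock, keeping the spinlock hold time to a handful of instructions. Reduced to its skeleton, with a hypothetical structure standing in for the fdp fields:

    struct waitcnt {                        /* hypothetical */
            struct spinlock spin;
            int             count;          /* cf. fd_holdleaderscount */
            int             wantwake;       /* cf. fd_holdleaderswakeup */
    };

    static void
    waitcnt_drop(struct waitcnt *wc)
    {
            spin_lock(&wc->spin);
            if (--wc->count == 0 && wc->wantwake != 0) {
                    wc->wantwake = 0;
                    spin_unlock(&wc->spin);
                    wakeup(&wc->count);     /* wake only after unlocking */
            } else {
                    spin_unlock(&wc->spin);
            }
    }
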
@@ -1012,18 +1012,18 @@ fdgrow_locked(struct filedesc *fdp, int want)
                nf = 2 * nf + 1;
        } while (nf <= want);
 
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
 
        /*
         * We could have raced another extend while we were not holding
         * the spinlock.
         */
        if (fdp->fd_nfiles >= nf) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                kfree(newfiles, M_FILEDESC);
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                return;
        }
        /*
@@ -1039,9 +1039,9 @@ fdgrow_locked(struct filedesc *fdp, int want)
        fdp->fd_nfiles = nf;
 
        if (oldfiles != fdp->fd_builtin_files) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                kfree(oldfiles, M_FILEDESC);
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
        }
        fdexpand++;
 }
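
fdgrow_locked() above is the canonical blocking-allocation dance: a spinlock cannot be held across kmalloc(..., M_WAITOK), so the lock is dropped around the allocation and the guarded state is rechecked afterward, discarding the work if another thread grew the table first. The same pattern in isolation; the table type and malloc zone are hypothetical:

    /* Called with tbl->spin held; returns with it held. */
    static void
    tbl_grow(struct xtable *tbl, int want)
    {
            struct xslot *newslots;
            int nf;

            for (nf = tbl->nslots; nf <= want; nf = 2 * nf + 1)
                    ;
            spin_unlock(&tbl->spin);
            newslots = kmalloc(nf * sizeof(*newslots), M_XTABLE, M_WAITOK);
            spin_lock(&tbl->spin);

            if (tbl->nslots >= nf) {
                    /* raced: another thread already grew it, toss our copy */
                    spin_unlock(&tbl->spin);
                    kfree(newslots, M_XTABLE);
                    spin_lock(&tbl->spin);
                    return;
            }
            /* ... copy old slots over, install newslots, free old array ... */
            tbl->nslots = nf;
    }
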
@@ -1107,12 +1107,12 @@ fdalloc(struct proc *p, int want, int *result)
        /*
         * Check dtable size limit
         */
-       spin_lock_wr(&p->p_limit->p_spin);
+       spin_lock(&p->p_limit->p_spin);
        if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                lim = INT_MAX;
        else
                lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
-       spin_unlock_wr(&p->p_limit->p_spin);
+       spin_unlock(&p->p_limit->p_spin);
 
        if (lim > maxfilesperproc)
                lim = maxfilesperproc;
@@ -1142,7 +1142,7 @@ fdalloc(struct proc *p, int want, int *result)
        /*
         * Grow the dtable if necessary
         */
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (want >= fdp->fd_nfiles)
                fdgrow_locked(fdp, want);
 
@@ -1193,7 +1193,7 @@ retry:
         * No space in current array.  Expand?
         */
        if (fdp->fd_nfiles >= lim) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                return (EMFILE);
        }
        fdgrow_locked(fdp, want);
@@ -1211,7 +1211,7 @@ found:
        fdp->fd_files[fd].fileflags = 0;
        fdp->fd_files[fd].reserved = 1;
        fdreserve_locked(fdp, fd, 1);
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (0);
 }
 
@@ -1228,32 +1228,32 @@ fdavail(struct proc *p, int n)
        struct fdnode *fdnode;
        int i, lim, last;
 
-       spin_lock_wr(&p->p_limit->p_spin);
+       spin_lock(&p->p_limit->p_spin);
        if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                lim = INT_MAX;
        else
                lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
-       spin_unlock_wr(&p->p_limit->p_spin);
+       spin_unlock(&p->p_limit->p_spin);
 
        if (lim > maxfilesperproc)
                lim = maxfilesperproc;
        if (lim < minfilesperproc)
                lim = minfilesperproc;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                return (1);
        }
        last = min(fdp->fd_nfiles, lim);
        fdnode = &fdp->fd_files[fdp->fd_freefile];
        for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
                if (fdnode->fp == NULL && --n <= 0) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        return (1);
                }
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (0);
 }
 
@@ -1390,32 +1390,32 @@ fdrevoke_proc_callback(struct proc *p, void *vinfo)
        /*
         * Softref the fdp to prevent it from being destroyed
         */
-       spin_lock_wr(&p->p_spin);
+       spin_lock(&p->p_spin);
        if ((fdp = p->p_fd) == NULL) {
-               spin_unlock_wr(&p->p_spin);
+               spin_unlock(&p->p_spin);
                return(0);
        }
        atomic_add_int(&fdp->fd_softrefs, 1);
-       spin_unlock_wr(&p->p_spin);
+       spin_unlock(&p->p_spin);
 
        /*
         * Locate and close any matching file descriptors.
         */
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        for (n = 0; n < fdp->fd_nfiles; ++n) {
                if ((fp = fdp->fd_files[n].fp) == NULL)
                        continue;
                if (fp->f_flag & FREVOKED) {
                        fhold(info->nfp);
                        fdp->fd_files[n].fp = info->nfp;
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        knote_fdclose(fp, fdp, n);      /* XXX */
                        closef(fp, p);
-                       spin_lock_wr(&fdp->fd_spin);
+                       spin_lock(&fdp->fd_spin);
                        --info->count;
                }
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        atomic_subtract_int(&fdp->fd_softrefs, 1);
        return(0);
 }
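
fdrevoke_proc_callback() (and sysctl_kern_file_callback() further down) pin the fdp with a softref: sample the pointer under the owner's p_spin, bump an atomic count before dropping the lock, and release the count once the walk is done. The acquire half, written as a hypothetical helper:

    static struct filedesc *
    fdp_softhold(struct proc *p)            /* hypothetical helper */
    {
            struct filedesc *fdp;

            spin_lock(&p->p_spin);
            if ((fdp = p->p_fd) == NULL) {  /* already being torn down */
                    spin_unlock(&p->p_spin);
                    return (NULL);
            }
            atomic_add_int(&fdp->fd_softrefs, 1);   /* pin before unlocking */
            spin_unlock(&p->p_spin);
            return (fdp);
    }

The matching release is the bare atomic_subtract_int(&fdp->fd_softrefs, 1) seen above; fdfree() waits for fd_softrefs to drain before tearing the structure down.
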
@@ -1472,10 +1472,10 @@ falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
        fp->f_ops = &badfileops;
        fp->f_seqcount = 1;
        fsetcred(fp, cred);
-       spin_lock_wr(&filehead_spin);
+       spin_lock(&filehead_spin);
        nfiles++;
        LIST_INSERT_HEAD(&filehead, fp, f_list);
-       spin_unlock_wr(&filehead_spin);
+       spin_unlock(&filehead_spin);
        if (resultfd) {
                if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
                        fdrop(fp);
@@ -1501,12 +1501,12 @@ checkfdclosed(struct filedesc *fdp, int fd, struct file *fp)
 {
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
                error = EBADF;
        else
                error = 0;
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (error);
 }
 
@@ -1542,9 +1542,9 @@ fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
 void
 fsetfd(struct filedesc *fdp, struct file *fp, int fd)
 {
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        fsetfd_locked(fdp, fp, fd);
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
 }
 
 /*
@@ -1576,7 +1576,7 @@ fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
 {
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
@@ -1585,7 +1585,7 @@ fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
                *flagsp = fdp->fd_files[fd].fileflags;
                error = 0;
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (error);
 }
 
@@ -1597,7 +1597,7 @@ fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
 {
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
@@ -1606,7 +1606,7 @@ fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
                fdp->fd_files[fd].fileflags |= add_flags;
                error = 0;
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (error);
 }
 
@@ -1618,7 +1618,7 @@ fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
 {
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
@@ -1627,7 +1627,7 @@ fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
                fdp->fd_files[fd].fileflags &= ~rem_flags;
                error = 0;
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (error);
 }
 
@@ -1666,10 +1666,10 @@ void
 ffree(struct file *fp)
 {
        KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
-       spin_lock_wr(&filehead_spin);
+       spin_lock(&filehead_spin);
        LIST_REMOVE(fp, f_list);
        nfiles--;
-       spin_unlock_wr(&filehead_spin);
+       spin_unlock(&filehead_spin);
        fsetcred(fp, NULL);
        if (fp->f_nchandle.ncp)
            cache_drop(&fp->f_nchandle);
@@ -1704,7 +1704,7 @@ fdinit(struct proc *p)
        struct filedesc *fdp = p->p_fd;
 
        newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (fdp->fd_cdir) {
                newfdp->fd_cdir = fdp->fd_cdir;
                vref(newfdp->fd_cdir);
@@ -1725,7 +1725,7 @@ fdinit(struct proc *p)
                vref(newfdp->fd_jdir);
                cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
 
        /* Create the file descriptor table. */
        newfdp->fd_refcnt = 1;
@@ -1749,9 +1749,9 @@ fdshare(struct proc *p)
        struct filedesc *fdp;
 
        fdp = p->p_fd;
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        fdp->fd_refcnt++;
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (fdp);
 }
 
@@ -1782,7 +1782,7 @@ fdcopy(struct proc *p)
         */
        newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);
 again:
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (fdp->fd_lastfile < NDFILE) {
                newfdp->fd_files = newfdp->fd_builtin_files;
                i = NDFILE;
@@ -1797,16 +1797,16 @@ again:
                        i = ni;
                        ni = (i - 1) / 2;
                }
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
                                          M_FILEDESC, M_WAITOK | M_ZERO);
 
                /*
                 * Check for race, retry
                 */
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                if (i <= fdp->fd_lastfile) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        kfree(newfdp->fd_files, M_FILEDESC);
                        goto again;
                }
@@ -1871,7 +1871,7 @@ again:
                        }
                }
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (newfdp);
 }
 
@@ -1903,7 +1903,7 @@ fdfree(struct proc *p, struct filedesc *repl)
        /*
         * Severe messing around to follow.
         */
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
 
        /* Check for special need to clear POSIX style locks */
        fdtol = p->p_fdtol;
@@ -1921,7 +1921,7 @@ fdfree(struct proc *p, struct filedesc *repl)
                                }
                                fp = fdnode->fp;
                                fhold(fp);
-                               spin_unlock_wr(&fdp->fd_spin);
+                               spin_unlock(&fdp->fd_spin);
 
                                lf.l_whence = SEEK_SET;
                                lf.l_start = 0;
@@ -1934,7 +1934,7 @@ fdfree(struct proc *p, struct filedesc *repl)
                                                   &lf,
                                                   F_POSIX);
                                fdrop(fp);
-                               spin_lock_wr(&fdp->fd_spin);
+                               spin_lock(&fdp->fd_spin);
                        }
                }
        retry:
@@ -1970,16 +1970,16 @@ fdfree(struct proc *p, struct filedesc *repl)
                }
                p->p_fdtol = NULL;
                if (fdtol != NULL) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        kfree(fdtol, M_FILEDESC_TO_LEADER);
-                       spin_lock_wr(&fdp->fd_spin);
+                       spin_lock(&fdp->fd_spin);
                }
        }
        if (--fdp->fd_refcnt > 0) {
-               spin_unlock_wr(&fdp->fd_spin);
-               spin_lock_wr(&p->p_spin);
+               spin_unlock(&fdp->fd_spin);
+               spin_lock(&p->p_spin);
                p->p_fd = repl;
-               spin_unlock_wr(&p->p_spin);
+               spin_unlock(&p->p_spin);
                return;
        }
 
@@ -1996,22 +1996,22 @@ fdfree(struct proc *p, struct filedesc *repl)
                if (fdp->fd_files[i].fp) {
                        fp = funsetfd_locked(fdp, i);
                        if (fp) {
-                               spin_unlock_wr(&fdp->fd_spin);
+                               spin_unlock(&fdp->fd_spin);
                                if (SLIST_FIRST(&fp->f_klist))
                                        knote_fdclose(fp, fdp, i);
                                closef(fp, p);
-                               spin_lock_wr(&fdp->fd_spin);
+                               spin_lock(&fdp->fd_spin);
                        }
                }
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
 
        /*
         * Interlock against an allproc scan operations (typically frevoke).
         */
-       spin_lock_wr(&p->p_spin);
+       spin_lock(&p->p_spin);
        p->p_fd = repl;
-       spin_unlock_wr(&p->p_spin);
+       spin_unlock(&p->p_spin);
 
        /*
         * Wait for any softrefs to go away.  This race rarely occurs so
@@ -2054,7 +2054,7 @@ holdfp(struct filedesc *fdp, int fd, int flag)
 {
        struct file* fp;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                fp = NULL;
                goto done;
@@ -2067,7 +2067,7 @@ holdfp(struct filedesc *fdp, int fd, int flag)
        }
        fhold(fp);
 done:
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        return (fp);
 }
 
@@ -2084,7 +2084,7 @@ holdsock(struct filedesc *fdp, int fd, struct file **fpp)
        struct file *fp;
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((unsigned)fd >= fdp->fd_nfiles) {
                error = EBADF;
                fp = NULL;
@@ -2101,7 +2101,7 @@ holdsock(struct filedesc *fdp, int fd, struct file **fpp)
        fhold(fp);
        error = 0;
 done:
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        *fpp = fp;
        return (error);
 }
@@ -2117,7 +2117,7 @@ holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
        struct file *fp;
        int error;
 
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        if ((unsigned)fd >= fdp->fd_nfiles) {
                error = EBADF;
                fp = NULL;
@@ -2135,7 +2135,7 @@ holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
        fhold(fp);
        error = 0;
 done:
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        *fpp = fp;
        return (error);
 }
@@ -2553,24 +2553,24 @@ dupfdopen(struct filedesc *fdp, int dfd, int sfd, int mode, int error)
                        error = EACCES;
                        break;
                }
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
                fsetfd_locked(fdp, wfp, dfd);
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                error = 0;
                break;
        case ENXIO:
                /*
                 * Steal away the file pointer from dfd, and stuff it into indx.
                 */
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
                fsetfd(fdp, wfp, dfd);
                if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        fdrop(xfp);
                } else {
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                }
                error = 0;
                break;
@@ -2621,13 +2621,13 @@ allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
        struct file *fp;
        int res;
 
-       spin_lock_wr(&filehead_spin);
+       spin_lock(&filehead_spin);
        LIST_FOREACH(fp, &filehead, f_list) {
                res = callback(fp, data);
                if (res < 0)
                        break;
        }
-       spin_unlock_wr(&filehead_spin);
+       spin_unlock(&filehead_spin);
 }
 
 /*
@@ -2705,19 +2705,19 @@ sysctl_kern_file_callback(struct proc *p, void *data)
        /*
         * Softref the fdp to prevent it from being destroyed
         */
-       spin_lock_wr(&p->p_spin);
+       spin_lock(&p->p_spin);
        if ((fdp = p->p_fd) == NULL) {
-               spin_unlock_wr(&p->p_spin);
+               spin_unlock(&p->p_spin);
                return(0);
        }
        atomic_add_int(&fdp->fd_softrefs, 1);
-       spin_unlock_wr(&p->p_spin);
+       spin_unlock(&p->p_spin);
 
        /*
         * The fdp's own spinlock prevents the contents from being
         * modified.
         */
-       spin_lock_wr(&fdp->fd_spin);
+       spin_lock(&fdp->fd_spin);
        for (n = 0; n < fdp->fd_nfiles; ++n) {
                if ((fp = fdp->fd_files[n].fp) == NULL)
                        continue;
@@ -2726,14 +2726,14 @@ sysctl_kern_file_callback(struct proc *p, void *data)
                } else {
                        uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
                        kcore_make_file(&kf, fp, p->p_pid, uid, n);
-                       spin_unlock_wr(&fdp->fd_spin);
+                       spin_unlock(&fdp->fd_spin);
                        info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
-                       spin_lock_wr(&fdp->fd_spin);
+                       spin_lock(&fdp->fd_spin);
                        if (info->error)
                                break;
                }
        }
-       spin_unlock_wr(&fdp->fd_spin);
+       spin_unlock(&fdp->fd_spin);
        atomic_subtract_int(&fdp->fd_softrefs, 1);
        if (info->error)
                return(-1);
diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c
index 5c98835..64c614b 100644 (file)
@@ -126,16 +126,16 @@ kgetenv(const char *name)
        int     len;
 
        if (kenv_isdynamic) {
-               spin_lock_wr(&kenv_dynlock);
+               spin_lock(&kenv_dynlock);
                cp = kenv_getstring_dynamic(name, NULL);
                if (cp != NULL) {
                        strcpy(buf, cp);
-                       spin_unlock_wr(&kenv_dynlock);
+                       spin_unlock(&kenv_dynlock);
                        len = strlen(buf) + 1;
                        ret = kmalloc(len, M_KENV, M_WAITOK);
                        strcpy(ret, buf);
                } else {
-                       spin_unlock_wr(&kenv_dynlock);
+                       spin_unlock(&kenv_dynlock);
                        ret = NULL;
                }
        } else
@@ -159,13 +159,13 @@ ksetenv(const char *name, const char *value)
                        return(-1);
                buf = kmalloc(namelen + vallen, M_KENV, M_WAITOK);
                ksprintf(buf, "%s=%s", name, value);
-               spin_lock_wr(&kenv_dynlock);
+               spin_lock(&kenv_dynlock);
                cp = kenv_getstring_dynamic(name, &i);
                if (cp != NULL) {
                        /* replace existing environment variable */
                        oldenv = kenv_dynp[i];
                        kenv_dynp[i] = buf;
-                       spin_unlock_wr(&kenv_dynlock);
+                       spin_unlock(&kenv_dynlock);
                        kfree(oldenv, M_KENV);
                } else {
                        /* append new environment variable */
@@ -174,12 +174,12 @@ ksetenv(const char *name, const char *value)
                        /* bounds checking */
                        if (i < 0 || i >= (KENV_DYNMAXNUM - 1)) {
                                kfree(buf, M_KENV);
-                               spin_unlock_wr(&kenv_dynlock);
+                               spin_unlock(&kenv_dynlock);
                                return(-1);
                        }
                        kenv_dynp[i] = buf;
                        kenv_dynp[i + 1] = NULL;
-                       spin_unlock_wr(&kenv_dynlock);
+                       spin_unlock(&kenv_dynlock);
                }
                return(0);
        } else {
@@ -198,7 +198,7 @@ kunsetenv(const char *name)
        int     i, j;
 
        if (kenv_isdynamic) {
-               spin_lock_wr(&kenv_dynlock);
+               spin_lock(&kenv_dynlock);
                cp = kenv_getstring_dynamic(name, &i);
                if (cp != NULL) {
                        oldenv = kenv_dynp[i];
@@ -206,11 +206,11 @@ kunsetenv(const char *name)
                        for (j = i + 1; kenv_dynp[j] != NULL; j++)
                                kenv_dynp[i++] = kenv_dynp[j];
                        kenv_dynp[i] = NULL;
-                       spin_unlock_wr(&kenv_dynlock);
+                       spin_unlock(&kenv_dynlock);
                        kfree(oldenv, M_KENV);
                        return(0);
                }
-               spin_unlock_wr(&kenv_dynlock);
+               spin_unlock(&kenv_dynlock);
                return(-1);
        } else {
                kprintf("WARNING: kunsetenv: dynamic array not created yet\n");
@@ -237,9 +237,9 @@ ktestenv(const char *name)
        char    *cp;
 
        if (kenv_isdynamic) {
-               spin_lock_wr(&kenv_dynlock);
+               spin_lock(&kenv_dynlock);
                cp = kenv_getstring_dynamic(name, NULL);
-               spin_unlock_wr(&kenv_dynlock);
+               spin_unlock(&kenv_dynlock);
        } else
                cp = kenv_getstring_static(name);
        if (cp != NULL)
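
kgetenv() above shows the inverse of the fdgrow_locked() dance: instead of allocating under kenv_dynlock, it snapshots the value into a local buffer inside the critical section, because the dynamic string can be kfree()d by a concurrent ksetenv()/kunsetenv() the moment the lock drops. Condensed, with an arbitrary buffer size for the sketch:

    char    buf[128], *cp, *ret;

    spin_lock(&kenv_dynlock);
    cp = kenv_getstring_dynamic(name, NULL);
    if (cp != NULL) {
            strcpy(buf, cp);            /* snapshot while the lock pins cp */
            spin_unlock(&kenv_dynlock);
            ret = kmalloc(strlen(buf) + 1, M_KENV, M_WAITOK);
            strcpy(ret, buf);           /* buf is private, no lock needed */
    } else {
            spin_unlock(&kenv_dynlock);
            ret = NULL;
    }
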
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index d0748f3..9329469 100644 (file)
@@ -280,12 +280,12 @@ ktr_resync_callback(void *dummy __unused)
                struct spinlock spin;
 
                spin_init(&spin);
-               spin_lock_wr(&spin);
-               spin_unlock_wr(&spin);
+               spin_lock(&spin);
+               spin_unlock(&spin);
                logtest_noargs(spin_beg);
                for (count = ktr_testspincnt; count; --count) {
-                       spin_lock_wr(&spin);
-                       spin_unlock_wr(&spin);
+                       spin_lock(&spin);
+                       spin_unlock(&spin);
                }
                logtest_noargs(spin_end);
                ktr_testspincnt = 0;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 132d31a..b33a7af 100644 (file)
@@ -187,10 +187,10 @@ debuglockmgr(struct lock *lkp, u_int flags,
        /*
         * So sue me, I'm too tired.
         */
-       if (spin_trylock_wr(&lkp->lk_spinlock) == FALSE) {
+       if (spin_trylock(&lkp->lk_spinlock) == FALSE) {
                if (flags & LK_NOSPINWAIT)
                        return(EBUSY);
-               spin_lock_wr(&lkp->lk_spinlock);
+               spin_lock(&lkp->lk_spinlock);
        }
 
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
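
This is the only spin_trylock() conversion visible in this stretch, and it shows the return convention: TRUE on success, FALSE if the lock is contended, rather than zero or an errno. LK_NOSPINWAIT callers get an immediate EBUSY; everyone else falls back to the unconditional acquire:

    if (spin_trylock(&lkp->lk_spinlock) == FALSE) {
            if (flags & LK_NOSPINWAIT)      /* caller refuses to wait */
                    return (EBUSY);
            spin_lock(&lkp->lk_spinlock);   /* contended path: spin for it */
    }
    /* ... critical section ... */
    spin_unlock(&lkp->lk_spinlock);
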
@@ -238,7 +238,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
 
        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
-                       spin_unlock_wr(&lkp->lk_spinlock);
+                       spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }
                sharelock(lkp, lkp->lk_exclusivecount);
@@ -273,7 +273,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
-                       spin_unlock_wr(&lkp->lk_spinlock);
+                       spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
@@ -302,7 +302,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
-                               spin_unlock_wr(&lkp->lk_spinlock);
+                               spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
@@ -331,7 +331,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
                         *      Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
-                               spin_unlock_wr(&lkp->lk_spinlock);
+                               spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
@@ -365,7 +365,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
-                       spin_unlock_wr(&lkp->lk_spinlock);
+                       spin_unlock(&lkp->lk_spinlock);
                        panic("lockmgr: non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
@@ -381,7 +381,7 @@ debuglockmgr(struct lock *lkp, u_int flags,
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
-                               spin_unlock_wr(&lkp->lk_spinlock);
+                               spin_unlock(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p/%p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -1),
                                    "exclusive lock holder",
@@ -406,12 +406,12 @@ debuglockmgr(struct lock *lkp, u_int flags,
                break;
 
        default:
-               spin_unlock_wr(&lkp->lk_spinlock);
+               spin_unlock(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
-       spin_unlock_wr(&lkp->lk_spinlock);
+       spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
@@ -468,7 +468,7 @@ lockmgr_clrexclusive_interlocked(struct lock *lkp)
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
-       spin_unlock_wr(&lkp->lk_spinlock);
+       spin_unlock(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
 }
@@ -497,12 +497,12 @@ lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
 void
 lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
 {
-       spin_lock_wr(&lkp->lk_spinlock);
+       spin_lock(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
-       spin_unlock_wr(&lkp->lk_spinlock);
+       spin_unlock(&lkp->lk_spinlock);
 }
 
 /*
@@ -528,7 +528,7 @@ lockstatus(struct lock *lkp, struct thread *td)
 {
        int lock_type = 0;
 
-       spin_lock_wr(&lkp->lk_spinlock);
+       spin_lock(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
@@ -537,7 +537,7 @@ lockstatus(struct lock *lkp, struct thread *td)
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
-       spin_unlock_wr(&lkp->lk_spinlock);
+       spin_unlock(&lkp->lk_spinlock);
        return (lock_type);
 }
 
@@ -565,9 +565,9 @@ lockcount(struct lock *lkp)
 {
        int count;
 
-       spin_lock_wr(&lkp->lk_spinlock);
+       spin_lock(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
-       spin_unlock_wr(&lkp->lk_spinlock);
+       spin_unlock(&lkp->lk_spinlock);
        return (count);
 }
 
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 20c48f7..f660b20 100644 (file)
@@ -134,7 +134,7 @@ lf_count_adjust(struct proc *p, int increase)
        KKASSERT(p != NULL);
 
        uip = p->p_ucred->cr_uidinfo;
-       spin_lock_wr(&uip->ui_lock);
+       spin_lock(&uip->ui_lock);
 
        if (increase)
                uip->ui_posixlocks += p->p_numposixlocks;
@@ -144,7 +144,7 @@ lf_count_adjust(struct proc *p, int increase)
        KASSERT(uip->ui_posixlocks >= 0,
                ("Negative number of POSIX locks held by %s user: %d.",
                 increase ? "new" : "old", uip->ui_posixlocks));
-       spin_unlock_wr(&uip->ui_lock);
+       spin_unlock(&uip->ui_lock);
 }
 
 static int
@@ -162,7 +162,7 @@ lf_count_change(struct proc *owner, int diff)
        max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
                  maxposixlocksperuid);
 
-       spin_lock_wr(&uip->ui_lock);
+       spin_lock(&uip->ui_lock);
        if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
            uip->ui_posixlocks >= max ) {
                ret = 1;
@@ -177,7 +177,7 @@ lf_count_change(struct proc *owner, int diff)
                         uip->ui_posixlocks));
                ret = 0;
        }
-       spin_unlock_wr(&uip->ui_lock);
+       spin_unlock(&uip->ui_lock);
        return ret;
 }
 
diff --git a/sys/kern/kern_nrandom.c b/sys/kern/kern_nrandom.c
index 55396a2..13b98e9 100644 (file)
@@ -451,9 +451,9 @@ rand_initialize(void)
 void
 add_keyboard_randomness(u_char scancode)
 {
-       spin_lock_wr(&rand_spin);
+       spin_lock(&rand_spin);
        L15_Vector((const LByteType *) &scancode, sizeof (scancode));
-       spin_unlock_wr(&rand_spin);
+       spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
 }
 
@@ -475,11 +475,11 @@ add_interrupt_randomness(int intr)
 void
 add_true_randomness(int val)
 {
-       spin_lock_wr(&rand_spin);
+       spin_lock(&rand_spin);
        IBAA_Seed(val);
        L15_Vector((const LByteType *) &val, sizeof (val));
        ++nrandevents;
-       spin_unlock_wr(&rand_spin);
+       spin_unlock(&rand_spin);
 }
 
 int
@@ -541,10 +541,10 @@ read_random(void *buf, u_int nbytes)
 {
        u_int i;
 
-       spin_lock_wr(&rand_spin);
+       spin_lock(&rand_spin);
        for (i = 0; i < nbytes; ++i) 
                ((u_char *)buf)[i] = IBAA_Byte();
-       spin_unlock_wr(&rand_spin);
+       spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
        return(i);
 }
@@ -558,10 +558,10 @@ read_random_unlimited(void *buf, u_int nbytes)
 {
        u_int i;
 
-       spin_lock_wr(&rand_spin);
+       spin_lock(&rand_spin);
        for (i = 0; i < nbytes; ++i)
                ((u_char *)buf)[i] = L15_Byte();
-       spin_unlock_wr(&rand_spin);
+       spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
        return (i);
 }
@@ -580,9 +580,9 @@ rand_thread_loop(void *dummy)
 
        for (;;) {
                NANOUP_EVENT ();
-               spin_lock_wr(&rand_spin);
+               spin_lock(&rand_spin);
                count = (int)(L15_Byte() * hz / (256 * 10) + hz / 10 + 1);
-               spin_unlock_wr(&rand_spin);
+               spin_unlock(&rand_spin);
                tsleep(rand_td, 0, "rwait", count);
                crit_enter();
                lwkt_deschedule_self(rand_td);
@@ -624,7 +624,7 @@ NANOUP_EVENT(void)
        struct timespec         now;
 
        nanouptime(&now);
-       spin_lock_wr(&rand_spin);
+       spin_lock(&rand_spin);
        if ((now.tv_nsec > NEXT.tv_nsec) || (now.tv_sec != NEXT.tv_sec)) {
                /* 
                 * Randomised time-delay: 200e6 - 350e6 ns; 5 - 2.86 Hz. 
@@ -651,6 +651,6 @@ NANOUP_EVENT(void)
                           sizeof(ACCUM.tv_nsec));
                ++nrandevents;
        }
-       spin_unlock_wr(&rand_spin);
+       spin_unlock(&rand_spin);
 }
 
diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c
index c1e88e2..c734406 100644 (file)
@@ -280,9 +280,9 @@ objcache_create(const char *name, int *cluster_limit0, int mag_capacity,
                SLIST_INSERT_HEAD(&depot->emptymagazines, mag, nextmagazine);
        }
 
-       spin_lock_wr(&objcachelist_spin);
+       spin_lock(&objcachelist_spin);
        LIST_INSERT_HEAD(&allobjcaches, oc, oc_next);
-       spin_unlock_wr(&objcachelist_spin);
+       spin_unlock(&objcachelist_spin);
 
        if (cluster_limit0 != NULL)
                *cluster_limit0 = cluster_limit;
@@ -384,7 +384,7 @@ retry:
         * NOTE: Beyond this point, M_* flags are handled via oc->alloc()
         */
        depot = &oc->depot[myclusterid];
-       spin_lock_wr(&depot->spin);
+       spin_lock(&depot->spin);
 
        /*
         * Recheck the cpucache after obtaining the depot spinlock.  This
@@ -393,7 +393,7 @@ retry:
        if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) ||
            MAGAZINE_NOTEMPTY(cpucache->previous_magazine)
        ) {
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
                goto retry;
        }
 
@@ -410,7 +410,7 @@ retry:
                KKASSERT(MAGAZINE_EMPTY(emptymag));
                SLIST_INSERT_HEAD(&depot->emptymagazines,
                                  emptymag, nextmagazine);
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
                goto retry;
        }
 
@@ -424,7 +424,7 @@ retry:
         */
        if (depot->unallocated_objects) {
                --depot->unallocated_objects;
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
                crit_exit();
 
                obj = oc->alloc(oc->allocator_args, ocflags);
@@ -435,9 +435,9 @@ retry:
                        obj = NULL;
                }
                if (obj == NULL) {
-                       spin_lock_wr(&depot->spin);
+                       spin_lock(&depot->spin);
                        ++depot->unallocated_objects;
-                       spin_unlock_wr(&depot->spin);
+                       spin_unlock(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
 
@@ -466,7 +466,7 @@ retry:
                ssleep(depot, &depot->spin, 0, "objcache_get", 0);
                --cpucache->waiting;
                --depot->waiting;
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
                goto retry;
        }
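
The retry above blocks with ssleep(ident, &depot->spin, ...) rather than tsleep(): handing the spinlock to the sleep routine lets it release the lock and queue the thread atomically, closing the lost-wakeup window, and the lock is held again by the time ssleep() returns, which is why depot->waiting can be decremented and the lock explicitly dropped afterward. A reduced consumer skeleton, with a made-up wmesg:

    spin_lock(&depot->spin);
    while (depot->unallocated_objects == 0) {
            ++depot->waiting;
            /* drops depot->spin while asleep, reacquires before returning */
            ssleep(depot, &depot->spin, 0, "objwait", 0);
            --depot->waiting;
    }
    --depot->unallocated_objects;
    spin_unlock(&depot->spin);

The producer side is the `if (depot->waiting) wakeup(depot);` calls sprinkled through the free paths above.
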
 
@@ -476,7 +476,7 @@ retry:
        ++cpucache->gets_null;
        --cpucache->gets_cumulative;
        crit_exit();
-       spin_unlock_wr(&depot->spin);
+       spin_unlock(&depot->spin);
        return (NULL);
 }
 
@@ -577,7 +577,7 @@ retry:
         * Obtain the depot spinlock.
         */
        depot = &oc->depot[myclusterid];
-       spin_lock_wr(&depot->spin);
+       spin_lock(&depot->spin);
 
        /*
         * If an empty magazine is available in the depot, cycle it
@@ -598,11 +598,11 @@ retry:
                if (MAGAZINE_EMPTY(loadedmag)) {
                        SLIST_INSERT_HEAD(&depot->emptymagazines,
                                          loadedmag, nextmagazine);
-                       spin_unlock_wr(&depot->spin);
+                       spin_unlock(&depot->spin);
                } else {
                        SLIST_INSERT_HEAD(&depot->fullmagazines,
                                          loadedmag, nextmagazine);
-                       spin_unlock_wr(&depot->spin);
+                       spin_unlock(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                }
@@ -615,7 +615,7 @@ retry:
         * to allocate a mag, just free the object.
         */
        ++depot->unallocated_objects;
-       spin_unlock_wr(&depot->spin);
+       spin_unlock(&depot->spin);
        if (depot->waiting)
                wakeup(depot);
        crit_exit();
@@ -634,9 +634,9 @@ objcache_dtor(struct objcache *oc, void *obj)
        struct magazinedepot *depot;
 
        depot = &oc->depot[myclusterid];
-       spin_lock_wr(&depot->spin);
+       spin_lock(&depot->spin);
        ++depot->unallocated_objects;
-       spin_unlock_wr(&depot->spin);
+       spin_unlock(&depot->spin);
        if (depot->waiting)
                wakeup(depot);
        oc->dtor(obj, oc->privdata);
@@ -758,10 +758,10 @@ objcache_reclaim(struct objcache *oc)
        count += mag_purge(oc, cache_percpu->previous_magazine, FALSE);
        crit_exit();
 
-       spin_lock_wr(&depot->spin);
+       spin_lock(&depot->spin);
        depot->unallocated_objects += count;
        depot_disassociate(depot, &tmplist);
-       spin_unlock_wr(&depot->spin);
+       spin_unlock(&depot->spin);
        count += maglist_purge(oc, &tmplist);
        if (count && depot->waiting)
                wakeup(depot);
@@ -795,17 +795,17 @@ objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags)
                        count += mag_purge(oc, cpucache->previous_magazine, FALSE);
                crit_exit();
                if (count > 0) {
-                       spin_lock_wr(&depot->spin);
+                       spin_lock(&depot->spin);
                        depot->unallocated_objects += count;
-                       spin_unlock_wr(&depot->spin);
+                       spin_unlock(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                        return (TRUE);
                }
-               spin_lock_wr(&depot->spin);
+               spin_lock(&depot->spin);
                maglist_disassociate(depot, &depot->fullmagazines,
                                     &tmplist, FALSE);
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
                count = maglist_purge(oc, &tmplist);
                if (count > 0) {
                        if (depot->waiting)
@@ -827,16 +827,16 @@ objcache_destroy(struct objcache *oc)
        int clusterid, cpuid;
        struct magazinelist tmplist;
 
-       spin_lock_wr(&objcachelist_spin);
+       spin_lock(&objcachelist_spin);
        LIST_REMOVE(oc, oc_next);
-       spin_unlock_wr(&objcachelist_spin);
+       spin_unlock(&objcachelist_spin);
 
        SLIST_INIT(&tmplist);
        for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) {
                depot = &oc->depot[clusterid];
-               spin_lock_wr(&depot->spin);
+               spin_lock(&depot->spin);
                depot_disassociate(depot, &tmplist);
-               spin_unlock_wr(&depot->spin);
+               spin_unlock(&depot->spin);
        }
        maglist_purge(oc, &tmplist);
 
diff --git a/sys/kern/kern_plimit.c b/sys/kern/kern_plimit.c
index b41a687..116d9d9 100644 (file)
@@ -153,9 +153,9 @@ plimit_fork(struct proc *p1)
                if (olimit->p_refcnt == 1) {
                        ++olimit->p_refcnt;
                } else {
-                       spin_lock_wr(&olimit->p_spin);
+                       spin_lock(&olimit->p_spin);
                        ++olimit->p_refcnt;
-                       spin_unlock_wr(&olimit->p_spin);
+                       spin_unlock(&olimit->p_spin);
                }
                return(olimit);
        }
@@ -164,7 +164,7 @@ plimit_fork(struct proc *p1)
         * Full-blown code-up.
         */
        nlimit = NULL;
-       spin_lock_wr(&olimit->p_spin);
+       spin_lock(&olimit->p_spin);
 
        for (;;) {
                if (olimit->p_exclusive == 0) {
@@ -178,11 +178,11 @@ plimit_fork(struct proc *p1)
                        nlimit = NULL;
                        break;
                }
-               spin_unlock_wr(&olimit->p_spin);
+               spin_unlock(&olimit->p_spin);
                nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
-               spin_lock_wr(&olimit->p_spin);
+               spin_lock(&olimit->p_spin);
        }
-       spin_unlock_wr(&olimit->p_spin);
+       spin_unlock(&olimit->p_spin);
        if (nlimit)
                kfree(nlimit, M_SUBPROC);
        return(rlimit);
@@ -255,7 +255,7 @@ plimit_modify(struct proc *p, int index, struct rlimit *rlim)
         * exclusive without copying.
         */
        nlimit = NULL;
-       spin_lock_wr(&olimit->p_spin);
+       spin_lock(&olimit->p_spin);
 
        for (;;) {
                if (olimit->p_refcnt == 1) {
@@ -273,13 +273,13 @@ plimit_modify(struct proc *p, int index, struct rlimit *rlim)
                        nlimit = NULL;
                        break;
                }
-               spin_unlock_wr(&olimit->p_spin);
+               spin_unlock(&olimit->p_spin);
                nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
-               spin_lock_wr(&olimit->p_spin);
+               spin_lock(&olimit->p_spin);
        }
        if (index >= 0)
                rlimit->pl_rlimit[index] = *rlim;
-       spin_unlock_wr(&olimit->p_spin);
+       spin_unlock(&olimit->p_spin);
        if (nlimit)
                kfree(nlimit, M_SUBPROC);
 }
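
plimit_modify() (like plimit_fork() before it) cannot allocate while holding olimit->p_spin, so it runs a preallocation loop: try to satisfy the request under the lock, and if a private copy is needed, drop the lock, kmalloc() a spare, retake the lock and retry; an unused spare is freed at the end. Stripped to the control flow, with hypothetical predicates standing in for the refcnt/exclusive tests:

    struct plimit *nlimit = NULL;

    spin_lock(&olimit->p_spin);
    for (;;) {
            if (can_modify_in_place(olimit))    /* hypothetical predicate */
                    break;                      /* no copy needed */
            if (nlimit != NULL) {
                    copy_and_install(olimit, nlimit);   /* hypothetical */
                    nlimit = NULL;
                    break;
            }
            spin_unlock(&olimit->p_spin);
            nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
            spin_lock(&olimit->p_spin);         /* state may have changed */
    }
    spin_unlock(&olimit->p_spin);
    if (nlimit)
            kfree(nlimit, M_SUBPROC);           /* spare went unused */
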
@@ -301,12 +301,12 @@ plimit_free(struct proc *p)
                        limit->p_refcnt = -999;
                        kfree(limit, M_SUBPROC);
                } else {
-                       spin_lock_wr(&limit->p_spin);
+                       spin_lock(&limit->p_spin);
                        if (--limit->p_refcnt == 0) {
-                               spin_unlock_wr(&limit->p_spin);
+                               spin_unlock(&limit->p_spin);
                                kfree(limit, M_SUBPROC);
                        } else {
-                               spin_unlock_wr(&limit->p_spin);
+                               spin_unlock(&limit->p_spin);
                        }
                }
        }
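
plimit_free() also may not kfree() a structure whose embedded spinlock it is still holding, hence the unlock before each kfree(); this is safe only because a refcount of zero guarantees no other thread can reach the lock again:

    spin_lock(&limit->p_spin);
    if (--limit->p_refcnt == 0) {
            spin_unlock(&limit->p_spin);
            kfree(limit, M_SUBPROC);    /* last ref: nobody can relock it */
    } else {
            spin_unlock(&limit->p_spin);
    }
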
@@ -343,27 +343,27 @@ kern_setrlimit(u_int which, struct rlimit *limp)
         if (limp->rlim_max < 0)
                 limp->rlim_max = RLIM_INFINITY;
 
-       spin_lock_wr(&limit->p_spin);
+       spin_lock(&limit->p_spin);
         if (limp->rlim_cur > alimp->rlim_max ||
             limp->rlim_max > alimp->rlim_max) {
-               spin_unlock_wr(&limit->p_spin);
+               spin_unlock(&limit->p_spin);
                 error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
                 if (error)
                         return (error);
        } else {
-               spin_unlock_wr(&limit->p_spin);
+               spin_unlock(&limit->p_spin);
        }
         if (limp->rlim_cur > limp->rlim_max)
                 limp->rlim_cur = limp->rlim_max;
 
         switch (which) {
         case RLIMIT_CPU:
-               spin_lock_wr(&limit->p_spin);
+               spin_lock(&limit->p_spin);
                 if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
                         limit->p_cpulimit = RLIM_INFINITY;
                 else
                         limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
-               spin_unlock_wr(&limit->p_spin);
+               spin_unlock(&limit->p_spin);
                 break;
         case RLIMIT_DATA:
                 if (limp->rlim_cur > maxdsiz)
@@ -382,7 +382,7 @@ kern_setrlimit(u_int which, struct rlimit *limp)
                  * "rlim_cur" bytes accessible.  If stack limit is going
                  * up make more accessible, if going down make inaccessible.
                  */
-               spin_lock_wr(&limit->p_spin);
+               spin_lock(&limit->p_spin);
                 if (limp->rlim_cur != alimp->rlim_cur) {
                         vm_offset_t addr;
                         vm_size_t size;
@@ -397,13 +397,13 @@ kern_setrlimit(u_int which, struct rlimit *limp)
                                 size = alimp->rlim_cur - limp->rlim_cur;
                                 addr = USRSTACK - alimp->rlim_cur;
                         }
-                       spin_unlock_wr(&limit->p_spin);
+                       spin_unlock(&limit->p_spin);
                         addr = trunc_page(addr);
                         size = round_page(size);
                         vm_map_protect(&p->p_vmspace->vm_map,
                                       addr, addr+size, prot, FALSE);
                 } else {
-                       spin_unlock_wr(&limit->p_spin);
+                       spin_unlock(&limit->p_spin);
                }
                 break;
 
@@ -431,9 +431,9 @@ kern_setrlimit(u_int which, struct rlimit *limp)
                         limp->rlim_max = maxposixlocksperuid;
                 break;
         }
-       spin_lock_wr(&limit->p_spin);
+       spin_lock(&limit->p_spin);
         *alimp = *limp;
-       spin_unlock_wr(&limit->p_spin);
+       spin_unlock(&limit->p_spin);
         return (0);
 }
 
@@ -452,9 +452,9 @@ kern_getrlimit(u_int which, struct rlimit *limp)
                 return (EINVAL);
 
        limit = p->p_limit;
-       spin_lock_wr(&limit->p_spin);
+       spin_lock(&limit->p_spin);
         *limp = p->p_rlimit[which];
-       spin_unlock_wr(&limit->p_spin);
+       spin_unlock(&limit->p_spin);
         return (0);
 }
 
@@ -480,7 +480,7 @@ plimit_testcpulimit(struct plimit *limit, u_int64_t ttime)
        if (ttime <= limit->p_cpulimit)
                return(PLIMIT_TESTCPU_OK);
 
-       spin_lock_wr(&limit->p_spin);
+       spin_lock(&limit->p_spin);
        if (ttime > limit->p_cpulimit) {
                rlim = &limit->pl_rlimit[RLIMIT_CPU];
                if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5)
@@ -490,7 +490,7 @@ plimit_testcpulimit(struct plimit *limit, u_int64_t ttime)
        } else {
                mode = PLIMIT_TESTCPU_OK;
        }
-       spin_unlock_wr(&limit->p_spin);
+       spin_unlock(&limit->p_spin);
        return(mode);
 }
 
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 3d6c2c8..1f6c0d5 100644 (file)
@@ -901,11 +901,11 @@ uicreate(uid_t uid)
         * Somebody may have already created the uidinfo for this
         * uid. If so, return that instead.
         */
-       spin_lock_wr(&uihash_lock);
+       spin_lock(&uihash_lock);
        tmp = uilookup(uid);
        if (tmp != NULL) {
                uihold(tmp);
-               spin_unlock_wr(&uihash_lock);
+               spin_unlock(&uihash_lock);
 
                spin_uninit(&uip->ui_lock);
                varsymset_clean(&uip->ui_varsymset);
@@ -913,7 +913,7 @@ uicreate(uid_t uid)
                uip = tmp;
        } else {
                LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
-               spin_unlock_wr(&uihash_lock);
+               spin_unlock(&uihash_lock);
        }
        return (uip);
 }
@@ -928,14 +928,14 @@ uifind(uid_t uid)
 {
        struct  uidinfo *uip;
 
-       spin_lock_wr(&uihash_lock);
+       spin_lock(&uihash_lock);
        uip = uilookup(uid);
        if (uip == NULL) {
-               spin_unlock_wr(&uihash_lock);
+               spin_unlock(&uihash_lock);
                uip = uicreate(uid);
        } else {
                uihold(uip);
-               spin_unlock_wr(&uihash_lock);
+               spin_unlock(&uihash_lock);
        }
        return (uip);
 }
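
uifind() and uicreate() together resolve the classic insert race without holding uihash_lock across an allocation: look up under the lock, create outside it, then look up again before inserting and defer to any winner. Flattened into one sequence, with hypothetical alloc/destroy helpers standing in for uicreate()'s setup and teardown:

    struct uidinfo *uip, *tmp;

    spin_lock(&uihash_lock);
    uip = uilookup(uid);
    if (uip != NULL) {
            uihold(uip);                /* fast path: take a ref under lock */
            spin_unlock(&uihash_lock);
            return (uip);
    }
    spin_unlock(&uihash_lock);

    uip = uidinfo_alloc(uid);           /* hypothetical: kmalloc + init */
    spin_lock(&uihash_lock);
    if ((tmp = uilookup(uid)) != NULL) {
            uihold(tmp);                /* raced: someone inserted first */
            spin_unlock(&uihash_lock);
            uidinfo_destroy(uip);       /* hypothetical: undo our copy */
            return (tmp);
    }
    LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
    spin_unlock(&uihash_lock);
    return (uip);
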
@@ -957,13 +957,13 @@ uifree(struct uidinfo *uip)
         * we can safely unlink the uip and destroy it.  Otherwise we lost
         * a race and must fail.
         */
-       spin_lock_wr(&uihash_lock);
+       spin_lock(&uihash_lock);
        if (uip->ui_ref != 1) {
-               spin_unlock_wr(&uihash_lock);
+               spin_unlock(&uihash_lock);
                return(-1);
        }
        LIST_REMOVE(uip, ui_hash);
-       spin_unlock_wr(&uihash_lock);
+       spin_unlock(&uihash_lock);
 
        /*
         * The uip is now orphaned and we can destroy it at our
@@ -1037,7 +1037,7 @@ int
 chgproccnt(struct uidinfo *uip, int diff, int max)
 {
        int ret;
-       spin_lock_wr(&uip->ui_lock);
+       spin_lock(&uip->ui_lock);
        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                ret = 0;
@@ -1047,7 +1047,7 @@ chgproccnt(struct uidinfo *uip, int diff, int max)
                        kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
                ret = 1;
        }
-       spin_unlock_wr(&uip->ui_lock);
+       spin_unlock(&uip->ui_lock);
        return ret;
 }
 
@@ -1059,7 +1059,7 @@ chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
 {
        rlim_t new;
 
-       spin_lock_wr(&uip->ui_lock);
+       spin_lock(&uip->ui_lock);
        new = uip->ui_sbsize + to - *hiwat;
        KKASSERT(new >= 0);
 
@@ -1078,7 +1078,7 @@ chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
        }
        uip->ui_sbsize = new;
        *hiwat = to;
-       spin_unlock_wr(&uip->ui_lock);
+       spin_unlock(&uip->ui_lock);
        return (1);
 }
 
diff --git a/sys/kern/kern_sensors.c b/sys/kern/kern_sensors.c
index d266ebe..63faaf4 100644 (file)
@@ -72,7 +72,7 @@ sensordev_install(struct ksensordev *sensdev)
        struct ksensordev *v, *nv;
 
        /* mtx_lock(&Giant); */
-       spin_lock_wr(&sensor_dev_lock);
+       spin_lock(&sensor_dev_lock);
        if (sensordev_count == 0) {
                sensdev->num = 0;
                SLIST_INSERT_HEAD(&sensordev_list, sensdev, list);
@@ -86,7 +86,7 @@ sensordev_install(struct ksensordev *sensdev)
        }
        sensordev_count++;
        /* mtx_unlock(&Giant); */
-       spin_unlock_wr(&sensor_dev_lock);
+       spin_unlock(&sensor_dev_lock);
 
 #ifndef NOSYSCTL8HACK
        sensor_sysctl8magic_install(sensdev);
@@ -101,7 +101,7 @@ sensor_attach(struct ksensordev *sensdev, struct ksensor *sens)
        int i;
 
        /* mtx_lock(&Giant); */
-       spin_lock_wr(&sensor_dev_lock);
+       spin_lock(&sensor_dev_lock);
        sh = &sensdev->sensors_list;
        if (sensdev->sensors_count == 0) {
                for (i = 0; i < SENSOR_MAX_TYPES; i++)
@@ -127,7 +127,7 @@ sensor_attach(struct ksensordev *sensdev, struct ksensor *sens)
        if (sensdev->maxnumt[sens->type] == sens->numt)
                sensdev->maxnumt[sens->type]++;
        sensdev->sensors_count++;
-       spin_unlock_wr(&sensor_dev_lock);
+       spin_unlock(&sensor_dev_lock);
        /* mtx_unlock(&Giant); */
 }
 
@@ -135,11 +135,11 @@ void
 sensordev_deinstall(struct ksensordev *sensdev)
 {
        /* mtx_lock(&Giant); */
-       spin_lock_wr(&sensor_dev_lock);
+       spin_lock(&sensor_dev_lock);
        sensordev_count--;
        SLIST_REMOVE(&sensordev_list, sensdev, ksensordev, list);
        /* mtx_unlock(&Giant); */
-       spin_unlock_wr(&sensor_dev_lock);
+       spin_unlock(&sensor_dev_lock);
 
 #ifndef NOSYSCTL8HACK
        sensor_sysctl8magic_deinstall(sensdev);
@@ -168,14 +168,14 @@ sensordev_get(int num)
 {
        struct ksensordev *sd;
 
-       spin_lock_wr(&sensor_dev_lock);
+       spin_lock(&sensor_dev_lock);
        SLIST_FOREACH(sd, &sensordev_list, list)
                if (sd->num == num) {
-                       spin_unlock_wr(&sensor_dev_lock);
+                       spin_unlock(&sensor_dev_lock);
                        return (sd);
                }
 
-       spin_unlock_wr(&sensor_dev_lock);
+       spin_unlock(&sensor_dev_lock);
        return (NULL);
 }
 
@@ -185,16 +185,16 @@ sensor_find(struct ksensordev *sensdev, enum sensor_type type, int numt)
        struct ksensor *s;
        struct ksensors_head *sh;
 
-       spin_lock_wr(&sensor_dev_lock);
+       spin_lock(&sensor_dev_lock);
        sh = &sensdev->sensors_list;
        SLIST_FOREACH(s, sh, list) {
                if (s->type == type && s->numt == numt) {
-                       spin_unlock_wr(&sensor_dev_lock);
+                       spin_unlock(&sensor_dev_lock);
                        return (s);
                }
        }
 
-       spin_unlock_wr(&sensor_dev_lock);
+       spin_unlock(&sensor_dev_lock);
        return (NULL);
 }
 
diff --git a/sys/kern/kern_spinlock.c b/sys/kern/kern_spinlock.c
index ece6e04..540055a 100644 (file)
@@ -256,11 +256,11 @@ sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
         */
        if (value == 1) {
                spin_init(&mtx);
-               spin_lock_wr(&mtx);     /* force an indefinite wait */
+               spin_lock(&mtx);        /* force an indefinite wait */
                spin_lock_test_mode = 1;
-               spin_lock_wr(&mtx);
-               spin_unlock_wr(&mtx);   /* Clean up the spinlock count */
-               spin_unlock_wr(&mtx);
+               spin_lock(&mtx);
+               spin_unlock(&mtx);      /* Clean up the spinlock count */
+               spin_unlock(&mtx);
                spin_lock_test_mode = 0;
        }
 
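For reference, the renamed exclusive-spinlock API reduces to the handful of calls exercised by the test above. A minimal usage sketch, not part of the generated patch; the structure and counter are illustrative, and only spin_init(), spin_lock(), spin_trylock() and spin_unlock() come from this rename:

	/* assumes <sys/spinlock2.h>; EBUSY comes from <sys/errno.h> */
	struct mystate {
		struct spinlock	spin;
		int		count;
	};

	static void
	mystate_init(struct mystate *ms)
	{
		spin_init(&ms->spin);
		ms->count = 0;
	}

	static void
	mystate_bump(struct mystate *ms)
	{
		spin_lock(&ms->spin);	/* always exclusive; no _wr suffix */
		++ms->count;
		spin_unlock(&ms->spin);
	}

	static int
	mystate_bump_try(struct mystate *ms)
	{
		/*
		 * spin_trylock() is assumed to return non-zero on success,
		 * matching the old spin_trylock_wr().
		 */
		if (!spin_trylock(&ms->spin))
			return (EBUSY);
		++ms->count;
		spin_unlock(&ms->spin);
		return (0);
	}
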
index dd39717..0ba21d7 100644 (file)
@@ -415,27 +415,27 @@ shutdownsldesc(struct sldesc *sl, int how)
        /*
         * Return unread and unreplied messages
         */
-       spin_lock_wr(&sl->spin);
+       spin_lock(&sl->spin);
        while ((slmsg = TAILQ_FIRST(&sl->inq)) != NULL) {
                TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
                if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
                        sl->repbytes -= slmsg->maxsize;
                        slmsg->flags &= ~SLMSGF_ONINQ;
                        sl->peer->backend_dispose(sl->peer, slmsg);
                }
                /* leave ONINQ set for commands, it will be cleared below */
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
        }
        while ((slmsg = RB_ROOT(&sl->reply_rb_root)) != NULL) {
                RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slmsg);
                sl->cmdbytes -= slmsg->maxsize;
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
                slmsg->flags &= ~SLMSGF_ONINQ;
                sl->peer->backend_reply(sl->peer, slmsg, NULL);
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
        }
-       spin_unlock_wr(&sl->spin);
+       spin_unlock(&sl->spin);
 
        /*
         * Call shutdown on the peer with the opposite flags
@@ -459,7 +459,7 @@ static
 void
 shutdownsldesc2(struct sldesc *sl, int how)
 {
-       spin_lock_wr(&sl->spin);
+       spin_lock(&sl->spin);
        switch(how) {
        case SHUT_RD:
                sl->flags |= SLF_RSHUTDOWN;
@@ -471,7 +471,7 @@ shutdownsldesc2(struct sldesc *sl, int how)
                sl->flags |= SLF_RSHUTDOWN | SLF_WSHUTDOWN;
                break;
        }
-       spin_unlock_wr(&sl->spin);
+       spin_unlock(&sl->spin);
 
        /*
         * Handle signaling on the user side
@@ -494,9 +494,9 @@ sldrop(struct sldesc *sl)
 {
        struct sldesc *slpeer;
 
-       spin_lock_wr(&sl->common->spin);
+       spin_lock(&sl->common->spin);
        if (--sl->common->refs == 0) {
-               spin_unlock_wr(&sl->common->spin);
+               spin_unlock(&sl->common->spin);
                if ((slpeer = sl->peer) != NULL) {
                        sl->peer = NULL;
                        slpeer->peer = NULL;
@@ -513,7 +513,7 @@ sldrop(struct sldesc *sl)
                sl->common = NULL;
                kfree(sl, M_SYSLINK);
        } else {
-               spin_unlock_wr(&sl->common->spin);
+               spin_unlock(&sl->common->spin);
        }
 }
 
@@ -582,7 +582,7 @@ slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
        /*
         * Get a message, blocking if necessary.
         */
-       spin_lock_wr(&sl->spin);
+       spin_lock(&sl->spin);
        while ((slmsg = TAILQ_FIRST(&sl->inq)) == NULL) {
                if (sl->flags & SLF_RSHUTDOWN) {
                        error = 0;
@@ -631,7 +631,7 @@ slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
        TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
        if (slmsg->msg->sm_proto & SM_PROTO_REPLY)
                sl->repbytes -= slmsg->maxsize;
-       spin_unlock_wr(&sl->spin);
+       spin_unlock(&sl->spin);
 
        /*
         * Load the message data into the user buffer.
@@ -691,10 +691,10 @@ slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
                /*
                 * Requeue the message if we could not read it successfully
                 */
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
                TAILQ_INSERT_HEAD(&sl->inq, slmsg, tqnode);
                slmsg->flags |= SLMSGF_ONINQ;
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
        } else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
                /*
                 * Dispose of any received reply after we've copied it
@@ -716,7 +716,7 @@ slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
        }
        return(error);
 done1:
-       spin_unlock_wr(&sl->spin);
+       spin_unlock(&sl->spin);
 done2:
        return(error);
 }
@@ -857,17 +857,17 @@ slfileop_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
                /*
                 * Replies have to be matched up against received commands.
                 */
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
                slcmd = slmsg_rb_tree_RB_LOOKUP(&sl->reply_rb_root,
                                                slmsg->msg->sm_msgid);
                if (slcmd == NULL || (slcmd->flags & SLMSGF_ONINQ)) {
                        error = ENOENT;
-                       spin_unlock_wr(&sl->spin);
+                       spin_unlock(&sl->spin);
                        goto done1;
                }
                RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slcmd);
                sl->cmdbytes -= slcmd->maxsize;
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
 
                /*
                 * If the original command specified DMAR, has an xio, and
@@ -1219,7 +1219,7 @@ backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto)
         * ok to have an MP race against cmdbytes.
         */
        if (*bytesp >= syslink_bufsize) {
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
                while (*bytesp >= syslink_bufsize) {
                        if (sl->flags & SLF_WSHUTDOWN) {
                                error = EPIPE;
@@ -1235,7 +1235,7 @@ backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto)
                        if (error)
                                break;
                }
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
        }
        return (error);
 }
@@ -1252,7 +1252,7 @@ backend_write_user(struct sldesc *sl, struct slmsg *slmsg)
 {
        int error;
 
-       spin_lock_wr(&sl->spin);
+       spin_lock(&sl->spin);
        if (sl->flags & SLF_RSHUTDOWN) {
                /*
                 * Not accepting new messages
@@ -1281,7 +1281,7 @@ backend_write_user(struct sldesc *sl, struct slmsg *slmsg)
                slmsg->flags |= SLMSGF_ONINQ;
                error = 0;
        }
-       spin_unlock_wr(&sl->spin);
+       spin_unlock(&sl->spin);
        if (sl->rwaiters)
                wakeup(&sl->rwaiters);
        return(error);
@@ -1300,7 +1300,7 @@ backend_reply_user(struct sldesc *sl, struct slmsg *slcmd, struct slmsg *slrep)
 
        slmsg_put(slcmd);
        if (slrep) {
-               spin_lock_wr(&sl->spin);
+               spin_lock(&sl->spin);
                if ((sl->flags & SLF_RSHUTDOWN) == 0) {
                        TAILQ_INSERT_TAIL(&sl->inq, slrep, tqnode);
                        sl->repbytes += slrep->maxsize;
@@ -1308,7 +1308,7 @@ backend_reply_user(struct sldesc *sl, struct slmsg *slcmd, struct slmsg *slrep)
                } else {
                        error = EPIPE;
                }
-               spin_unlock_wr(&sl->spin);
+               spin_unlock(&sl->spin);
                if (error)
                        sl->peer->backend_dispose(sl->peer, slrep);
                else if (sl->rwaiters)
@@ -1401,7 +1401,7 @@ syslink_kdomsg(struct sldesc *ksl, struct slmsg *slmsg)
         * then remove the message from the matching tree and return.
         */
        error = ksl->peer->backend_write(ksl->peer, slmsg);
-       spin_lock_wr(&ksl->spin);
+       spin_lock(&ksl->spin);
        if (error == 0) {
                while (slmsg->rep == NULL) {
                        error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
@@ -1414,7 +1414,7 @@ syslink_kdomsg(struct sldesc *ksl, struct slmsg *slmsg)
                        error = slmsg->rep->msg->sm_head.se_aux;
                }
        }
-       spin_unlock_wr(&ksl->spin);
+       spin_unlock(&ksl->spin);
        return(error);
 }
 
@@ -1458,7 +1458,7 @@ syslink_kwaitmsg(struct sldesc *ksl, struct slmsg *slmsg)
 {
        int error;
 
-       spin_lock_wr(&ksl->spin);
+       spin_lock(&ksl->spin);
        while (slmsg->rep == NULL) {
                error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
                /* XXX ignore error for now */
@@ -1469,7 +1469,7 @@ syslink_kwaitmsg(struct sldesc *ksl, struct slmsg *slmsg)
        } else {
                error = slmsg->rep->msg->sm_head.se_aux;
        }
-       spin_unlock_wr(&ksl->spin);
+       spin_unlock(&ksl->spin);
        return(error);
 }
 
@@ -1590,7 +1590,7 @@ backend_reply_kern(struct sldesc *ksl, struct slmsg *slcmd, struct slmsg *slrep)
 {
        int error;
 
-       spin_lock_wr(&ksl->spin);
+       spin_lock(&ksl->spin);
        if (slrep == NULL) {
                slcmd->rep = (struct slmsg *)-1;
                error = EIO;
@@ -1598,7 +1598,7 @@ backend_reply_kern(struct sldesc *ksl, struct slmsg *slcmd, struct slmsg *slrep)
                slcmd->rep = slrep;
                error = slrep->msg->sm_head.se_aux;
        }
-       spin_unlock_wr(&ksl->spin);
+       spin_unlock(&ksl->spin);
 
        /*
         * Issue callback or wakeup a synchronous waiter.
index 96b748e..5138bb9 100644 (file)
@@ -119,9 +119,9 @@ sysref_init(struct sysref *sr, struct sysref_class *srclass)
        sr->srclass = srclass;
 
        sa = &sysref_array[gd->gd_cpuid];
-       spin_lock_wr(&sa->spin);
+       spin_lock(&sa->spin);
        sysref_rb_tree_RB_INSERT(&sa->rbtree, sr);
-       spin_unlock_wr(&sa->spin);
+       spin_unlock(&sa->spin);
        crit_exit_gd(gd);
 }
 
@@ -222,9 +222,9 @@ sysref_ctor(void *data, void *privdata, int ocflags)
        sr->srclass = srclass;
 
        sa = &sysref_array[gd->gd_cpuid];
-       spin_lock_wr(&sa->spin);
+       spin_lock(&sa->spin);
        sysref_rb_tree_RB_INSERT(&sa->rbtree, sr);
-       spin_unlock_wr(&sa->spin);
+       spin_unlock(&sa->spin);
        crit_exit_gd(gd);
 
        /*
@@ -256,9 +256,9 @@ sysref_dtor(void *data, void *privdata)
 
        KKASSERT(sr->refcnt == 0);
        sa = &sysref_array[(int)sr->sysid & ncpus_fit_mask];
-       spin_lock_wr(&sa->spin);
+       spin_lock(&sa->spin);
        sysref_rb_tree_RB_REMOVE(&sa->rbtree, sr);
-       spin_unlock_wr(&sa->spin);
+       spin_unlock(&sa->spin);
        if (srclass->dtor)
                srclass->dtor(data, privdata);
 }
index c9ca573..3ebe2e3 100644 (file)
@@ -63,10 +63,10 @@ static void wdog_reset_all(void *unused);
 void
 wdog_register(struct watchdog *wd)
 {
-       spin_lock_wr(&wdogmtx);
+       spin_lock(&wdogmtx);
        wd->period = WDOG_DEFAULT_PERIOD;
        LIST_INSERT_HEAD(&wdoglist, wd, link);
-       spin_unlock_wr(&wdogmtx);
+       spin_unlock(&wdogmtx);
 
        wdog_reset_all(NULL);
 
@@ -77,9 +77,9 @@ wdog_register(struct watchdog *wd)
 void
 wdog_unregister(struct watchdog *wd)
 {
-       spin_lock_wr(&wdogmtx);
+       spin_lock(&wdogmtx);
        LIST_REMOVE(wd, link);
-       spin_unlock_wr(&wdogmtx);
+       spin_unlock(&wdogmtx);
 
        kprintf("wdog: Watchdog %s unregistered\n", wd->name);
 }
@@ -96,7 +96,7 @@ wdog_reset_all(void *unused)
        struct watchdog *wd;
        int period, min_period = INT_MAX;
 
-       spin_lock_wr(&wdogmtx);
+       spin_lock(&wdogmtx);
        LIST_FOREACH(wd, &wdoglist, link) {
                period = wdog_reset(wd);
                if (period < min_period)
@@ -107,7 +107,7 @@ wdog_reset_all(void *unused)
 
        wdog_auto_period = min_period;
 
-       spin_unlock_wr(&wdogmtx);
+       spin_unlock(&wdogmtx);
 }
 
 static void
@@ -115,12 +115,12 @@ wdog_set_period(int period)
 {
        struct watchdog *wd;
 
-       spin_lock_wr(&wdogmtx);
+       spin_lock(&wdogmtx);
        LIST_FOREACH(wd, &wdoglist, link) {
                /* XXX: check for period_max */
                wd->period = period;
        }
-       spin_unlock_wr(&wdogmtx);
+       spin_unlock(&wdogmtx);
 }
 
 
index 8b95e6c..ab6bd61 100644 (file)
@@ -748,10 +748,10 @@ lwkt_spin_getport(lwkt_port_t port)
 {
     lwkt_msg_t msg;
 
-    spin_lock_wr(&port->mpu_spin);
+    spin_lock(&port->mpu_spin);
     if ((msg = _lwkt_pollmsg(port)) != NULL)
        _lwkt_pullmsg(port, msg);
-    spin_unlock_wr(&port->mpu_spin);
+    spin_unlock(&port->mpu_spin);
     return(msg);
 }
 
@@ -764,14 +764,14 @@ lwkt_spin_putport(lwkt_port_t port, lwkt_msg_t msg)
     KKASSERT((msg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0);
 
     msg->ms_target_port = port;
-    spin_lock_wr(&port->mpu_spin);
+    spin_lock(&port->mpu_spin);
     _lwkt_pushmsg(port, msg);
     dowakeup = 0;
     if (port->mp_flags & MSGPORTF_WAITING) {
        port->mp_flags &= ~MSGPORTF_WAITING;
        dowakeup = 1;
     }
-    spin_unlock_wr(&port->mpu_spin);
+    spin_unlock(&port->mpu_spin);
     if (dowakeup)
        wakeup(port);
     return (EASYNC);
@@ -791,7 +791,7 @@ lwkt_spin_waitmsg(lwkt_msg_t msg, int flags)
     if ((msg->ms_flags & MSGF_DONE) == 0) {
        port = msg->ms_reply_port;
        sentabort = 0;
-       spin_lock_wr(&port->mpu_spin);
+       spin_lock(&port->mpu_spin);
        while ((msg->ms_flags & MSGF_DONE) == 0) {
            void *won;
 
@@ -815,9 +815,9 @@ lwkt_spin_waitmsg(lwkt_msg_t msg, int flags)
                error = ssleep(won, &port->mpu_spin, PCATCH, "waitmsg", 0);
                if (error) {
                    sentabort = error;
-                   spin_unlock_wr(&port->mpu_spin);
+                   spin_unlock(&port->mpu_spin);
                    lwkt_abortmsg(msg);
-                   spin_lock_wr(&port->mpu_spin);
+                   spin_lock(&port->mpu_spin);
                }
            } else {
                error = ssleep(won, &port->mpu_spin, 0, "waitmsg", 0);
@@ -831,13 +831,13 @@ lwkt_spin_waitmsg(lwkt_msg_t msg, int flags)
            msg->ms_error = sentabort;
        if (msg->ms_flags & MSGF_QUEUED)
            _lwkt_pullmsg(port, msg);
-       spin_unlock_wr(&port->mpu_spin);
+       spin_unlock(&port->mpu_spin);
     } else {
        if (msg->ms_flags & MSGF_QUEUED) {
            port = msg->ms_reply_port;
-           spin_lock_wr(&port->mpu_spin);
+           spin_lock(&port->mpu_spin);
            _lwkt_pullmsg(port, msg);
-           spin_unlock_wr(&port->mpu_spin);
+           spin_unlock(&port->mpu_spin);
        }
     }
     return(msg->ms_error);
@@ -850,18 +850,18 @@ lwkt_spin_waitport(lwkt_port_t port, int flags)
     lwkt_msg_t msg;
     int error;
 
-    spin_lock_wr(&port->mpu_spin);
+    spin_lock(&port->mpu_spin);
     while ((msg = _lwkt_pollmsg(port)) == NULL) {
        port->mp_flags |= MSGPORTF_WAITING;
        error = ssleep(port, &port->mpu_spin, flags, "waitport", 0);
        /* see note at the top on the MSGPORTF_WAITING flag */
        if (error) {
-           spin_unlock_wr(&port->mpu_spin);
+           spin_unlock(&port->mpu_spin);
            return(NULL);
        }
     }
     _lwkt_pullmsg(port, msg);
-    spin_unlock_wr(&port->mpu_spin);
+    spin_unlock(&port->mpu_spin);
     return(msg);
 }
 
@@ -885,14 +885,14 @@ lwkt_spin_replyport(lwkt_port_t port, lwkt_msg_t msg)
         * If an asynchronous completion has been requested the message
         * must be queued to the reply port.
         */
-       spin_lock_wr(&port->mpu_spin);
+       spin_lock(&port->mpu_spin);
        _lwkt_enqueue_reply(port, msg);
        dowakeup = 0;
        if (port->mp_flags & MSGPORTF_WAITING) {
            port->mp_flags &= ~MSGPORTF_WAITING;
            dowakeup = 1;
        }
-       spin_unlock_wr(&port->mpu_spin);
+       spin_unlock(&port->mpu_spin);
        if (dowakeup)
            wakeup(port);
     }
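The message-port hunks above all lean on ssleep() taking the spinlock as an interlock. A hedged sketch of the produce/consume idiom they use, where ssleep() is assumed, as in lwkt_spin_waitport() above, to drop the spinlock atomically while asleep and reacquire it before returning; the counter and wait channel are illustrative:

	static struct spinlock qspin;
	static int pending;

	static void
	consume_one(void)
	{
		spin_lock(&qspin);
		while (pending == 0) {
			/* qspin is released for the duration of the sleep */
			ssleep(&pending, &qspin, 0, "qwait", 0);
		}
		--pending;
		spin_unlock(&qspin);
	}

	static void
	produce_one(void)
	{
		spin_lock(&qspin);
		++pending;
		spin_unlock(&qspin);
		wakeup(&pending);	/* wake after dropping the spinlock */
	}
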
index 1b4f517..7c25489 100644 (file)
@@ -601,7 +601,7 @@ kvcprintf(char const *fmt, void (*func)(int, void*), void *arg,
                   (((struct putchar_arg *)arg)->flags & TOTTY) == 0);
        if (usespin) {
                crit_enter_hard();
-               spin_lock_wr(&cons_spin);
+               spin_lock(&cons_spin);
        }
 
        for (;;) {
@@ -889,7 +889,7 @@ done:
        if (func == kputchar)
                atomic_clear_long(&mycpu->gd_flags, GDF_KPRINTF);
        if (usespin) {
-               spin_unlock_wr(&cons_spin);
+               spin_unlock(&cons_spin);
                crit_exit_hard();
        }
        return (retval);
index 94dc634..eae647f 100644 (file)
@@ -83,13 +83,13 @@ TQ_LOCK_UNINIT(struct taskqueue *tq)
 static __inline void
 TQ_LOCK(struct taskqueue *tq)
 {
-       spin_lock_wr(&tq->tq_lock);
+       spin_lock(&tq->tq_lock);
 }
 
 static __inline void
 TQ_UNLOCK(struct taskqueue *tq)
 {
-       spin_unlock_wr(&tq->tq_lock);
+       spin_unlock(&tq->tq_lock);
 }
 
 static __inline void
index 80f2eee..104c0e5 100644 (file)
@@ -698,16 +698,16 @@ stopevent(struct proc *p, unsigned int event, unsigned int val)
         * Set event info.  Recheck p_stops in case we are
         * racing a close() on procfs.
         */
-       spin_lock_wr(&p->p_spin);
+       spin_lock(&p->p_spin);
        if ((p->p_stops & event) == 0) {
-               spin_unlock_wr(&p->p_spin);
+               spin_unlock(&p->p_spin);
                return;
        }
        p->p_xstat = val;
        p->p_stype = event;
        p->p_step = 1;
        tsleep_interlock(&p->p_step, 0);
-       spin_unlock_wr(&p->p_spin);
+       spin_unlock(&p->p_spin);
 
        /*
         * Wakeup any PIOCWAITing procs and wait for p_step to
@@ -716,13 +716,13 @@ stopevent(struct proc *p, unsigned int event, unsigned int val)
        for (;;) {
                wakeup(&p->p_stype);
                tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
-               spin_lock_wr(&p->p_spin);
+               spin_lock(&p->p_spin);
                if (p->p_step == 0) {
-                       spin_unlock_wr(&p->p_spin);
+                       spin_unlock(&p->p_spin);
                        break;
                }
                tsleep_interlock(&p->p_step, 0);
-               spin_unlock_wr(&p->p_spin);
+               spin_unlock(&p->p_spin);
        }
 }
 
index 1ebd684..c0712dd 100644 (file)
@@ -417,9 +417,9 @@ uipc_sense(struct socket *so, struct stat *sb)
        sb->st_blksize = so->so_snd.ssb_hiwat;
        sb->st_dev = NOUDEV;
        if (unp->unp_ino == 0) {        /* make up a non-zero inode number */
-               spin_lock_wr(&unp_ino_spin);
+               spin_lock(&unp_ino_spin);
                unp->unp_ino = unp_ino++;
-               spin_unlock_wr(&unp_ino_spin);
+               spin_unlock(&unp_ino_spin);
        }
        sb->st_ino = unp->unp_ino;
        return (0);
@@ -1030,10 +1030,10 @@ unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
                        fsetfd(lp->lwp_proc->p_fd, fp, fd);
                }
        }
-       spin_lock_wr(&unp_spin);
+       spin_lock(&unp_spin);
        fp->f_msgcount--;
        unp_rights--;
-       spin_unlock_wr(&unp_spin);
+       spin_unlock(&unp_spin);
        fdrop(fp);
 }
 
@@ -1148,10 +1148,10 @@ unp_internalize(struct mbuf *control, struct thread *td)
                        fp = fdescp->fd_files[*fdp--].fp;
                        *rp-- = fp;
                        fhold(fp);
-                       spin_lock_wr(&unp_spin);
+                       spin_lock(&unp_spin);
                        fp->f_msgcount++;
                        unp_rights++;
-                       spin_unlock_wr(&unp_spin);
+                       spin_unlock(&unp_spin);
                }
        } else {
                fdp = (int *)CMSG_DATA(cm);
@@ -1160,10 +1160,10 @@ unp_internalize(struct mbuf *control, struct thread *td)
                        fp = fdescp->fd_files[*fdp++].fp;
                        *rp++ = fp;
                        fhold(fp);
-                       spin_lock_wr(&unp_spin);
+                       spin_lock(&unp_spin);
                        fp->f_msgcount++;
                        unp_rights++;
-                       spin_unlock_wr(&unp_spin);
+                       spin_unlock(&unp_spin);
                }
        }
        return (0);
@@ -1193,13 +1193,13 @@ unp_gc(void)
        struct file **fpp;
        int i;
 
-       spin_lock_wr(&unp_spin);
+       spin_lock(&unp_spin);
        if (unp_gcing) {
-               spin_unlock_wr(&unp_spin);
+               spin_unlock(&unp_spin);
                return;
        }
        unp_gcing = TRUE;
-       spin_unlock_wr(&unp_spin);
+       spin_unlock(&unp_spin);
 
        /* 
         * before going through all this, set all FDs to 
@@ -1563,10 +1563,10 @@ unp_mark(struct file *fp, void *data)
 static void
 unp_discard(struct file *fp, void *data __unused)
 {
-       spin_lock_wr(&unp_spin);
+       spin_lock(&unp_spin);
        fp->f_msgcount--;
        unp_rights--;
-       spin_unlock_wr(&unp_spin);
+       spin_unlock(&unp_spin);
        closef(fp, NULL);
 }
 
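The unp_gc() hunk above shows the one-shot guard idiom: test and set the flag inside the spinlock, then do the long-running work outside it. A minimal sketch; gc_running stands in for unp_gcing and the scan body is elided:

	static struct spinlock gc_spin;
	static int gc_running;

	static void
	gc_once(void)
	{
		spin_lock(&gc_spin);
		if (gc_running) {	/* another thread got here first */
			spin_unlock(&gc_spin);
			return;
		}
		gc_running = 1;
		spin_unlock(&gc_spin);

		/* ... long-running scan, done without holding the spinlock ... */

		gc_running = 0;
	}
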
index 61847d5..0494e91 100644 (file)
@@ -366,22 +366,22 @@ bsd4_select_curproc(globaldata_t gd)
 
        crit_enter_gd(gd);
 
-       spin_lock_wr(&bsd4_spin);
+       spin_lock(&bsd4_spin);
        if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
                atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
                dd->upri = nlp->lwp_priority;
                dd->uschedcp = nlp;
-               spin_unlock_wr(&bsd4_spin);
+               spin_unlock(&bsd4_spin);
 #ifdef SMP
                lwkt_acquire(nlp->lwp_thread);
 #endif
                lwkt_schedule(nlp->lwp_thread);
        } else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
                atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
-               spin_unlock_wr(&bsd4_spin);
+               spin_unlock(&bsd4_spin);
                lwkt_schedule(&dd->helper_thread);
        } else {
-               spin_unlock_wr(&bsd4_spin);
+               spin_unlock(&bsd4_spin);
        }
        crit_exit_gd(gd);
 }
@@ -469,7 +469,7 @@ bsd4_setrunqueue(struct lwp *lp)
         * up and it could exit, or its priority could be further adjusted,
         * or something like that.
         */
-       spin_lock_wr(&bsd4_spin);
+       spin_lock(&bsd4_spin);
        bsd4_setrunqueue_locked(lp);
 
 #ifdef SMP
@@ -481,7 +481,7 @@ bsd4_setrunqueue(struct lwp *lp)
        ++bsd4_scancpu;
        mask = ~bsd4_curprocmask & bsd4_rdyprocmask &
                lp->lwp_cpumask & smp_active_mask;
-       spin_unlock_wr(&bsd4_spin);
+       spin_unlock(&bsd4_spin);
 
        while (mask) {
                tmpmask = ~((1 << cpuid) - 1);
@@ -505,7 +505,7 @@ bsd4_setrunqueue(struct lwp *lp)
        /*
         * Request a reschedule if appropriate.
         */
-       spin_unlock_wr(&bsd4_spin);
+       spin_unlock(&bsd4_spin);
        if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
                need_user_resched();
        }
@@ -687,7 +687,7 @@ bsd4_resetpriority(struct lwp *lp)
         * Calculate the new priority and queue type
         */
        crit_enter();
-       spin_lock_wr(&bsd4_spin);
+       spin_lock(&bsd4_spin);
 
        newrqtype = lp->lwp_rtprio.type;
 
@@ -737,7 +737,7 @@ bsd4_resetpriority(struct lwp *lp)
                lp->lwp_priority = newpriority;
                reschedcpu = -1;
        }
-       spin_unlock_wr(&bsd4_spin);
+       spin_unlock(&bsd4_spin);
 
        /*
         * Determine if we need to reschedule the target cpu.  This only
@@ -1111,7 +1111,7 @@ sched_thread(void *dummy)
         */
        crit_enter_gd(gd);
        lwkt_deschedule_self(gd->gd_curthread);
-       spin_lock_wr(&bsd4_spin);
+       spin_lock(&bsd4_spin);
        atomic_set_int(&bsd4_rdyprocmask, cpumask);
 
        clear_user_resched();   /* This satisfies the reschedule request */
@@ -1126,11 +1126,11 @@ sched_thread(void *dummy)
                        atomic_set_int(&bsd4_curprocmask, cpumask);
                        dd->upri = nlp->lwp_priority;
                        dd->uschedcp = nlp;
-                       spin_unlock_wr(&bsd4_spin);
+                       spin_unlock(&bsd4_spin);
                        lwkt_acquire(nlp->lwp_thread);
                        lwkt_schedule(nlp->lwp_thread);
                } else {
-                       spin_unlock_wr(&bsd4_spin);
+                       spin_unlock(&bsd4_spin);
                }
 #if 0
        /*
@@ -1164,7 +1164,7 @@ sched_thread(void *dummy)
                /*
                 * The runq is empty.
                 */
-               spin_unlock_wr(&bsd4_spin);
+               spin_unlock(&bsd4_spin);
        }
        crit_exit_gd(gd);
        lwkt_switch();
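
A shape that recurs throughout the scheduler hunks above: bsd4_spin is always dropped before calling lwkt_schedule(), keeping the spinlock hold time short and never holding it across the scheduling call. A hedged sketch of that ordering, reusing chooseproc_locked() from the code above purely for illustration:

	static void
	pick_and_schedule(void)
	{
		struct lwp *nlp;

		spin_lock(&bsd4_spin);
		nlp = chooseproc_locked(NULL);
		if (nlp == NULL) {
			spin_unlock(&bsd4_spin);
			return;
		}
		/* release first; do not hold a spinlock across lwkt_schedule() */
		spin_unlock(&bsd4_spin);
		lwkt_schedule(nlp->lwp_thread);
	}
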
index e66ae7c..a718130 100644 (file)
@@ -238,18 +238,18 @@ dummy_select_curproc(globaldata_t gd)
        struct lwp *lp;
 
        clear_user_resched();
-       spin_lock_wr(&dummy_spin);
+       spin_lock(&dummy_spin);
        if ((lp = TAILQ_FIRST(&dummy_runq)) == NULL) {
                dd->uschedcp = NULL;
                atomic_clear_int(&dummy_curprocmask, gd->gd_cpumask);
-               spin_unlock_wr(&dummy_spin);
+               spin_unlock(&dummy_spin);
        } else {
                --dummy_runqcount;
                TAILQ_REMOVE(&dummy_runq, lp, lwp_procq);
                lp->lwp_flag &= ~LWP_ONRUNQ;
                dd->uschedcp = lp;
                atomic_set_int(&dummy_curprocmask, gd->gd_cpumask);
-               spin_unlock_wr(&dummy_spin);
+               spin_unlock(&dummy_spin);
 #ifdef SMP
                lwkt_acquire(lp->lwp_thread);
 #endif
@@ -287,7 +287,7 @@ dummy_setrunqueue(struct lwp *lp)
                 * Add to our global runq
                 */
                KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
-               spin_lock_wr(&dummy_spin);
+               spin_lock(&dummy_spin);
                ++dummy_runqcount;
                TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq);
                lp->lwp_flag |= LWP_ONRUNQ;
@@ -311,10 +311,10 @@ dummy_setrunqueue(struct lwp *lp)
                if (mask) {
                        cpuid = bsfl(mask);
                        atomic_clear_int(&dummy_rdyprocmask, 1 << cpuid);
-                       spin_unlock_wr(&dummy_spin);
+                       spin_unlock(&dummy_spin);
                        lwkt_schedule(&dummy_pcpu[cpuid].helper_thread);
                } else {
-                       spin_unlock_wr(&dummy_spin);
+                       spin_unlock(&dummy_spin);
                }
        }
 }
@@ -479,7 +479,7 @@ dummy_sched_thread(void *dummy)
     for (;;) {
        lwkt_deschedule_self(gd->gd_curthread);         /* interlock */
        atomic_set_int(&dummy_rdyprocmask, cpumask);
-       spin_lock_wr(&dummy_spin);
+       spin_lock(&dummy_spin);
        if (dd->uschedcp) {
                /*
                 * We raced another cpu trying to schedule a thread onto us.
@@ -491,10 +491,10 @@ dummy_sched_thread(void *dummy)
                        tmpid = bsfl(tmpmask);
                        KKASSERT(tmpid != cpuid);
                        atomic_clear_int(&dummy_rdyprocmask, 1 << tmpid);
-                       spin_unlock_wr(&dummy_spin);
+                       spin_unlock(&dummy_spin);
                        lwkt_schedule(&dummy_pcpu[tmpid].helper_thread);
                } else {
-                       spin_unlock_wr(&dummy_spin);
+                       spin_unlock(&dummy_spin);
                }
        } else if ((lp = TAILQ_FIRST(&dummy_runq)) != NULL) {
                --dummy_runqcount;
@@ -502,13 +502,13 @@ dummy_sched_thread(void *dummy)
                lp->lwp_flag &= ~LWP_ONRUNQ;
                dd->uschedcp = lp;
                atomic_set_int(&dummy_curprocmask, cpumask);
-               spin_unlock_wr(&dummy_spin);
+               spin_unlock(&dummy_spin);
 #ifdef SMP
                lwkt_acquire(lp->lwp_thread);
 #endif
                lwkt_schedule(lp->lwp_thread);
        } else {
-               spin_unlock_wr(&dummy_spin);
+               spin_unlock(&dummy_spin);
        }
        lwkt_switch();
     }
index fa6bc2d..4e13a56 100644 (file)
@@ -232,13 +232,13 @@ bufspacewakeup(void)
         * though we haven't freed the kva space yet, the waiting
         * process will be able to now.
         */
-       spin_lock_wr(&bufcspin);
+       spin_lock(&bufcspin);
        if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
                needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                wakeup(&needsbuffer);
        } else {
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -255,7 +255,7 @@ runningbufwakeup(struct buf *bp)
        int limit;
 
        if ((totalspace = bp->b_runningbufspace) != 0) {
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                runningbufspace -= totalspace;
                --runningbufcount;
                bp->b_runningbufspace = 0;
@@ -266,10 +266,10 @@ runningbufwakeup(struct buf *bp)
                limit = hirunningspace * 4 / 6;
                if (runningbufreq && runningbufspace <= limit) {
                        runningbufreq = 0;
-                       spin_unlock_wr(&bufcspin);
+                       spin_unlock(&bufcspin);
                        wakeup(&runningbufreq);
                } else {
-                       spin_unlock_wr(&bufcspin);
+                       spin_unlock(&bufcspin);
                }
                bd_signal(totalspace);
        }
@@ -288,13 +288,13 @@ runningbufwakeup(struct buf *bp)
 static __inline void
 bufcountwakeup(void) 
 {
-       spin_lock_wr(&bufcspin);
+       spin_lock(&bufcspin);
        if (needsbuffer) {
                needsbuffer &= ~VFS_BIO_NEED_ANY;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                wakeup(&needsbuffer);
        } else {
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -320,19 +320,19 @@ waitrunningbufspace(void)
        int limit = hirunningspace * 4 / 6;
        int dummy;
 
-       spin_lock_wr(&bufcspin);
+       spin_lock(&bufcspin);
        if (runningbufspace > limit) {
                while (runningbufspace > limit) {
                        ++runningbufreq;
                        ssleep(&runningbufreq, &bufcspin, 0, "wdrn1", 0);
                }
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        } else if (runningbufspace > limit / 2) {
                ++runningbufreq;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                tsleep(&dummy, 0, "wdrn2", 1);
        } else {
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -400,17 +400,17 @@ bd_speedup(void)
        if (bd_request == 0 &&
            (dirtybufspace - dirtybufspacehw > lodirtybufspace / 2 ||
             dirtybufcount - dirtybufcounthw >= nbuf / 2)) {
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                bd_request = 1;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                wakeup(&bd_request);
        }
        if (bd_request_hw == 0 &&
            (dirtybufspacehw > lodirtybufspace / 2 ||
             dirtybufcounthw >= nbuf / 2)) {
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                bd_request_hw = 1;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                wakeup(&bd_request_hw);
        }
 }
@@ -473,7 +473,7 @@ bd_wait(int totalspace)
                if (count >= BD_WAKE_SIZE)
                        count = BD_WAKE_SIZE - 1;
 
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                i = (bd_wake_index + count) & BD_WAKE_MASK;
                ++bd_wake_ary[i];
 
@@ -482,7 +482,7 @@ bd_wait(int totalspace)
                 * with locking access to dirtybufspace*
                 */
                tsleep_interlock(&bd_wake_ary[i], 0);
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
                tsleep(&bd_wake_ary[i], PINTERLOCKED, "flstik", hz);
 
                totalspace = runningbufspace + dirtybufspace - hidirtybufspace;
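
bd_wait() above (and stopevent() earlier) pair tsleep_interlock() with PINTERLOCKED so a wakeup cannot be lost in the window between releasing the spinlock and calling tsleep(). A minimal sketch of the ordering; the flag and wait channel are illustrative:

	static struct spinlock fspin;
	static int flag;

	static void
	wait_for_flag(void)
	{
		spin_lock(&fspin);
		while (flag == 0) {
			/*
			 * Register on the channel before dropping the lock;
			 * PINTERLOCKED tells tsleep() this already happened.
			 */
			tsleep_interlock(&flag, 0);
			spin_unlock(&fspin);
			tsleep(&flag, PINTERLOCKED, "flgwt", 0);
			spin_lock(&fspin);
		}
		flag = 0;
		spin_unlock(&fspin);
	}
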
@@ -506,19 +506,19 @@ bd_signal(int totalspace)
        if (totalspace > 0) {
                if (totalspace > BKVASIZE * BD_WAKE_SIZE)
                        totalspace = BKVASIZE * BD_WAKE_SIZE;
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                while (totalspace > 0) {
                        i = bd_wake_index++;
                        i &= BD_WAKE_MASK;
                        if (bd_wake_ary[i]) {
                                bd_wake_ary[i] = 0;
-                               spin_unlock_wr(&bufcspin);
+                               spin_unlock(&bufcspin);
                                wakeup(&bd_wake_ary[i]);
-                               spin_lock_wr(&bufcspin);
+                               spin_lock(&bufcspin);
                        }
                        totalspace -= BKVASIZE;
                }
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -839,9 +839,9 @@ _bremfree(struct buf *bp)
 void
 bremfree(struct buf *bp)
 {
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        _bremfree(bp);
-       spin_unlock_wr(&bufqspin);
+       spin_unlock(&bufqspin);
 }
 
 static void
@@ -1168,14 +1168,14 @@ bdirty(struct buf *bp)
                reassignbuf(bp);
                lwkt_reltoken(&bp->b_vp->v_token);
 
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                ++dirtybufcount;
                dirtybufspace += bp->b_bufsize;
                if (bp->b_flags & B_HEAVY) {
                        ++dirtybufcounthw;
                        dirtybufspacehw += bp->b_bufsize;
                }
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
 
                bd_heatup();
        }
@@ -1192,10 +1192,10 @@ bheavy(struct buf *bp)
        if ((bp->b_flags & B_HEAVY) == 0) {
                bp->b_flags |= B_HEAVY;
                if (bp->b_flags & B_DELWRI) {
-                       spin_lock_wr(&bufcspin);
+                       spin_lock(&bufcspin);
                        ++dirtybufcounthw;
                        dirtybufspacehw += bp->b_bufsize;
-                       spin_unlock_wr(&bufcspin);
+                       spin_unlock(&bufcspin);
                }
        }
 }
@@ -1222,14 +1222,14 @@ bundirty(struct buf *bp)
                reassignbuf(bp);
                lwkt_reltoken(&bp->b_vp->v_token);
 
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                --dirtybufcount;
                dirtybufspace -= bp->b_bufsize;
                if (bp->b_flags & B_HEAVY) {
                        --dirtybufcounthw;
                        dirtybufspacehw -= bp->b_bufsize;
                }
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
 
                bd_signal(bp->b_bufsize);
        }
@@ -1248,10 +1248,10 @@ bsetrunningbufspace(struct buf *bp, int bytes)
 {
        bp->b_runningbufspace = bytes;
        if (bytes) {
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                runningbufspace += bytes;
                ++runningbufcount;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -1307,14 +1307,14 @@ brelse(struct buf *bp)
                if (LIST_FIRST(&bp->b_dep) != NULL)
                        buf_deallocate(bp);
                if (bp->b_flags & B_DELWRI) {
-                       spin_lock_wr(&bufcspin);
+                       spin_lock(&bufcspin);
                        --dirtybufcount;
                        dirtybufspace -= bp->b_bufsize;
                        if (bp->b_flags & B_HEAVY) {
                                --dirtybufcounthw;
                                dirtybufspacehw -= bp->b_bufsize;
                        }
-                       spin_unlock_wr(&bufcspin);
+                       spin_unlock(&bufcspin);
 
                        bd_signal(bp->b_bufsize);
                }
@@ -1524,7 +1524,7 @@ brelse(struct buf *bp)
         * Buffers placed in the EMPTY or EMPTYKVA had better already be
         * disassociated from their vnode.
         */
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        if (bp->b_flags & B_LOCKED) {
                /*
                 * Buffers that are locked are placed in the locked queue
@@ -1583,7 +1583,7 @@ brelse(struct buf *bp)
                    break;
                }
        }
-       spin_unlock_wr(&bufqspin);
+       spin_unlock(&bufqspin);
 
        /*
         * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
@@ -1645,7 +1645,7 @@ bqrelse(struct buf *bp)
 
        buf_act_advance(bp);
 
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        if (bp->b_flags & B_LOCKED) {
                /*
                 * Locked buffers are released to the locked queue.  However,
@@ -1665,14 +1665,14 @@ bqrelse(struct buf *bp)
                 * buffer (most importantly: the wired pages making up its
                 * backing store) *now*.
                 */
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
                brelse(bp);
                return;
        } else {
                bp->b_qindex = BQUEUE_CLEAN;
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
        }
-       spin_unlock_wr(&bufqspin);
+       spin_unlock(&bufqspin);
 
        if ((bp->b_flags & B_LOCKED) == 0 &&
            ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) {
@@ -1936,7 +1936,7 @@ restart:
         * where we cannot back up.
         */
        nqindex = BQUEUE_EMPTYKVA;
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]);
 
        if (nbp == NULL) {
@@ -2051,12 +2051,12 @@ restart:
                 */
 
                if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
-                       spin_unlock_wr(&bufqspin);
+                       spin_unlock(&bufqspin);
                        tsleep(&bd_request, 0, "gnbxxx", hz / 100);
                        goto restart;
                }
                if (bp->b_qindex != qindex) {
-                       spin_unlock_wr(&bufqspin);
+                       spin_unlock(&bufqspin);
                        kprintf("getnewbuf: warning, BUF_LOCK blocked "
                                "unexpectedly on buf %p index %d->%d, "
                                "race corrected\n",
@@ -2065,7 +2065,7 @@ restart:
                        goto restart;
                }
                bremfree_locked(bp);
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
 
                /*
                 * Dependencies must be handled before we disassociate the
@@ -2184,7 +2184,7 @@ restart:
                int flags;
                char *waitmsg;
 
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
                if (defrag) {
                        flags = VFS_BIO_NEED_BUFSPACE;
                        waitmsg = "nbufkv";
@@ -2197,16 +2197,16 @@ restart:
                }
 
                bd_speedup();   /* heeeelp */
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                needsbuffer |= flags;
                while (needsbuffer & flags) {
                        if (ssleep(&needsbuffer, &bufcspin,
                                   slpflags, waitmsg, slptimeo)) {
-                               spin_unlock_wr(&bufcspin);
+                               spin_unlock(&bufcspin);
                                return (NULL);
                        }
                }
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        } else {
                /*
                 * We finally have a valid bp.  We aren't quite out of the
@@ -2278,7 +2278,7 @@ recoverbufpages(void)
 
        ++recoverbufcalls;
 
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        while (bytes < MAXBSIZE) {
                bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]);
                if (bp == NULL)
@@ -2325,7 +2325,7 @@ recoverbufpages(void)
                        continue;
                }
                bremfree_locked(bp);
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
 
                /*
                 * Dependencies must be handled before we disassociate the
@@ -2339,7 +2339,7 @@ recoverbufpages(void)
                        buf_deallocate(bp);
                        if (bp->b_flags & B_LOCKED) {
                                bqrelse(bp);
-                               spin_lock_wr(&bufqspin);
+                               spin_lock(&bufqspin);
                                continue;
                        }
                        KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
@@ -2379,9 +2379,9 @@ recoverbufpages(void)
                bp->b_flags |= B_INVAL;
                /* bfreekva(bp); */
                brelse(bp);
-               spin_lock_wr(&bufqspin);
+               spin_lock(&bufqspin);
        }
-       spin_unlock_wr(&bufqspin);
+       spin_unlock(&bufqspin);
        return(bytes);
 }
 
@@ -2464,11 +2464,11 @@ buf_daemon(void)
                 * request and sleep until we are needed again.
                 * The sleep is just so the suspend code works.
                 */
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                if (bd_request == 0)
                        ssleep(&bd_request, &bufcspin, 0, "psleep", hz);
                bd_request = 0;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -2523,11 +2523,11 @@ buf_daemon_hw(void)
                 * request and sleep until we are needed again.
                 * The sleep is just so the suspend code works.
                 */
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                if (bd_request_hw == 0)
                        ssleep(&bd_request_hw, &bufcspin, 0, "psleep", hz);
                bd_request_hw = 0;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
 }
 
@@ -2552,7 +2552,7 @@ flushbufqueues(bufq_type_t q)
        int r = 0;
        int spun;
 
-       spin_lock_wr(&bufqspin);
+       spin_lock(&bufqspin);
        spun = 1;
 
        bp = TAILQ_FIRST(&bufqueues[q]);
@@ -2580,7 +2580,7 @@ flushbufqueues(bufq_type_t q)
 
                if (bp->b_flags & B_INVAL) {
                        _bremfree(bp);
-                       spin_unlock_wr(&bufqspin);
+                       spin_unlock(&bufqspin);
                        spun = 0;
                        brelse(bp);
                        ++r;
@@ -2607,7 +2607,7 @@ flushbufqueues(bufq_type_t q)
                 *
                 * NOTE: buf_checkwrite is MPSAFE.
                 */
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
                spun = 0;
 
                if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
@@ -2625,7 +2625,7 @@ flushbufqueues(bufq_type_t q)
                break;
        }
        if (spun)
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
        return (r);
 }
 
@@ -3422,11 +3422,11 @@ allocbuf(struct buf *bp, int size)
 
        /* adjust space use on already-dirty buffer */
        if (bp->b_flags & B_DELWRI) {
-               spin_lock_wr(&bufcspin);
+               spin_lock(&bufcspin);
                dirtybufspace += newbsize - bp->b_bufsize;
                if (bp->b_flags & B_HEAVY)
                        dirtybufspacehw += newbsize - bp->b_bufsize;
-               spin_unlock_wr(&bufcspin);
+               spin_unlock(&bufcspin);
        }
        if (newbsize < bp->b_bufsize)
                bufspacewakeup();
@@ -4811,12 +4811,12 @@ vfs_bufstats(void)
                 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
                         counts[j] = 0;
 
-               spin_lock_wr(&bufqspin);
+               spin_lock(&bufqspin);
                 TAILQ_FOREACH(bp, dp, b_freelist) {
                         counts[bp->b_bufsize/PAGE_SIZE]++;
                         count++;
                 }
-               spin_unlock_wr(&bufqspin);
+               spin_unlock(&bufqspin);
 
                 kprintf("%s: total-%d", bname[i], count);
                 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
index 6166e70..f5fa1a8 100644 (file)
@@ -569,13 +569,13 @@ _cache_unlink_parent(struct namecache *ncp)
                KKASSERT(ncp->nc_parent == par);
                _cache_hold(par);
                _cache_lock(par);
-               spin_lock_wr(&ncp->nc_head->spin);
+               spin_lock(&ncp->nc_head->spin);
                LIST_REMOVE(ncp, nc_hash);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                dropvp = NULL;
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        dropvp = par->nc_vp;
-               spin_unlock_wr(&ncp->nc_head->spin);
+               spin_unlock(&ncp->nc_head->spin);
                ncp->nc_parent = NULL;
                ncp->nc_head = NULL;
                _cache_unlock(par);
@@ -876,10 +876,10 @@ _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        vhold(vp);
-               spin_lock_wr(&vp->v_spinlock);
+               spin_lock(&vp->v_spinlock);
                ncp->nc_vp = vp;
                TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
-               spin_unlock_wr(&vp->v_spinlock);
+               spin_unlock(&vp->v_spinlock);
                if (ncp->nc_exlocks)
                        vhold(vp);
 
@@ -908,10 +908,10 @@ _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
                 * other remote FSs.
                 */
                ncp->nc_vp = NULL;
-               spin_lock_wr(&ncspin);
+               spin_lock(&ncspin);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                ++numneg;
-               spin_unlock_wr(&ncspin);
+               spin_unlock(&ncspin);
                ncp->nc_error = ENOENT;
                if (mp)
                        ncp->nc_namecache_gen = mp->mnt_namecache_gen;
@@ -968,10 +968,10 @@ _cache_setunresolved(struct namecache *ncp)
                ncp->nc_error = ENOTCONN;
                if ((vp = ncp->nc_vp) != NULL) {
                        atomic_add_int(&numcache, -1);
-                       spin_lock_wr(&vp->v_spinlock);
+                       spin_lock(&vp->v_spinlock);
                        ncp->nc_vp = NULL;
                        TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
 
                        /*
                         * Any vp associated with an ncp with children is
@@ -984,10 +984,10 @@ _cache_setunresolved(struct namecache *ncp)
                        if (ncp->nc_exlocks)
                                vdrop(vp);
                } else {
-                       spin_lock_wr(&ncspin);
+                       spin_lock(&ncspin);
                        TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                        --numneg;
-                       spin_unlock_wr(&ncspin);
+                       spin_unlock(&ncspin);
                }
                ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
        }
@@ -1257,7 +1257,7 @@ cache_inval_vp(struct vnode *vp, int flags)
        struct namecache *next;
 
 restart:
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                _cache_hold(ncp);
@@ -1265,7 +1265,7 @@ restart:
                /* loop entered with ncp held and vp spin-locked */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        _cache_hold(next);
-               spin_unlock_wr(&vp->v_spinlock);
+               spin_unlock(&vp->v_spinlock);
                _cache_lock(ncp);
                if (ncp->nc_vp != vp) {
                        kprintf("Warning: cache_inval_vp: race-A detected on "
@@ -1278,16 +1278,16 @@ restart:
                _cache_inval(ncp, flags);
                _cache_put(ncp);                /* also releases reference */
                ncp = next;
-               spin_lock_wr(&vp->v_spinlock);
+               spin_lock(&vp->v_spinlock);
                if (ncp && ncp->nc_vp != vp) {
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                        kprintf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        _cache_drop(ncp);
                        goto restart;
                }
        }
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
 }
 
@@ -1306,7 +1306,7 @@ cache_inval_vp_nonblock(struct vnode *vp)
        struct namecache *ncp;
        struct namecache *next;
 
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                _cache_hold(ncp);
@@ -1314,7 +1314,7 @@ cache_inval_vp_nonblock(struct vnode *vp)
                /* loop entered with ncp held */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        _cache_hold(next);
-               spin_unlock_wr(&vp->v_spinlock);
+               spin_unlock(&vp->v_spinlock);
                if (_cache_lock_nonblock(ncp)) {
                        _cache_drop(ncp);
                        if (next)
@@ -1332,16 +1332,16 @@ cache_inval_vp_nonblock(struct vnode *vp)
                _cache_inval(ncp, 0);
                _cache_put(ncp);                /* also releases reference */
                ncp = next;
-               spin_lock_wr(&vp->v_spinlock);
+               spin_lock(&vp->v_spinlock);
                if (ncp && ncp->nc_vp != vp) {
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                        kprintf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        _cache_drop(ncp);
                        goto done;
                }
        }
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
 done:
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
 }
@@ -1385,9 +1385,9 @@ cache_rename(struct nchandle *fnch, struct nchandle *tnch)
        hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
        nchpp = NCHHASH(hash);
 
-       spin_lock_wr(&nchpp->spin);
+       spin_lock(&nchpp->spin);
        _cache_link_parent(fncp, tncp_par, nchpp);
-       spin_unlock_wr(&nchpp->spin);
+       spin_unlock(&nchpp->spin);
 
        _cache_put(tncp_par);
 
@@ -1607,11 +1607,11 @@ cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
         * Handle the makeit == 0 degenerate case
         */
        if (makeit == 0) {
-               spin_lock_wr(&dvp->v_spinlock);
+               spin_lock(&dvp->v_spinlock);
                nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
                if (nch->ncp)
                        cache_hold(nch);
-               spin_unlock_wr(&dvp->v_spinlock);
+               spin_unlock(&dvp->v_spinlock);
        }
 
        /*
@@ -1621,14 +1621,14 @@ cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
                /*
                 * Break out if we successfully acquire a working ncp.
                 */
-               spin_lock_wr(&dvp->v_spinlock);
+               spin_lock(&dvp->v_spinlock);
                nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
                if (nch->ncp) {
                        cache_hold(nch);
-                       spin_unlock_wr(&dvp->v_spinlock);
+                       spin_unlock(&dvp->v_spinlock);
                        break;
                }
-               spin_unlock_wr(&dvp->v_spinlock);
+               spin_unlock(&dvp->v_spinlock);
 
                /*
                 * If dvp is the root of its filesystem it should already
@@ -1768,14 +1768,14 @@ cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                        break;
                }
                vn_unlock(pvp);
-               spin_lock_wr(&pvp->v_spinlock);
+               spin_lock(&pvp->v_spinlock);
                if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
                        _cache_hold(nch.ncp);
-                       spin_unlock_wr(&pvp->v_spinlock);
+                       spin_unlock(&pvp->v_spinlock);
                        vrele(pvp);
                        break;
                }
-               spin_unlock_wr(&pvp->v_spinlock);
+               spin_unlock(&pvp->v_spinlock);
                if (pvp->v_flag & VROOT) {
                        nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
                        error = cache_resolve_mp(nch.mount);
@@ -2047,7 +2047,7 @@ cache_zap(struct namecache *ncp, int nonblock)
                        _cache_hold(par);
                        _cache_lock(par);
                }
-               spin_lock_wr(&ncp->nc_head->spin);
+               spin_lock(&ncp->nc_head->spin);
        }
 
        /*
@@ -2062,7 +2062,7 @@ cache_zap(struct namecache *ncp, int nonblock)
                        break;
                if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
                        if (par) {
-                               spin_unlock_wr(&ncp->nc_head->spin);
+                               spin_unlock(&ncp->nc_head->spin);
                                _cache_put(par);
                        }
                        _cache_unlock(ncp);
@@ -2090,7 +2090,7 @@ cache_zap(struct namecache *ncp, int nonblock)
                        dropvp = par->nc_vp;
                ncp->nc_head = NULL;
                ncp->nc_parent = NULL;
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
                _cache_unlock(par);
        } else {
                KKASSERT(ncp->nc_head == NULL);
@@ -2226,7 +2226,7 @@ cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
        new_ncp = NULL;
        nchpp = NCHHASH(hash);
 restart:
-       spin_lock_wr(&nchpp->spin);
+       spin_lock(&nchpp->spin);
        LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
                numchecks++;
 
@@ -2241,7 +2241,7 @@ restart:
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        _cache_hold(ncp);
-                       spin_unlock_wr(&nchpp->spin);
+                       spin_unlock(&nchpp->spin);
                        if (par_locked) {
                                _cache_unlock(par_nch->ncp);
                                par_locked = 0;
@@ -2271,7 +2271,7 @@ restart:
         *       mount case, in which case nc_name will be NULL.
         */
        if (new_ncp == NULL) {
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
                new_ncp = cache_alloc(nlc->nlc_namelen);
                if (nlc->nlc_namelen) {
                        bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
@@ -2281,7 +2281,7 @@ restart:
                goto restart;
        }
        if (par_locked == 0) {
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
                _cache_lock(par_nch->ncp);
                par_locked = 1;
                goto restart;
@@ -2293,7 +2293,7 @@ restart:
         */
        ncp = new_ncp;
        _cache_link_parent(ncp, par_nch->ncp, nchpp);
-       spin_unlock_wr(&nchpp->spin);
+       spin_unlock(&nchpp->spin);
        _cache_unlock(par_nch->ncp);
        /* par_locked = 0 - not used */
 found:
@@ -2342,7 +2342,7 @@ cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
        new_ncp = NULL;
        nchpp = NCHHASH(hash);
 restart:
-       spin_lock_wr(&nchpp->spin);
+       spin_lock(&nchpp->spin);
        LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
                numchecks++;
 
@@ -2357,7 +2357,7 @@ restart:
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        _cache_hold(ncp);
-                       spin_unlock_wr(&nchpp->spin);
+                       spin_unlock(&nchpp->spin);
                        if (par_locked) {
                                _cache_unlock(par_nch->ncp);
                                par_locked = 0;
@@ -2387,7 +2387,7 @@ restart:
         *       mount case, in which case nc_name will be NULL.
         */
        if (new_ncp == NULL) {
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
                new_ncp = cache_alloc(nlc->nlc_namelen);
                if (nlc->nlc_namelen) {
                        bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
@@ -2397,7 +2397,7 @@ restart:
                goto restart;
        }
        if (par_locked == 0) {
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
                if (_cache_lock_nonblock(par_nch->ncp) == 0) {
                        par_locked = 1;
                        goto restart;
@@ -2411,7 +2411,7 @@ restart:
         */
        ncp = new_ncp;
        _cache_link_parent(ncp, par_nch->ncp, nchpp);
-       spin_unlock_wr(&nchpp->spin);
+       spin_unlock(&nchpp->spin);
        _cache_unlock(par_nch->ncp);
        /* par_locked = 0 - not used */
 found:
@@ -2733,16 +2733,16 @@ _cache_cleanneg(int count)
         * entries.
         */
        while (count) {
-               spin_lock_wr(&ncspin);
+               spin_lock(&ncspin);
                ncp = TAILQ_FIRST(&ncneglist);
                if (ncp == NULL) {
-                       spin_unlock_wr(&ncspin);
+                       spin_unlock(&ncspin);
                        break;
                }
                TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                _cache_hold(ncp);
-               spin_unlock_wr(&ncspin);
+               spin_unlock(&ncspin);
                if (_cache_lock_special(ncp) == 0) {
                        ncp = cache_zap(ncp, 0);
                        if (ncp)
@@ -2779,7 +2779,7 @@ _cache_cleandefered(void)
        for (i = 0; i <= nchash; ++i) {
                nchpp = &nchashtbl[i];
 
-               spin_lock_wr(&nchpp->spin);
+               spin_lock(&nchpp->spin);
                LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
                ncp = &dummy;
                while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
@@ -2788,17 +2788,17 @@ _cache_cleandefered(void)
                        LIST_REMOVE(&dummy, nc_hash);
                        LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
                        _cache_hold(ncp);
-                       spin_unlock_wr(&nchpp->spin);
+                       spin_unlock(&nchpp->spin);
                        if (_cache_lock_nonblock(ncp) == 0) {
                                ncp->nc_flag &= ~NCF_DEFEREDZAP;
                                _cache_unlock(ncp);
                        }
                        _cache_drop(ncp);
-                       spin_lock_wr(&nchpp->spin);
+                       spin_lock(&nchpp->spin);
                        ncp = &dummy;
                }
                LIST_REMOVE(&dummy, nc_hash);
-               spin_unlock_wr(&nchpp->spin);
+               spin_unlock(&nchpp->spin);
        }
 }
 
@@ -3241,17 +3241,17 @@ vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf, int
                if ((vn = p->p_textvp) == NULL)
                        return (EINVAL);
        }
-       spin_lock_wr(&vn->v_spinlock);
+       spin_lock(&vn->v_spinlock);
        TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
                if (ncp->nc_nlen)
                        break;
        }
        if (ncp == NULL) {
-               spin_unlock_wr(&vn->v_spinlock);
+               spin_unlock(&vn->v_spinlock);
                return (EINVAL);
        }
        _cache_hold(ncp);
-       spin_unlock_wr(&vn->v_spinlock);
+       spin_unlock(&vn->v_spinlock);
 
        atomic_add_int(&numfullpathcalls, -1);
        nch.ncp = ncp;
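
The vn_fullpath() hunk above keeps the standard namecache idiom intact:
take a reference on the entry while the vnode's spinlock is held, then
drop the spinlock before doing anything that might block.  A minimal
sketch of the pattern with the renamed API (vp, v_namecache and
_cache_hold() as in the code above; error handling elided):

	struct namecache *ncp;

	spin_lock(&vp->v_spinlock);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;		/* found a named entry */
	}
	if (ncp)
		_cache_hold(ncp);	/* ref taken under the spinlock */
	spin_unlock(&vp->v_spinlock);	/* drop before any blocking op */
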
index 03193d8..c07773d 100644 (file)
@@ -1354,18 +1354,18 @@ jrecord_write_vnode_ref(struct jrecord *jrec, struct vnode *vp)
     struct nchandle nch;
 
     nch.mount = vp->v_mount;
-    spin_lock_wr(&vp->v_spinlock);
+    spin_lock(&vp->v_spinlock);
     TAILQ_FOREACH(nch.ncp, &vp->v_namecache, nc_vnode) {
        if ((nch.ncp->nc_flag & (NCF_UNRESOLVED|NCF_DESTROYED)) == 0)
            break;
     }
     if (nch.ncp) {
        cache_hold(&nch);
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        jrecord_write_path(jrec, JLEAF_PATH_REF, nch.ncp);
        cache_drop(&nch);
     } else {
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
     }
 }
 
@@ -1376,7 +1376,7 @@ jrecord_write_vnode_link(struct jrecord *jrec, struct vnode *vp,
     struct nchandle nch;
 
     nch.mount = vp->v_mount;
-    spin_lock_wr(&vp->v_spinlock);
+    spin_lock(&vp->v_spinlock);
     TAILQ_FOREACH(nch.ncp, &vp->v_namecache, nc_vnode) {
        if (nch.ncp == notncp)
            continue;
@@ -1385,11 +1385,11 @@ jrecord_write_vnode_link(struct jrecord *jrec, struct vnode *vp,
     }
     if (nch.ncp) {
        cache_hold(&nch);
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        jrecord_write_path(jrec, JLEAF_PATH_REF, nch.ncp);
        cache_drop(&nch);
     } else {
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
     }
 }
 
index 44c1aac..a7ecabe 100644 (file)
@@ -174,9 +174,9 @@ __vbusy(struct vnode *vp)
        if ((ulong)vp == trackvnode)
                kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
 #endif
-       spin_lock_wr(&vfs_spin);
+       spin_lock(&vfs_spin);
        __vbusy_interlocked(vp);
-       spin_unlock_wr(&vfs_spin);
+       spin_unlock(&vfs_spin);
 }
 
 /*
@@ -196,7 +196,7 @@ __vfree(struct vnode *vp)
                print_backtrace(-1);
        }
 #endif
-       spin_lock_wr(&vfs_spin);
+       spin_lock(&vfs_spin);
        KKASSERT((vp->v_flag & VFREE) == 0);
 
        /*
@@ -215,7 +215,7 @@ __vfree(struct vnode *vp)
        }
        freevnodes++;
        _vsetflags(vp, VFREE);
-       spin_unlock_wr(&vfs_spin);
+       spin_unlock(&vfs_spin);
 }
 
 /*
@@ -233,12 +233,12 @@ __vfreetail(struct vnode *vp)
        if ((ulong)vp == trackvnode)
                kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
 #endif
-       spin_lock_wr(&vfs_spin);
+       spin_lock(&vfs_spin);
        KKASSERT((vp->v_flag & VFREE) == 0);
        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        _vsetflags(vp, VFREE);
-       spin_unlock_wr(&vfs_spin);
+       spin_unlock(&vfs_spin);
 }
 
 /*
@@ -329,13 +329,13 @@ void
 vdrop(struct vnode *vp)
 {
        KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        atomic_subtract_int(&vp->v_auxrefs, 1);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
 }
 
 /*
@@ -394,13 +394,13 @@ vnode_terminate(struct vnode *vp)
                if (vp->v_mount)
                        VOP_INACTIVE(vp);
        }
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
        if (vshouldfree(vp))
                __vfree(vp);
        else
                _vsetflags(vp, VCACHED); /* inactive but not yet free */
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        vx_unlock(vp);
 }
 
@@ -547,16 +547,16 @@ vget(struct vnode *vp, int flags)
                 * We are allowed to reactivate the vnode while we hold
                 * the VX lock, assuming it can be reactivated.
                 */
-               spin_lock_wr(&vp->v_spinlock);
+               spin_lock(&vp->v_spinlock);
                if (vp->v_flag & VFREE) {
                        __vbusy(vp);
                        sysref_activate(&vp->v_sysref);
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                        sysref_put(&vp->v_sysref);
                } else if (vp->v_flag & VCACHED) {
                        _vclrflags(vp, VCACHED);
                        sysref_activate(&vp->v_sysref);
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                        sysref_put(&vp->v_sysref);
                } else {
                        if (sysref_isinactive(&vp->v_sysref)) {
@@ -564,7 +564,7 @@ vget(struct vnode *vp, int flags)
                                kprintf("Warning vp %p reactivation race\n",
                                        vp);
                        }
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                }
                _vclrflags(vp, VINACTIVE);
                error = 0;
@@ -620,12 +620,12 @@ vx_get_nonblock(struct vnode *vp)
 void
 vx_put(struct vnode *vp)
 {
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        lockmgr(&vp->v_lock, LK_RELEASE);
        sysref_put(&vp->v_sysref);
 }
@@ -720,7 +720,7 @@ allocfreevnode(void)
                 * This is very fragile code and I don't want to use
                 * vhold here.
                 */
-               spin_lock_wr(&vfs_spin);
+               spin_lock(&vfs_spin);
                vnode_rover_locked();
                vnode_rover_locked();
                vp = TAILQ_FIRST(&vnode_free_list);
@@ -735,7 +735,7 @@ allocfreevnode(void)
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_TAIL(&vnode_free_list,
                                          vp, v_freelist);
-                       spin_unlock_wr(&vfs_spin);
+                       spin_unlock(&vfs_spin);
                        continue;
                }
 
@@ -747,7 +747,7 @@ allocfreevnode(void)
                 * the vnode.
                 */
                __vbusy_interlocked(vp);
-               spin_unlock_wr(&vfs_spin);
+               spin_unlock(&vfs_spin);
 #ifdef TRACKVNODE
                if ((ulong)vp == trackvnode)
                        kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
index c444503..0ccb903 100644 (file)
@@ -489,14 +489,14 @@ visleaf(struct vnode *vp)
 {
        struct namecache *ncp;
 
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list)) {
-                       spin_unlock_wr(&vp->v_spinlock);
+                       spin_unlock(&vp->v_spinlock);
                        return(0);
                }
        }
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
        return(1);
 }
 
index ac67ee7..32a333b 100644 (file)
@@ -510,10 +510,10 @@ vtruncbuf(struct vnode *vp, off_t length, int blksize)
        /*
         * Debugging only
         */
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        filename = TAILQ_FIRST(&vp->v_namecache) ?
                   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
 
        /*
         * Make sure no buffers were instantiated while we were trying
index 47dfb5d..b9fde31 100644 (file)
@@ -521,7 +521,7 @@ checkdirs_callback(struct proc *p, void *data)
                 * A shared filedesc is ok, we don't have to copy it
                 * because we are making this change globally.
                 */
-               spin_lock_wr(&fdp->fd_spin);
+               spin_lock(&fdp->fd_spin);
                if (fdp->fd_ncdir.mount == info->old_nch.mount &&
                    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
                        vprele1 = fdp->fd_cdir;
@@ -538,7 +538,7 @@ checkdirs_callback(struct proc *p, void *data)
                        ncdrop2 = fdp->fd_nrdir;
                        cache_copy(&info->new_nch, &fdp->fd_nrdir);
                }
-               spin_unlock_wr(&fdp->fd_spin);
+               spin_unlock(&fdp->fd_spin);
                if (ncdrop1.ncp)
                        cache_drop(&ncdrop1);
                if (ncdrop2.ncp)
@@ -2565,7 +2565,7 @@ kern_lseek(int fd, off_t offset, int whence, off_t *res)
 
        switch (whence) {
        case L_INCR:
-               spin_lock_wr(&fp->f_spin);
+               spin_lock(&fp->f_spin);
                new_offset = fp->f_offset + offset;
                error = 0;
                break;
@@ -2573,18 +2573,18 @@ kern_lseek(int fd, off_t offset, int whence, off_t *res)
                get_mplock();
                error = VOP_GETATTR(vp, &vattr);
                rel_mplock();
-               spin_lock_wr(&fp->f_spin);
+               spin_lock(&fp->f_spin);
                new_offset = offset + vattr.va_size;
                break;
        case L_SET:
                new_offset = offset;
                error = 0;
-               spin_lock_wr(&fp->f_spin);
+               spin_lock(&fp->f_spin);
                break;
        default:
                new_offset = 0;
                error = EINVAL;
-               spin_lock_wr(&fp->f_spin);
+               spin_lock(&fp->f_spin);
                break;
        }
 
@@ -2606,7 +2606,7 @@ kern_lseek(int fd, off_t offset, int whence, off_t *res)
                }
        }
        *res = fp->f_offset;
-       spin_unlock_wr(&fp->f_spin);
+       spin_unlock(&fp->f_spin);
 done:
        fdrop(fp);
        return (error);
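
The kern_lseek() conversion preserves a deliberate shape: each switch
arm acquires fp->f_spin only after any potentially blocking work (such
as the VOP_GETATTR() in the preceding case), so the common tail can
publish f_offset and issue a single unconditional unlock.  Roughly (a
sketch; the bounds checking of the real tail is elided):

	switch (whence) {
	case L_SET:
		new_offset = offset;
		error = 0;
		spin_lock(&fp->f_spin);		/* arm exits with lock held */
		break;
	default:
		new_offset = 0;
		error = EINVAL;
		spin_lock(&fp->f_spin);
		break;
	}
	if (error == 0)
		fp->f_offset = new_offset;
	*res = fp->f_offset;
	spin_unlock(&fp->f_spin);		/* single unlock in the tail */
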
index b7383f2..2fd41c0 100644 (file)
@@ -220,10 +220,10 @@ nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff)
        /*
         * Debugging only
         */
-       spin_lock_wr(&vp->v_spinlock);
+       spin_lock(&vp->v_spinlock);
        filename = TAILQ_FIRST(&vp->v_namecache) ?
                   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
-       spin_unlock_wr(&vp->v_spinlock);
+       spin_unlock(&vp->v_spinlock);
 
        /*
         * Make sure no buffers were instantiated while we were trying
index 9a13c86..8086bca 100644 (file)
@@ -84,8 +84,8 @@ void m_dumpm(struct mbuf *m);
 #define        smb_slock                       spinlock
 #define        smb_sl_init(sl, desc)           spin_init(sl)
 #define        smb_sl_destroy(sl)
-#define        smb_sl_lock(sl)                 spin_lock_wr(sl)
-#define        smb_sl_unlock(sl)               spin_unlock_wr(sl)
+#define        smb_sl_lock(sl)                 spin_lock(sl)
+#define        smb_sl_unlock(sl)               spin_unlock(sl)
 
 #define SMB_STRFREE(p) do { if (p) smb_strfree(p); } while(0)
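
With only exclusive spinlocks left, the smb_sl_* wrappers are now thin
aliases for the core API.  A usage sketch (lock name illustrative):

	struct smb_slock sl;		/* #defined to struct spinlock */

	smb_sl_init(&sl, "smbsl");	/* expands to spin_init(&sl) */
	smb_sl_lock(&sl);		/* expands to spin_lock(&sl) */
	/* ... short, non-blocking critical section ... */
	smb_sl_unlock(&sl);		/* expands to spin_unlock(&sl) */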
 
index d940a5b..2053a86 100644 (file)
@@ -128,10 +128,10 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
                if (error)
                        goto done;
        } else {
-               spin_lock_wr(&swcr_spin);
+               spin_lock(&swcr_spin);
                kschedule = sw->sw_kschedule;
                ++sw->sw_kschedule_refs;
-               spin_unlock_wr(&swcr_spin);
+               spin_unlock(&swcr_spin);
                explicit_kschedule = 0;
        }
 
@@ -476,20 +476,20 @@ done:
         *           (horrible semantics for concurrent operation)
         */
        if (explicit_kschedule) {
-               spin_lock_wr(&swcr_spin);
+               spin_lock(&swcr_spin);
                if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
                        okschedule = sw->sw_kschedule;
                        sw->sw_kschedule = kschedule;
                } else {
                        okschedule = NULL;
                }
-               spin_unlock_wr(&swcr_spin);
+               spin_unlock(&swcr_spin);
                if (okschedule)
                        exf->zerokey(&okschedule);
        } else {
-               spin_lock_wr(&swcr_spin);
+               spin_lock(&swcr_spin);
                --sw->sw_kschedule_refs;
-               spin_unlock_wr(&swcr_spin);
+               spin_unlock(&swcr_spin);
        }
        return error;
 }
@@ -867,7 +867,7 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
                /*
                 * Atomically allocate a session
                 */
-               spin_lock_wr(&swcr_spin);
+               spin_lock(&swcr_spin);
                for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
                        if (swcr_sessions[i] == NULL)
                                break;
@@ -875,11 +875,11 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
                if (i < swcr_sesnum) {
                        swcr_sessions[i] = swd_base;
                        swcr_minsesnum = i + 1;
-                       spin_unlock_wr(&swcr_spin);
+                       spin_unlock(&swcr_spin);
                        break;
                }
                n = swcr_sesnum;
-               spin_unlock_wr(&swcr_spin);
+               spin_unlock(&swcr_spin);
 
                /*
                 * A larger allocation is required, reallocate the array
@@ -892,9 +892,9 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
                swd = kmalloc(n * sizeof(struct swcr_data *),
                              M_CRYPTO_DATA, M_WAITOK | M_ZERO);
 
-               spin_lock_wr(&swcr_spin);
+               spin_lock(&swcr_spin);
                if (swcr_sesnum >= n) {
-                       spin_unlock_wr(&swcr_spin);
+                       spin_unlock(&swcr_spin);
                        kfree(swd, M_CRYPTO_DATA);
                } else if (swcr_sesnum) {
                        bcopy(swcr_sessions, swd,
@@ -902,12 +902,12 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
                        oswd = swcr_sessions;
                        swcr_sessions = swd;
                        swcr_sesnum = n;
-                       spin_unlock_wr(&swcr_spin);
+                       spin_unlock(&swcr_spin);
                        kfree(oswd, M_CRYPTO_DATA);
                } else {
                        swcr_sessions = swd;
                        swcr_sesnum = n;
-                       spin_unlock_wr(&swcr_spin);
+                       spin_unlock(&swcr_spin);
                }
        }
 
@@ -948,12 +948,12 @@ swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
        /*
         * Protect session detachment with the spinlock.
         */
-       spin_lock_wr(&swcr_spin);
+       spin_lock(&swcr_spin);
        swnext = *swdp;
        *swdp = NULL;
        if (sid && swcr_minsesnum > sid)
                swcr_minsesnum = sid;
-       spin_unlock_wr(&swcr_spin);
+       spin_unlock(&swcr_spin);
 
        /*
         * Clean up at our leisure.
index 2721b20..386c4b0 100644 (file)
@@ -131,8 +131,8 @@ struct bounce_zone {
 };
 
 #ifdef SMP
-#define BZ_LOCK(bz)    spin_lock_wr(&(bz)->spin)
-#define BZ_UNLOCK(bz)  spin_unlock_wr(&(bz)->spin)
+#define BZ_LOCK(bz)    spin_lock(&(bz)->spin)
+#define BZ_UNLOCK(bz)  spin_unlock(&(bz)->spin)
 #else
 #define BZ_LOCK(bz)    crit_enter()
 #define BZ_UNLOCK(bz)  crit_exit()
@@ -213,7 +213,7 @@ bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
        if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                return(cache);
 #ifdef SMP
-       spin_lock_wr(&tag->spin);
+       spin_lock(&tag->spin);
 #endif
        return(tag->segments);
 }
@@ -224,7 +224,7 @@ bus_dma_tag_unlock(bus_dma_tag_t tag)
 {
 #ifdef SMP
        if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
-               spin_unlock_wr(&tag->spin);
+               spin_unlock(&tag->spin);
 #endif
 }
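
The BZ_LOCK()/BZ_UNLOCK() pair (repeated verbatim in the three busdma
copies below) keeps the SMP/UP split: a real spinlock when another CPU
can race, a plain critical section when only interrupts can.  Callers
stay oblivious either way, e.g. (field access illustrative):

	BZ_LOCK(bz);
	bz->active_bpages++;		/* bounce-zone bookkeeping */
	BZ_UNLOCK(bz);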
 
index eba440f..10d4c5e 100644 (file)
@@ -131,8 +131,8 @@ struct bounce_zone {
 };
 
 #ifdef SMP
-#define BZ_LOCK(bz)    spin_lock_wr(&(bz)->spin)
-#define BZ_UNLOCK(bz)  spin_unlock_wr(&(bz)->spin)
+#define BZ_LOCK(bz)    spin_lock(&(bz)->spin)
+#define BZ_UNLOCK(bz)  spin_unlock(&(bz)->spin)
 #else
 #define BZ_LOCK(bz)    crit_enter()
 #define BZ_UNLOCK(bz)  crit_exit()
@@ -213,7 +213,7 @@ bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
        if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
                return(cache);
 #ifdef SMP
-       spin_lock_wr(&tag->spin);
+       spin_lock(&tag->spin);
 #endif
        return(tag->segments);
 }
@@ -224,7 +224,7 @@ bus_dma_tag_unlock(bus_dma_tag_t tag)
 {
 #ifdef SMP
        if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
-               spin_unlock_wr(&tag->spin);
+               spin_unlock(&tag->spin);
 #endif
 }
 
index d8326c8..3262e6c 100644 (file)
@@ -121,8 +121,8 @@ struct bounce_zone {
 };
 
 #ifdef SMP
-#define BZ_LOCK(bz)    spin_lock_wr(&(bz)->spin)
-#define BZ_UNLOCK(bz)  spin_unlock_wr(&(bz)->spin)
+#define BZ_LOCK(bz)    spin_lock(&(bz)->spin)
+#define BZ_UNLOCK(bz)  spin_unlock(&(bz)->spin)
 #else
 #define BZ_LOCK(bz)    crit_enter()
 #define BZ_UNLOCK(bz)  crit_exit()
index e5ded77..59daef6 100644 (file)
@@ -119,8 +119,8 @@ struct bounce_zone {
 };
 
 #ifdef SMP
-#define BZ_LOCK(bz)    spin_lock_wr(&(bz)->spin)
-#define BZ_UNLOCK(bz)  spin_unlock_wr(&(bz)->spin)
+#define BZ_LOCK(bz)    spin_lock(&(bz)->spin)
+#define BZ_UNLOCK(bz)  spin_unlock(&(bz)->spin)
 #else
 #define BZ_LOCK(bz)    crit_enter()
 #define BZ_UNLOCK(bz)  crit_exit()
index 4d0af1a..57960c9 100644 (file)
@@ -71,7 +71,7 @@ extern void spin_lock_wr_contested2(struct spinlock *mtx);
  * TRUE on success.
  */
 static __inline boolean_t
-spin_trylock_wr(struct spinlock *mtx)
+spin_trylock(struct spinlock *mtx)
 {
        globaldata_t gd = mycpu;
        int value;
@@ -87,7 +87,7 @@ spin_trylock_wr(struct spinlock *mtx)
 #else
 
 static __inline boolean_t
-spin_trylock_wr(struct spinlock *mtx)
+spin_trylock(struct spinlock *mtx)
 {
        globaldata_t gd = mycpu;
 
@@ -119,7 +119,7 @@ spin_lock_wr_quick(globaldata_t gd, struct spinlock *mtx)
 }
 
 static __inline void
-spin_lock_wr(struct spinlock *mtx)
+spin_lock(struct spinlock *mtx)
 {
        spin_lock_wr_quick(mycpu, mtx);
 }
@@ -142,7 +142,7 @@ spin_unlock_wr_quick(globaldata_t gd, struct spinlock *mtx)
 }
 
 static __inline void
-spin_unlock_wr(struct spinlock *mtx)
+spin_unlock(struct spinlock *mtx)
 {
        spin_unlock_wr_quick(mycpu, mtx);
 }
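
After this header change the exclusive-spinlock API is simply
spin_init() / spin_lock() / spin_trylock() / spin_unlock().  A minimal
consumer, assuming <sys/spinlock2.h> (structure and names illustrative):

	#include <sys/spinlock.h>
	#include <sys/spinlock2.h>

	struct mycounter {
		struct spinlock	spin;
		int		count;
	};

	static void
	mycounter_init(struct mycounter *mc)
	{
		spin_init(&mc->spin);
		mc->count = 0;
	}

	static void
	mycounter_bump(struct mycounter *mc)
	{
		spin_lock(&mc->spin);	/* spins, never blocks or switches */
		++mc->count;
		spin_unlock(&mc->spin);
	}

	static boolean_t
	mycounter_trybump(struct mycounter *mc)
	{
		if (!spin_trylock(&mc->spin))
			return (FALSE);	/* contended; caller may retry */
		++mc->count;
		spin_unlock(&mc->spin);
		return (TRUE);
	}
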
index f0a8a0e..1f00265 100644 (file)
@@ -2139,9 +2139,9 @@ devfs_fetch_ino(void)
 {
        ino_t   ret;
 
-       spin_lock_wr(&ino_lock);
+       spin_lock(&ino_lock);
        ret = d_ino++;
-       spin_unlock_wr(&ino_lock);
+       spin_unlock(&ino_lock);
 
        return ret;
 }
index 9c32057..20f5600 100644 (file)
@@ -425,19 +425,19 @@ ntfs_ntput(struct ntnode *ip)
        dprintf(("ntfs_ntput: rele ntnode %"PRId64": %p, usecount: %d\n",
                ip->i_number, ip, ip->i_usecount));
 
-       spin_lock_wr(&ip->i_interlock);
+       spin_lock(&ip->i_interlock);
        ip->i_usecount--;
 
 #ifdef DIAGNOSTIC
        if (ip->i_usecount < 0) {
-               spin_unlock_wr(&ip->i_interlock);
+               spin_unlock(&ip->i_interlock);
                panic("ntfs_ntput: ino: %"PRId64" usecount: %d \n",
                      ip->i_number,ip->i_usecount);
        }
 #endif
 
        if (ip->i_usecount > 0) {
-               spin_unlock_wr(&ip->i_interlock);
+               spin_unlock(&ip->i_interlock);
                LOCKMGR(&ip->i_lock, LK_RELEASE);
                return;
        }
@@ -445,7 +445,7 @@ ntfs_ntput(struct ntnode *ip)
        dprintf(("ntfs_ntput: deallocating ntnode: %"PRId64"\n", ip->i_number));
 
        if (ip->i_fnlist.lh_first) {
-               spin_unlock_wr(&ip->i_interlock);
+               spin_unlock(&ip->i_interlock);
                panic("ntfs_ntput: ntnode has fnodes\n");
        }
 
@@ -459,7 +459,7 @@ ntfs_ntput(struct ntnode *ip)
                LIST_REMOVE(vap,va_list);
                ntfs_freentvattr(vap);
        }
-       spin_unlock_wr(&ip->i_interlock);
+       spin_unlock(&ip->i_interlock);
        vrele(ip->i_devvp);
        FREE(ip, M_NTFSNTNODE);
 }
@@ -486,15 +486,15 @@ ntfs_ntrele(struct ntnode *ip)
        dprintf(("ntfs_ntrele: rele ntnode %"PRId64": %p, usecount: %d\n",
                ip->i_number, ip, ip->i_usecount));
 
-       spin_lock_wr(&ip->i_interlock);
+       spin_lock(&ip->i_interlock);
        ip->i_usecount--;
 
        if (ip->i_usecount < 0) {
-               spin_unlock_wr(&ip->i_interlock);
+               spin_unlock(&ip->i_interlock);
                panic("ntfs_ntrele: ino: %"PRId64" usecount: %d \n",
                      ip->i_number,ip->i_usecount);
        }
-       spin_unlock_wr(&ip->i_interlock);
+       spin_unlock(&ip->i_interlock);
 }
 
 /*
index 581f489..7d4abe8 100644 (file)
@@ -239,10 +239,10 @@ procfs_close(struct vop_close_args *ap)
                if ((ap->a_vp->v_opencount < 2)
                    && (p = pfind(pfs->pfs_pid))
                    && !(p->p_pfsflags & PF_LINGER)) {
-                       spin_lock_wr(&p->p_spin);
+                       spin_lock(&p->p_spin);
                        p->p_stops = 0;
                        p->p_step = 0;
-                       spin_unlock_wr(&p->p_spin);
+                       spin_unlock(&p->p_spin);
                        wakeup(&p->p_step);
                }
                break;
@@ -310,15 +310,15 @@ procfs_ioctl(struct vop_ioctl_args *ap)
          psp = (struct procfs_status *)ap->a_data;
          psp->flags = procp->p_pfsflags;
          psp->events = procp->p_stops;
-         spin_lock_wr(&procp->p_spin);
+         spin_lock(&procp->p_spin);
          if (procp->p_step) {
            psp->state = 0;
            psp->why = procp->p_stype;
            psp->val = procp->p_xstat;
-           spin_unlock_wr(&procp->p_spin);
+           spin_unlock(&procp->p_spin);
          } else {
            psp->state = 1;
-           spin_unlock_wr(&procp->p_spin);
+           spin_unlock(&procp->p_spin);
            psp->why = 0;       /* Not defined values */
            psp->val = 0;       /* Not defined values */
          }
@@ -329,16 +329,16 @@ procfs_ioctl(struct vop_ioctl_args *ap)
           *       the MP lock.
           */
          psp = (struct procfs_status *)ap->a_data;
-         spin_lock_wr(&procp->p_spin);
+         spin_lock(&procp->p_spin);
          while (procp->p_step == 0) {
            tsleep_interlock(&procp->p_stype, PCATCH);
-           spin_unlock_wr(&procp->p_spin);
+           spin_unlock(&procp->p_spin);
            error = tsleep(&procp->p_stype, PCATCH | PINTERLOCKED, "piocwait", 0);
            if (error)
              return error;
-           spin_lock_wr(&procp->p_spin);
+           spin_lock(&procp->p_spin);
          }
-         spin_unlock_wr(&procp->p_spin);
+         spin_unlock(&procp->p_spin);
          psp->state = 1;       /* It stopped */
          psp->flags = procp->p_pfsflags;
          psp->events = procp->p_stops;
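
The PIOCWAIT loop above is the one non-trivial conversion in procfs: it
relies on the tsleep_interlock()/PINTERLOCKED protocol so that a wakeup
arriving in the window where p_spin is dropped is not lost.  The
pattern in isolation (procp, p_step and p_stype as in the code above):

	spin_lock(&procp->p_spin);
	while (procp->p_step == 0) {
		tsleep_interlock(&procp->p_stype, PCATCH);
		spin_unlock(&procp->p_spin);	/* wakeup can no longer be lost */
		error = tsleep(&procp->p_stype, PCATCH | PINTERLOCKED,
			       "piocwait", 0);
		if (error)
			return (error);
		spin_lock(&procp->p_spin);	/* re-check under the lock */
	}
	spin_unlock(&procp->p_spin);
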
index 8f286e4..a77069e 100644 (file)
@@ -1437,9 +1437,9 @@ tmpfs_fetch_ino(void)
 {
        ino_t   ret;
 
-       spin_lock_wr(&ino_lock);
+       spin_lock(&ino_lock);
        ret = t_ino++;
-       spin_unlock_wr(&ino_lock);
+       spin_unlock(&ino_lock);
 
        return ret;
 }
index 4d7b557..735033b 100644 (file)
@@ -1136,7 +1136,7 @@ swap_chain_iodone(struct bio *biox)
        /*
         * Remove us from the chain.
         */
-       spin_lock_wr(&bp->b_lock.lk_spinlock);
+       spin_lock(&bp->b_lock.lk_spinlock);
        nextp = &nbio->bio_caller_info1.cluster_head;
        while (*nextp != bufx) {
                KKASSERT(*nextp != NULL);
@@ -1144,7 +1144,7 @@ swap_chain_iodone(struct bio *biox)
        }
        *nextp = bufx->b_cluster_next;
        chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
-       spin_unlock_wr(&bp->b_lock.lk_spinlock);
+       spin_unlock(&bp->b_lock.lk_spinlock);
 
        /*
         * Clean up bufx.  If the chain is now empty we finish out
index b939a62..468fee3 100644 (file)
@@ -346,7 +346,7 @@ getpbuf(int *pfreecnt)
 {
        struct buf *bp;
 
-       spin_lock_wr(&bswspin);
+       spin_lock(&bswspin);
 
        for (;;) {
                if (pfreecnt) {
@@ -366,7 +366,7 @@ getpbuf(int *pfreecnt)
        if (pfreecnt)
                --*pfreecnt;
 
-       spin_unlock_wr(&bswspin);
+       spin_unlock(&bswspin);
 
        initpbuf(bp);
        KKASSERT(dsched_is_clear_buf_priv(bp));
@@ -379,7 +379,7 @@ getpbuf_kva(int *pfreecnt)
 {
        struct buf *bp;
 
-       spin_lock_wr(&bswspin);
+       spin_lock(&bswspin);
 
        for (;;) {
                if (pfreecnt) {
@@ -399,7 +399,7 @@ getpbuf_kva(int *pfreecnt)
        if (pfreecnt)
                --*pfreecnt;
 
-       spin_unlock_wr(&bswspin);
+       spin_unlock(&bswspin);
 
        initpbuf(bp);
        KKASSERT(dsched_is_clear_buf_priv(bp));
@@ -420,17 +420,17 @@ trypbuf(int *pfreecnt)
 {
        struct buf *bp;
 
-       spin_lock_wr(&bswspin);
+       spin_lock(&bswspin);
 
        if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_raw)) == NULL) {
-               spin_unlock_wr(&bswspin);
+               spin_unlock(&bswspin);
                return NULL;
        }
        TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
        --pbuf_raw_count;
        --*pfreecnt;
 
-       spin_unlock_wr(&bswspin);
+       spin_unlock(&bswspin);
 
        initpbuf(bp);
 
@@ -442,17 +442,17 @@ trypbuf_kva(int *pfreecnt)
 {
        struct buf *bp;
 
-       spin_lock_wr(&bswspin);
+       spin_lock(&bswspin);
 
        if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_kva)) == NULL) {
-               spin_unlock_wr(&bswspin);
+               spin_unlock(&bswspin);
                return NULL;
        }
        TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
        --pbuf_kva_count;
        --*pfreecnt;
 
-       spin_unlock_wr(&bswspin);
+       spin_unlock(&bswspin);
 
        initpbuf(bp);
 
@@ -479,7 +479,7 @@ relpbuf(struct buf *bp, int *pfreecnt)
 
        BUF_UNLOCK(bp);
 
-       spin_lock_wr(&bswspin);
+       spin_lock(&bswspin);
        if (bp->b_kvabase) {
                TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
                ++pbuf_kva_count;
@@ -499,7 +499,7 @@ relpbuf(struct buf *bp, int *pfreecnt)
                if (++*pfreecnt == 1)
                        wake_freecnt = 1;
        }
-       spin_unlock_wr(&bswspin);
+       spin_unlock(&bswspin);
 
        if (wake_bsw_kva)
                wakeup(&bswneeded_kva);
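
relpbuf() (cut off just above) shows the usual "decide under the lock,
wake up after" shape: which waiters need waking is recorded while
bswspin is held, but wakeup() itself only runs after the unlock,
keeping the spinlock hold time minimal.  Roughly (a sketch using the
names above; bswneeded_kva doubles as flag and sleep channel):

	int wake_bsw_kva = 0;

	spin_lock(&bswspin);
	TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
	++pbuf_kva_count;
	if (bswneeded_kva) {
		bswneeded_kva = 0;
		wake_bsw_kva = 1;	/* remember, don't wake yet */
	}
	spin_unlock(&bswspin);

	if (wake_bsw_kva)
		wakeup(&bswneeded_kva);	/* outside the spinlock */
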
index e3dd2f1..6263047 100644 (file)
@@ -64,7 +64,7 @@ zalloc(vm_zone_t z)
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
 #endif
-       spin_lock_wr(&z->zlock);
+       spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                item = z->zitems;
 #ifdef INVARIANTS
@@ -76,9 +76,9 @@ zalloc(vm_zone_t z)
                z->zitems = ((void **) item)[0];
                z->zfreecnt--;
                z->znalloc++;
-               spin_unlock_wr(&z->zlock);
+               spin_unlock(&z->zlock);
        } else {
-               spin_unlock_wr(&z->zlock);
+               spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
@@ -98,7 +98,7 @@ zalloc(vm_zone_t z)
 void
 zfree(vm_zone_t z, void *item)
 {
-       spin_lock_wr(&z->zlock);
+       spin_lock(&z->zlock);
        ((void **) item)[0] = z->zitems;
 #ifdef INVARIANTS
        if (((void **) item)[1] == (void *) ZENTRY_FREE)
@@ -107,7 +107,7 @@ zfree(vm_zone_t z, void *item)
 #endif
        z->zitems = item;
        z->zfreecnt++;
-       spin_unlock_wr(&z->zlock);
+       spin_unlock(&z->zlock);
 }
 
 /*
@@ -490,7 +490,7 @@ zget(vm_zone_t z)
                nitems = nbytes / z->zsize;
        }
 
-       spin_lock_wr(&z->zlock);
+       spin_lock(&z->zlock);
        z->ztotal += nitems;
        /*
         * Save one for immediate allocation
@@ -520,7 +520,7 @@ zget(vm_zone_t z)
        } else {
                item = NULL;
        }
-       spin_unlock_wr(&z->zlock);
+       spin_unlock(&z->zlock);
 
        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If