HAMMER VFS - Make all entry points MPSAFE, remove giant & critical sections
authorMatthew Dillon <dillon@apollo.backplane.com>
Tue, 24 Aug 2010 23:04:59 +0000 (16:04 -0700)
committerMatthew Dillon <dillon@apollo.backplane.com>
Tue, 24 Aug 2010 23:04:59 +0000 (16:04 -0700)
* All VFS, VOP, ioops, and bio_done entry points are now mpsafe and no
  longer use giant.

* Implement hmp->fs_token and hmp->io_token for each HAMMER mount.

  All operations that previously needed the MP lock now use hmp->fs_token.
  All operations that interact with BIO callbacks now use hmp->io_token.
  All critical sections now use io_token (these previously interlocked
  against IO callbacks).

  NOTE: read (for cached data) and getattr were MPSAFE before and
continue to be MPSAFE.

sys/vfs/hammer/hammer.h
sys/vfs/hammer/hammer_blockmap.c
sys/vfs/hammer/hammer_flusher.c
sys/vfs/hammer/hammer_inode.c
sys/vfs/hammer/hammer_io.c
sys/vfs/hammer/hammer_ondisk.c
sys/vfs/hammer/hammer_signal.c
sys/vfs/hammer/hammer_vfsops.c
sys/vfs/hammer/hammer_vnops.c

index 76914c5..22fa93d 100644 (file)
@@ -859,8 +859,8 @@ struct hammer_mount {
        struct hammer_io_list meta_list;        /* dirty meta bufs    */
        struct hammer_io_list lose_list;        /* loose buffers      */
        int     locked_dirty_space;             /* meta/volu count    */
-       int     io_running_space;               /* track I/O in progress */
-       int     io_running_wakeup;
+       int     io_running_space;               /* io_token */
+       int     io_running_wakeup;              /* io_token */
        int     objid_cache_count;
        int     error;                          /* critical I/O error */
        struct krate    krate;                  /* rate limited kprintf */
@@ -891,6 +891,9 @@ struct hammer_mount {
        TAILQ_HEAD(, hammer_reclaim) reclaim_list;
        TAILQ_HEAD(, hammer_io) iorun_list;
 
+       struct lwkt_token       fs_token;       /* high level */
+       struct lwkt_token       io_token;       /* low level (IO callback) */
+
        struct hammer_inostats  inostats[HAMMER_INOSTATS_HSIZE];
 };
 
index e75615e..b308eab 100644 (file)
@@ -1223,6 +1223,8 @@ failed:
 
 /*
  * Check space availability
+ *
+ * MPSAFE - does not require fs_token
  */
 int
 _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
index 1c8a433..45a2b4f 100644 (file)
@@ -171,13 +171,13 @@ hammer_flusher_create(hammer_mount_t hmp)
        TAILQ_INIT(&hmp->flusher.ready_list);
 
        lwkt_create(hammer_flusher_master_thread, hmp,
-                   &hmp->flusher.td, NULL, 0, -1, "hammer-M");
+                   &hmp->flusher.td, NULL, TDF_MPSAFE, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
-                           &info->td, NULL, 0, -1, "hammer-S%d", i);
+                           &info->td, NULL, TDF_MPSAFE, -1, "hammer-S%d", i);
        }
 }
 
@@ -222,6 +222,8 @@ hammer_flusher_master_thread(void *arg)
 
        hmp = arg;
 
+       lwkt_gettoken(&hmp->fs_token);
+
        for (;;) {
                /*
                 * Do at least one flush cycle.  We may have to update the
@@ -264,6 +266,7 @@ hammer_flusher_master_thread(void *arg)
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
+       lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
 }
 
@@ -438,6 +441,7 @@ hammer_flusher_slave_thread(void *arg)
 
        info = arg;
        hmp = info->hmp;
+       lwkt_gettoken(&hmp->fs_token);
 
        for (;;) {
                while (info->runstate == 0)
@@ -459,6 +463,7 @@ hammer_flusher_slave_thread(void *arg)
        }
        info->td = NULL;
        wakeup(&info->td);
+       lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
 }
 
@@ -472,9 +477,11 @@ hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
+        *
+        * The io_token is needed to protect the list.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
-               crit_enter();   /* biodone() race */
+               lwkt_gettoken(&hmp->io_token);
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
@@ -483,7 +490,7 @@ hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
-               crit_exit();
+               lwkt_reltoken(&hmp->io_token);
        }
 }
 
index 4678bf7..d4784fa 100644 (file)
@@ -177,6 +177,7 @@ int
 hammer_vop_inactive(struct vop_inactive_args *ap)
 {
        struct hammer_inode *ip = VTOI(ap->a_vp);
+       hammer_mount_t hmp;
 
        /*
         * Degenerate case
@@ -199,12 +200,13 @@ hammer_vop_inactive(struct vop_inactive_args *ap)
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
-               get_mplock();
+               hmp = ip->hmp;
+               lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
+               lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
-               rel_mplock();
        }
        return(0);
 }
@@ -229,6 +231,7 @@ hammer_vop_reclaim(struct vop_reclaim_args *ap)
 
        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
+               lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;
@@ -240,6 +243,7 @@ hammer_vop_reclaim(struct vop_reclaim_args *ap)
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
+               lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
 }
index a959a83..c934468 100644 (file)
@@ -77,11 +77,17 @@ hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
 
 /*
  * Helper routine to disassociate a buffer cache buffer from an I/O
- * structure.  The buffer is unlocked and marked appropriate for reclamation.
+ * structure.  The io must be interlocked and marked appropriately
+ * for reclamation.
  *
  * The io may have 0 or 1 references depending on who called us.  The
  * caller is responsible for dealing with the refs.
  *
+ * The io must be in a released state with the io->bp owned and
+ * locked by the caller of this function.  When not called from an
+ * io_deallocate() this cannot race an io_deallocate() since the
+ * kernel would be unable to get the buffer lock in that case.
+ *
  * This call can only be made when no action is required on the buffer.
  *
  * The caller must own the buffer and the IO must indicate that the
@@ -102,7 +108,7 @@ hammer_io_disassociate(hammer_io_structure_t iou)
         * If the buffer was locked someone wanted to get rid of it.
         */
        if (bp->b_flags & B_LOCKED) {
-               --hammer_count_io_locked;
+               atomic_add_int(&hammer_count_io_locked, -1);
                bp->b_flags &= ~B_LOCKED;
        }
        if (iou->io.reclaim) {
@@ -135,15 +141,16 @@ void
 hammer_io_wait(hammer_io_t io)
 {
        if (io->running) {
-               for (;;) {
+               hammer_mount_t hmp = io->hmp;
+
+               lwkt_gettoken(&hmp->io_token);
+               while (io->running) {
                        io->waiting = 1;
                        tsleep_interlock(io, 0);
-                       if (io->running == 0)
-                               break;
-                       tsleep(io, PINTERLOCKED, "hmrflw", hz);
-                       if (io->running == 0)
-                               break;
+                       if (io->running)
+                               tsleep(io, PINTERLOCKED, "hmrflw", hz);
                }
+               lwkt_reltoken(&hmp->io_token);
        }
 }
 
@@ -162,9 +169,9 @@ hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
        /*
         * Degenerate case, no I/O is running
         */
-       crit_enter();
+       lwkt_gettoken(&hmp->io_token);
        if (TAILQ_EMPTY(&hmp->iorun_list)) {
-               crit_exit();
+               lwkt_reltoken(&hmp->io_token);
                if (doflush)
                        hammer_io_flush_sync(hmp);
                return;
@@ -188,7 +195,7 @@ hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
        io = TAILQ_FIRST(&hmp->iorun_list);
        if (io && io->type == HAMMER_STRUCTURE_DUMMY)
                wakeup(io);
-       crit_exit();
+       lwkt_reltoken(&hmp->io_token);
 
        if (doflush)
                hammer_io_flush_sync(hmp);
@@ -216,11 +223,20 @@ hammer_io_clear_error(struct hammer_io *io)
  * This is used by HAMMER's reblocking code to avoid trying to
  * swapcache the filesystem's data when it is read or written
  * by the reblocking code.
+ *
+ * The caller has a ref on the buffer preventing the bp from
+ * being disassociated from it.
  */
 void
 hammer_io_notmeta(hammer_buffer_t buffer)
 {
-       buffer->io.bp->b_flags |= B_NOTMETA;
+       if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
+               hammer_mount_t hmp = buffer->io.hmp;
+
+               lwkt_gettoken(&hmp->io_token);
+               buffer->io.bp->b_flags |= B_NOTMETA;
+               lwkt_reltoken(&hmp->io_token);
+       }
 }
 
 
@@ -238,6 +254,8 @@ hammer_io_notmeta(hammer_buffer_t buffer)
  * zones cannot be clustered due to their mixed buffer sizes.  This is
  * not an issue since such clustering occurs in hammer_vnops at the
  * regular file layer, whereas this is the buffered block device layer.
+ *
+ * No I/O callbacks can occur while we hold the buffer locked.
  */
 int
 hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
@@ -246,7 +264,7 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
        int   error;
 
        if ((bp = io->bp) == NULL) {
-               hammer_count_io_running_read += io->bytes;
+               atomic_add_int(&hammer_count_io_running_read, io->bytes);
                if (hammer_cluster_enable && limit > io->bytes) {
                        error = cluster_read(devvp, io->offset + limit,
                                             io->offset, io->bytes,
@@ -257,7 +275,7 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
                        error = bread(devvp, io->offset, io->bytes, &io->bp);
                }
                hammer_stats_disk_read += io->bytes;
-               hammer_count_io_running_read -= io->bytes;
+               atomic_add_int(&hammer_count_io_running_read, -io->bytes);
 
                /*
                 * The code generally assumes b_ops/b_dep has been set-up,
@@ -305,6 +323,8 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
                bp->b_flags &= ~B_IODEBUG;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
+
+               /* io->worklist is locked by the io lock */
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                BUF_KERNPROC(bp);
                KKASSERT(io->modified == 0);
@@ -327,6 +347,8 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
  *
  * This function will also mark the IO as modified but it will not
  * increment the modify_refs count.
+ *
+ * No I/O callbacks can occur while we hold the buffer locked.
  */
 int
 hammer_io_new(struct vnode *devvp, struct hammer_io *io)
@@ -338,6 +360,8 @@ hammer_io_new(struct vnode *devvp, struct hammer_io *io)
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
+
+               /* io->worklist is locked by the io lock */
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
@@ -358,6 +382,8 @@ hammer_io_new(struct vnode *devvp, struct hammer_io *io)
 /*
  * Advance the activity count on the underlying buffer because
  * HAMMER does not getblk/brelse on every access.
+ *
+ * The io->bp cannot go away while the buffer is referenced.
  */
 void
 hammer_io_advance(struct hammer_io *io)
@@ -382,17 +408,21 @@ int
 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
 {
        hammer_io_structure_t iou;
+       hammer_mount_t hmp;
        hammer_off_t phys_offset;
        struct buf *bp;
        int error;
 
+       hmp = volume->io.hmp;
+       lwkt_gettoken(&hmp->io_token);
+
        phys_offset = volume->ondisk->vol_buf_beg +
                      (zone2_offset & HAMMER_OFF_SHORT_MASK);
-       crit_enter();
        if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
                bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
        else
                bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
+
        if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
 #if 0
                hammer_ref(&iou->io.lock);
@@ -415,7 +445,7 @@ hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
                brelse(bp);
                error = 0;
        }
-       crit_exit();
+       lwkt_reltoken(&hmp->io_token);
        return(error);
 }
 
@@ -593,9 +623,8 @@ hammer_io_flush(struct hammer_io *io, int reclaim)
        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
-       if (io->modified == 0) {
+       if (io->modified == 0)
                return;
-       }
 
        KKASSERT(io->bp);
        KKASSERT(io->modify_refs <= 0);
@@ -607,6 +636,8 @@ hammer_io_flush(struct hammer_io *io, int reclaim)
         * We are going to bawrite() this bp.  Don't leave a window where
         * io->released is set, we actually own the bp rather then our
         * buffer.
+        *
+        * The io_token should not be required here.
         */
        bp = io->bp;
        if (io->released) {
@@ -615,14 +646,15 @@ hammer_io_flush(struct hammer_io *io, int reclaim)
                /* io->released = 0; */
                KKASSERT(io->released);
                KKASSERT(io->bp == bp);
+       } else {
+               io->released = 1;
        }
-       io->released = 1;
 
        if (reclaim) {
                io->reclaim = 1;
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
-                       ++hammer_count_io_locked;
+                       atomic_add_int(&hammer_count_io_locked, 1);
                }
        }
 
@@ -651,11 +683,14 @@ hammer_io_flush(struct hammer_io *io, int reclaim)
 
        /*
         * Transfer ownership to the kernel and initiate I/O.
+        *
+        * NOTE: We do not hold io_token so an atomic op is required to
+        *       update io_running_space.
         */
        io->running = 1;
-       io->hmp->io_running_space += io->bytes;
+       atomic_add_int(&io->hmp->io_running_space, io->bytes);
+       atomic_add_int(&hammer_count_io_running_write, io->bytes);
        TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
-       hammer_count_io_running_write += io->bytes;
        bawrite(bp);
        hammer_io_flush_mark(io->volume);
 }
@@ -826,7 +861,7 @@ hammer_io_clear_modify(struct hammer_io *io, int inval)
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                io->hmp->locked_dirty_space -= io->bytes;
-               hammer_count_dirtybufspace -= io->bytes;
+               atomic_add_int(&hammer_count_dirtybufspace, -io->bytes);
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
@@ -868,17 +903,23 @@ restart:
  * Clear the IO's modify list.  Even though the IO is no longer modified
  * it may still be on the lose_list.  This routine is called just before
  * the governing hammer_buffer is destroyed.
+ *
+ * mod_list requires io_token protection.
  */
 void
 hammer_io_clear_modlist(struct hammer_io *io)
 {
+       hammer_mount_t hmp = io->hmp;
+
        KKASSERT(io->modified == 0);
        if (io->mod_list) {
-               crit_enter();   /* biodone race against list */
-               KKASSERT(io->mod_list == &io->hmp->lose_list);
-               TAILQ_REMOVE(io->mod_list, io, mod_entry);
-               io->mod_list = NULL;
-               crit_exit();
+               lwkt_gettoken(&hmp->io_token);
+               if (io->mod_list) {
+                       KKASSERT(io->mod_list == &io->hmp->lose_list);
+                       TAILQ_REMOVE(io->mod_list, io, mod_entry);
+                       io->mod_list = NULL;
+               }
+               lwkt_reltoken(&hmp->io_token);
        }
 }
 
@@ -893,12 +934,12 @@ hammer_io_set_modlist(struct hammer_io *io)
        case HAMMER_STRUCTURE_VOLUME:
                io->mod_list = &hmp->volu_list;
                hmp->locked_dirty_space += io->bytes;
-               hammer_count_dirtybufspace += io->bytes;
+               atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
                break;
        case HAMMER_STRUCTURE_META_BUFFER:
                io->mod_list = &hmp->meta_list;
                hmp->locked_dirty_space += io->bytes;
-               hammer_count_dirtybufspace += io->bytes;
+               atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
                break;
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                io->mod_list = &hmp->undo_list;
@@ -921,10 +962,13 @@ hammer_io_set_modlist(struct hammer_io *io)
 
 /*
  * Pre-IO initiation kernel callback - cluster build only
+ *
+ * bioops callback - hold io_token
  */
 static void
 hammer_io_start(struct buf *bp)
 {
+       /* nothing to do, so io_token not needed */
 }
 
 /*
@@ -933,6 +977,8 @@ hammer_io_start(struct buf *bp)
  * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
  * may also be set if we were marking a cluster header open.  Only remove
  * our dependancy if the modified bit is clear.
+ *
+ * bioops callback - hold io_token
  */
 static void
 hammer_io_complete(struct buf *bp)
@@ -941,6 +987,8 @@ hammer_io_complete(struct buf *bp)
        struct hammer_mount *hmp = iou->io.hmp;
        struct hammer_io *ionext;
 
+       lwkt_gettoken(&hmp->io_token);
+
        KKASSERT(iou->io.released == 1);
 
        /*
@@ -981,8 +1029,8 @@ hammer_io_complete(struct buf *bp)
 #endif
                }
                hammer_stats_disk_write += iou->io.bytes;
-               hammer_count_io_running_write -= iou->io.bytes;
-               hmp->io_running_space -= iou->io.bytes;
+               atomic_add_int(&hammer_count_io_running_write, -iou->io.bytes);
+               atomic_add_int(&hmp->io_running_space, -iou->io.bytes);
                if (hmp->io_running_wakeup &&
                    hmp->io_running_space < hammer_limit_running_io / 2) {
                    hmp->io_running_wakeup = 0;
@@ -1016,11 +1064,12 @@ hammer_io_complete(struct buf *bp)
         * interlock.
         */
        if (bp->b_flags & B_LOCKED) {
-               --hammer_count_io_locked;
+               atomic_add_int(&hammer_count_io_locked, -1);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
+       lwkt_reltoken(&hmp->io_token);
 }
 
 /*
@@ -1037,11 +1086,18 @@ hammer_io_complete(struct buf *bp)
  * our only recourse is to set B_LOCKED.
  *
  * WARNING: This may be called from an interrupt via hammer_io_complete()
+ *
+ * bioops callback - hold io_token
  */
 static void
 hammer_io_deallocate(struct buf *bp)
 {
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
+       hammer_mount_t hmp;
+
+       hmp = iou->io.hmp;
+
+       lwkt_gettoken(&hmp->io_token);
 
        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
@@ -1050,52 +1106,64 @@ hammer_io_deallocate(struct buf *bp)
                 * or interlocked HAMMER structure.
                 */
                bp->b_flags |= B_LOCKED;
-               ++hammer_count_io_locked;
+               atomic_add_int(&hammer_count_io_locked, 1);
        } else if (iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
-               ++hammer_count_io_locked;
+               atomic_add_int(&hammer_count_io_locked, 1);
                hammer_put_interlock(&iou->io.lock, 0);
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
-                * have to add it to the loose list.
+                * have to add it to the loose list.  The kernel has
+                * locked the buffer and therefore our io must be
+                * in a released state.
                 */
                hammer_io_disassociate(iou);
                if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.bp == NULL);
                        KKASSERT(iou->io.mod_list == NULL);
-                       crit_enter();   /* biodone race against list */
-                       iou->io.mod_list = &iou->io.hmp->lose_list;
+                       iou->io.mod_list = &hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
-                       crit_exit();
                }
                hammer_put_interlock(&iou->io.lock, 1);
        }
+       lwkt_reltoken(&hmp->io_token);
 }
 
+/*
+ * bioops callback - hold io_token
+ */
 static int
 hammer_io_fsync(struct vnode *vp)
 {
+       /* nothing to do, so io_token not needed */
        return(0);
 }
 
 /*
  * NOTE: will not be called unless we tell the kernel about the
  * bioops.  Unused... we use the mount's VFS_SYNC instead.
+ *
+ * bioops callback - hold io_token
  */
 static int
 hammer_io_sync(struct mount *mp)
 {
+       /* nothing to do, so io_token not needed */
        return(0);
 }
 
+/*
+ * bioops callback - hold io_token
+ */
 static void
 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
 {
+       /* nothing to do, so io_token not needed */
 }
 
 /*
@@ -1111,29 +1179,38 @@ hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
  *
  * checkwrite will only be called for bdwrite()n buffers.  If we return
  * success the kernel is guaranteed to initiate the buffer write.
+ *
+ * bioops callback - hold io_token
  */
 static int
 hammer_io_checkread(struct buf *bp)
 {
+       /* nothing to do, so io_token not needed */
        return(0);
 }
 
+/*
+ * bioops callback - hold io_token
+ */
 static int
 hammer_io_checkwrite(struct buf *bp)
 {
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
+       hammer_mount_t hmp = io->hmp;
 
        /*
         * This shouldn't happen under normal operation.
         */
+       lwkt_gettoken(&hmp->io_token);
        if (io->type == HAMMER_STRUCTURE_VOLUME ||
            io->type == HAMMER_STRUCTURE_META_BUFFER) {
                if (!panicstr)
                        panic("hammer_io_checkwrite: illegal buffer");
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
-                       ++hammer_count_io_locked;
+                       atomic_add_int(&hammer_count_io_locked, 1);
                }
+               lwkt_reltoken(&hmp->io_token);
                return(1);
        }
 
@@ -1158,19 +1235,25 @@ hammer_io_checkwrite(struct buf *bp)
         */
        KKASSERT(io->running == 0);
        io->running = 1;
-       io->hmp->io_running_space += io->bytes;
+       atomic_add_int(&io->hmp->io_running_space, io->bytes);
+       atomic_add_int(&hammer_count_io_running_write, io->bytes);
        TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
-       hammer_count_io_running_write += io->bytes;
+
+       lwkt_reltoken(&hmp->io_token);
+
        return(0);
 }
 
 /*
  * Return non-zero if we wish to delay the kernel's attempt to flush
  * this buffer to disk.
+ *
+ * bioops callback - hold io_token
  */
 static int
 hammer_io_countdeps(struct buf *bp, int n)
 {
+       /* nothing to do, so io_token not needed */
        return(0);
 }
 
@@ -1284,6 +1367,9 @@ done:
 /*
  * On completion of the BIO this callback must check the data CRC
  * and chain to the previous bio.
+ *
+ * MPSAFE - since we do not modify any hammer_records we do not need
+ *         io_token.
  */
 static
 void
@@ -1442,19 +1528,25 @@ hammer_io_direct_write_complete(struct bio *nbio)
 {
        struct bio *obio;
        struct buf *bp;
-       hammer_record_t record = nbio->bio_caller_info1.ptr;
+       hammer_record_t record;
+       hammer_mount_t hmp;
+
+       record = nbio->bio_caller_info1.ptr;
+       KKASSERT(record != NULL);
+       hmp = record->ip->hmp;
+
+       lwkt_gettoken(&hmp->io_token);
 
        bp = nbio->bio_buf;
        obio = pop_bio(nbio);
        if (bp->b_flags & B_ERROR) {
-               hammer_critical_error(record->ip->hmp, record->ip,
+               hammer_critical_error(hmp, record->ip,
                                      bp->b_error,
                                      "while writing bulk data");
                bp->b_flags |= B_INVAL;
        }
        biodone(obio);
 
-       KKASSERT(record != NULL);
        KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
        if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
                record->flags &= ~(HAMMER_RECF_DIRECT_IO |
@@ -1465,6 +1557,7 @@ hammer_io_direct_write_complete(struct bio *nbio)
                record->flags &= ~HAMMER_RECF_DIRECT_IO;
                /* record can disappear once DIRECT_IO flag is cleared */
        }
+       lwkt_reltoken(&hmp->io_token);
 }
 
 
@@ -1481,16 +1574,18 @@ hammer_io_direct_write_complete(struct bio *nbio)
 void
 hammer_io_direct_wait(hammer_record_t record)
 {
+       hammer_mount_t hmp = record->ip->hmp;
+
        /*
         * Wait for I/O to complete
         */
        if (record->flags & HAMMER_RECF_DIRECT_IO) {
-               crit_enter();
+               lwkt_gettoken(&hmp->io_token);
                while (record->flags & HAMMER_RECF_DIRECT_IO) {
                        record->flags |= HAMMER_RECF_DIRECT_WAIT;
                        tsleep(&record->flags, 0, "hmdiow", 0);
                }
-               crit_exit();
+               lwkt_reltoken(&hmp->io_token);
        }
 
        /*
@@ -1505,7 +1600,7 @@ hammer_io_direct_wait(hammer_record_t record)
         */
        if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
                KKASSERT(record->leaf.data_offset);
-               hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
+               hammer_del_buffers(hmp, record->leaf.data_offset,
                                   record->zone2_offset, record->leaf.data_len,
                                   1);
                record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
index 694e85f..b9d1eec 100644 (file)
@@ -554,15 +554,18 @@ again:
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
-                * lose_list can be modified via a biodone() interrupt.
+                * lose_list can be modified via a biodone() interrupt
+                * so the io_token must be held.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
-                       crit_enter();   /* biodone race against list */
-                       TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
-                                    mod_entry);
-                       crit_exit();
-                       buffer->io.mod_list = NULL;
-                       KKASSERT(buffer->io.modified == 0);
+                       lwkt_gettoken(&hmp->io_token);
+                       if (buffer->io.mod_list == &hmp->lose_list) {
+                               TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
+                                            mod_entry);
+                               buffer->io.mod_list = NULL;
+                               KKASSERT(buffer->io.modified == 0);
+                       }
+                       lwkt_reltoken(&hmp->io_token);
                }
                goto found;
        }
@@ -903,6 +906,7 @@ hammer_unload_buffer(hammer_buffer_t buffer, void *data)
 int
 hammer_ref_buffer(hammer_buffer_t buffer)
 {
+       hammer_mount_t hmp;
        int error;
        int locked;
 
@@ -911,22 +915,23 @@ hammer_ref_buffer(hammer_buffer_t buffer)
         * 0->1 transition.
         */
        locked = hammer_ref_interlock(&buffer->io.lock);
+       hmp = buffer->io.hmp;
 
        /*
         * At this point a biodone() will not touch the buffer other then
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
-        * No longer loose
+        * No longer loose.  lose_list requires the io_token.
         */
-       if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
-               crit_enter();
-               if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
+       if (buffer->io.mod_list == &hmp->lose_list) {
+               lwkt_gettoken(&hmp->io_token);
+               if (buffer->io.mod_list == &hmp->lose_list) {
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        buffer->io.mod_list = NULL;
                }
-               crit_exit();
+               lwkt_reltoken(&hmp->io_token);
        }
 
        if (locked) {
index fd48879..8804b13 100644 (file)
 
 #include "hammer.h"
 
+/*
+ * Check for a user signal interrupting a long operation
+ *
+ * MPSAFE
+ */
 int
 hammer_signal_check(hammer_mount_t hmp)
 {
index ff591d6..c9b01fc 100644 (file)
@@ -395,7 +395,7 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
        }
 
        /*
-        * Interal mount data structure
+        * Internal mount data structure
         */
        if (hmp == NULL) {
                hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
@@ -467,6 +467,7 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
         * recovery if it has not already been run.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
+               lwkt_gettoken(&hmp->fs_token);
                error = 0;
                if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
                        kprintf("HAMMER read-only -> read-write\n");
@@ -498,6 +499,7 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                }
+               lwkt_reltoken(&hmp->fs_token);
                return(error);
        }
 
@@ -519,6 +521,11 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
        TAILQ_INIT(&hmp->lose_list);
        TAILQ_INIT(&hmp->iorun_list);
 
+       lwkt_token_init(&hmp->fs_token, 1, "hammerfs");
+       lwkt_token_init(&hmp->io_token, 1, "hammerio");
+
+       lwkt_gettoken(&hmp->fs_token);
+
        /*
         * Load volumes
         */
@@ -586,6 +593,7 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
        }
 
        if (error) {
+               /* called with fs_token held */
                hammer_free_hmp(mp);
                return (error);
        }
@@ -603,8 +611,8 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
         * on return, so even if we do not specify it we no longer get
         * the BGL regardlless of how we are flagged.
         */
-       mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE |
-                            MNTK_IN_MPSAFE;
+       mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
+       /*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/
 
        /* 
         * note: f_iosize is used by vnode_pager_haspage() when constructing
@@ -741,45 +749,54 @@ failed:
        /*
         * Cleanup and return.
         */
-       if (error)
+       if (error) {
+               /* called with fs_token held */
                hammer_free_hmp(mp);
+       } else {
+               lwkt_reltoken(&hmp->fs_token);
+       }
        return (error);
 }
 
 static int
 hammer_vfs_unmount(struct mount *mp, int mntflags)
 {
-#if 0
-       struct hammer_mount *hmp = (void *)mp->mnt_data;
-#endif
+       hammer_mount_t hmp = (void *)mp->mnt_data;
        int flags;
        int error;
 
        /*
         * Clean out the vnodes
         */
+       lwkt_gettoken(&hmp->fs_token);
        flags = 0;
        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
-       if ((error = vflush(mp, 0, flags)) != 0)
-               return (error);
+       error = vflush(mp, 0, flags);
 
        /*
         * Clean up the internal mount structure and related entities.  This
         * may issue I/O.
         */
-       hammer_free_hmp(mp);
-       return(0);
+       if (error == 0) {
+               /* called with fs_token held */
+               hammer_free_hmp(mp);
+       } else {
+               lwkt_reltoken(&hmp->fs_token);
+       }
+       return(error);
 }
 
 /*
  * Clean up the internal mount structure and disassociate it from the mount.
  * This may issue I/O.
+ *
+ * Called with fs_token held.
  */
 static void
 hammer_free_hmp(struct mount *mp)
 {
-       struct hammer_mount *hmp = (void *)mp->mnt_data;
+       hammer_mount_t hmp = (void *)mp->mnt_data;
        hammer_flush_group_t flg;
        int count;
        int dummy;
@@ -857,6 +874,7 @@ hammer_free_hmp(struct mount *mp)
        hammer_destroy_objid_cache(hmp);
        kmalloc_destroy(&hmp->m_misc);
        kmalloc_destroy(&hmp->m_inodes);
+       lwkt_reltoken(&hmp->fs_token);
        kfree(hmp, M_HAMMER);
 }
 
@@ -901,6 +919,7 @@ hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
        int error;
        u_int32_t localization;
 
+       lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);
 
        /*
@@ -925,12 +944,12 @@ hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
                              0, &error);
        if (ip == NULL) {
                *vpp = NULL;
-               hammer_done_transaction(&trans);
-               return(error);
+       } else {
+               error = hammer_get_vnode(ip, vpp);
+               hammer_rel_inode(ip, 0);
        }
-       error = hammer_get_vnode(ip, vpp);
-       hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -943,9 +962,6 @@ hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
 static int
 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
 {
-#if 0
-       struct hammer_mount *hmp = (void *)mp->mnt_data;
-#endif
        int error;
 
        error = hammer_vfs_vget(mp, NULL, 1, vpp);
@@ -962,9 +978,12 @@ hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
        int64_t bfree;
        int64_t breserved;
 
+       lwkt_gettoken(&hmp->fs_token);
        volume = hammer_get_root_volume(hmp, &error);
-       if (error)
+       if (error) {
+               lwkt_reltoken(&hmp->fs_token);
                return(error);
+       }
        ondisk = volume->ondisk;
 
        /*
@@ -981,6 +1000,7 @@ hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
                mp->mnt_stat.f_files = 0;
 
        *sbp = mp->mnt_stat;
+       lwkt_reltoken(&hmp->fs_token);
        return(0);
 }
 
@@ -994,9 +1014,12 @@ hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
        int64_t bfree;
        int64_t breserved;
 
+       lwkt_gettoken(&hmp->fs_token);
        volume = hammer_get_root_volume(hmp, &error);
-       if (error)
+       if (error) {
+               lwkt_reltoken(&hmp->fs_token);
                return(error);
+       }
        ondisk = volume->ondisk;
 
        /*
@@ -1012,6 +1035,7 @@ hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
        if (mp->mnt_vstat.f_files < 0)
                mp->mnt_vstat.f_files = 0;
        *sbp = mp->mnt_vstat;
+       lwkt_reltoken(&hmp->fs_token);
        return(0);
 }
 
@@ -1029,16 +1053,21 @@ hammer_vfs_sync(struct mount *mp, int waitfor)
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;
 
+       lwkt_gettoken(&hmp->fs_token);
        if (panicstr == NULL) {
                error = hammer_sync_hmp(hmp, waitfor);
        } else {
                error = EIO;
        }
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
 /*
  * Convert a vnode to a file handle.
+ *
+ * Accesses read-only fields on already-referenced structures so
+ * no token is needed.
  */
 static int
 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
@@ -1065,6 +1094,7 @@ static int
 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                  struct fid *fhp, struct vnode **vpp)
 {
+       hammer_mount_t hmp = (void *)mp->mnt_data;
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode_info info;
@@ -1078,7 +1108,8 @@ hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
        else
                localization = (u_int32_t)fhp->fid_ext << 16;
 
-       hammer_simple_transaction(&trans, (void *)mp->mnt_data);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_simple_transaction(&trans, hmp);
 
        /*
         * Get/allocate the hammer_inode structure.  The structure must be
@@ -1094,6 +1125,7 @@ hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                *vpp = NULL;
        }
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1105,6 +1137,7 @@ hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
        struct netcred *np;
        int error;
 
+       lwkt_gettoken(&hmp->fs_token);
        np = vfs_export_lookup(mp, &hmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
@@ -1113,6 +1146,7 @@ hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
        } else {
                error = EACCES;
        }
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 
 }
@@ -1123,6 +1157,8 @@ hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
        hammer_mount_t hmp = (void *)mp->mnt_data;
        int error;
 
+       lwkt_gettoken(&hmp->fs_token);
+
        switch(op) {
        case MOUNTCTL_SET_EXPORT:
                error = vfs_export(mp, &hmp->export, export);
@@ -1131,6 +1167,8 @@ hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
                error = EOPNOTSUPP;
                break;
        }
+       lwkt_reltoken(&hmp->fs_token);
+
        return(error);
 }
 
index 05c4084..048d312 100644 (file)
@@ -210,6 +210,8 @@ hammer_vop_fsync(struct vop_fsync_args *ap)
        int waitfor = ap->a_waitfor;
        int mode;
 
+       lwkt_gettoken(&hmp->fs_token);
+
        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
@@ -242,6 +244,7 @@ mode1:
                        break;
                case 4:
                        /* ignore the fsync() system call */
+                       lwkt_reltoken(&hmp->fs_token);
                        return(0);
                default:
                        /* we have to do something */
@@ -262,6 +265,7 @@ mode1:
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        ip->redo_count = 0;
+                       lwkt_reltoken(&hmp->fs_token);
                        return(0);
                }
 
@@ -294,13 +298,14 @@ skip:
                hammer_wait_inode(ip);
                vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
        }
+       lwkt_reltoken(&hmp->fs_token);
        return (ip->error);
 }
 
 /*
  * hammer_vop_read { vp, uio, ioflag, cred }
  *
- * MPALMOSTSAFE
+ * MPSAFE (reads satisfied from the cache do not require fs_token)
  */
 static
 int
@@ -308,6 +313,7 @@ hammer_vop_read(struct vop_read_args *ap)
 {
        struct hammer_transaction trans;
        hammer_inode_t ip;
+       hammer_mount_t hmp;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
@@ -316,12 +322,13 @@ hammer_vop_read(struct vop_read_args *ap)
        int seqcount;
        int ioseqcount;
        int blksize;
-       int got_mplock;
        int bigread;
+       int got_fstoken;
 
        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
+       hmp = ip->hmp;
        error = 0;
        uio = ap->a_uio;
 
@@ -335,26 +342,12 @@ hammer_vop_read(struct vop_read_args *ap)
                seqcount = ioseqcount;
 
        /*
-        * Temporary hack until more of HAMMER can be made MPSAFE.
-        */
-#ifdef SMP
-       if (curthread->td_mpcount) {
-               got_mplock = -1;
-               hammer_start_transaction(&trans, ip->hmp);
-       } else {
-               got_mplock = 0;
-       }
-#else
-       hammer_start_transaction(&trans, ip->hmp);
-       got_mplock = -1;
-#endif
-
-       /*
         * If reading or writing a huge amount of data we have to break
         * atomicy and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);
+       got_fstoken = 0;
 
        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
@@ -388,9 +381,9 @@ hammer_vop_read(struct vop_read_args *ap)
                /*
                 * MPUNSAFE
                 */
-               if (got_mplock == 0) {
-                       got_mplock = 1;
-                       get_mplock();
+               if (got_fstoken == 0) {
+                       lwkt_gettoken(&hmp->fs_token);
+                       got_fstoken = 1;
                        hammer_start_transaction(&trans, ip->hmp);
                }
 
@@ -445,15 +438,14 @@ skip:
         * XXX only update the atime if we had to get the MP lock.
         * XXX hack hack hack, fixme.
         */
-       if (got_mplock) {
+       if (got_fstoken) {
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
                }
                hammer_done_transaction(&trans);
-               if (got_mplock > 0)
-                       rel_mplock();
+               lwkt_reltoken(&hmp->fs_token);
        }
        return (error);
 }
@@ -493,6 +485,7 @@ hammer_vop_write(struct vop_write_args *ap)
        /*
         * Create a transaction to cover the operations we perform.
         */
+       lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;
 
@@ -510,11 +503,13 @@ hammer_vop_write(struct vop_write_args *ap)
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
+               lwkt_reltoken(&hmp->fs_token);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
+               lwkt_reltoken(&hmp->fs_token);
                return (EFBIG);
        }
 
@@ -811,11 +806,14 @@ hammer_vop_write(struct vop_write_args *ap)
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
 /*
  * hammer_vop_access { vp, mode, cred }
+ *
+ * MPSAFE - does not require fs_token
  */
 static
 int
@@ -837,6 +835,8 @@ hammer_vop_access(struct vop_access_args *ap)
 
 /*
  * hammer_vop_advlock { vp, id, op, fl, flags }
+ *
+ * MPSAFE - does not require fs_token
  */
 static
 int
@@ -850,7 +850,7 @@ hammer_vop_advlock(struct vop_advlock_args *ap)
 /*
  * hammer_vop_close { vp, fflag }
  *
- * We can only sync-on-close for normal closes.
+ * We can only sync-on-close for normal closes.  XXX disabled for now.
  */
 static
 int
@@ -890,20 +890,23 @@ hammer_vop_ncreate(struct vop_ncreate_args *ap)
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
+       hammer_mount_t hmp;
        int error;
 
        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
        /*
         * Create a transaction to cover the operations we perform.
         */
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -918,6 +921,7 @@ hammer_vop_ncreate(struct vop_ncreate_args *ap)
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
+               lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
 
@@ -948,6 +952,7 @@ hammer_vop_ncreate(struct vop_ncreate_args *ap)
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -959,7 +964,7 @@ hammer_vop_ncreate(struct vop_ncreate_args *ap)
  * The atime field is stored in the B-Tree element and allowed to be
  * updated without cycling the element.
  *
- * MPSAFE
+ * MPSAFE - does not require fs_token
  */
 static
 int
@@ -1070,6 +1075,7 @@ hammer_vop_nresolve(struct vop_nresolve_args *ap)
 {
        struct hammer_transaction trans;
        struct namecache *ncp;
+       hammer_mount_t hmp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
@@ -1097,8 +1103,10 @@ hammer_vop_nresolve(struct vop_nresolve_args *ap)
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;
+       hmp = dip->hmp;
 
-       hammer_simple_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
 
        for (i = 0; i < nlen; ++i) {
@@ -1253,6 +1261,7 @@ hammer_vop_nresolve(struct vop_nresolve_args *ap)
        }
 done:
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1279,6 +1288,7 @@ hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
@@ -1286,11 +1296,13 @@ hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
 
        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;
+       hmp = dip->hmp;
 
        /*
         * Whos are parent?  This could be the root of a pseudo-filesystem
         * whos parent is in another localization domain.
         */
+       lwkt_gettoken(&hmp->fs_token);
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
@@ -1299,19 +1311,20 @@ hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
 
        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
-                  asof != dip->hmp->asof) {
+                  asof != hmp->asof) {
                        parent_obj_id = dip->obj_id;
-                       asof = dip->hmp->asof;
+                       asof = hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  (long long)dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
+                       lwkt_reltoken(&hmp->fs_token);
                        return ENOENT;
                }
        }
 
-       hammer_simple_transaction(&trans, dip->hmp);
+       hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
 
        ip = hammer_get_inode(&trans, dip, parent_obj_id,
@@ -1324,6 +1337,7 @@ hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1338,6 +1352,7 @@ hammer_vop_nlink(struct vop_nlink_args *ap)
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
+       hammer_mount_t hmp;
        int error;
 
        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)    
@@ -1346,6 +1361,7 @@ hammer_vop_nlink(struct vop_nlink_args *ap)
        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);
+       hmp = dip->hmp;
 
        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);
@@ -1354,13 +1370,14 @@ hammer_vop_nlink(struct vop_nlink_args *ap)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
        /*
         * Create a transaction to cover the operations we perform.
         */
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -1382,6 +1399,7 @@ hammer_vop_nlink(struct vop_nlink_args *ap)
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_LINK);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1399,20 +1417,23 @@ hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
+       hammer_mount_t hmp;
        int error;
 
        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
        /*
         * Create a transaction to cover the operations we perform.
         */
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -1426,6 +1447,7 @@ hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
+               lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
        /*
@@ -1455,6 +1477,7 @@ hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1472,20 +1495,23 @@ hammer_vop_nmknod(struct vop_nmknod_args *ap)
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
+       hammer_mount_t hmp;
        int error;
 
        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
        /*
         * Create a transaction to cover the operations we perform.
         */
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -1500,6 +1526,7 @@ hammer_vop_nmknod(struct vop_nmknod_args *ap)
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
+               lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
 
@@ -1528,11 +1555,14 @@ hammer_vop_nmknod(struct vop_nmknod_args *ap)
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
 /*
  * hammer_vop_open { vp, mode, cred, fp }
+ *
+ * MPSAFE - does not require fs_token
  */
 static
 int
@@ -1568,6 +1598,7 @@ hammer_vop_readdir(struct vop_readdir_args *ap)
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
@@ -1582,6 +1613,7 @@ hammer_vop_readdir(struct vop_readdir_args *ap)
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
+       hmp = ip->hmp;
 
        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
@@ -1595,7 +1627,8 @@ hammer_vop_readdir(struct vop_readdir_args *ap)
                cookie_index = 0;
        }
 
-       hammer_simple_transaction(&trans, ip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_simple_transaction(&trans, hmp);
 
        /*
         * Handle artificial entries
@@ -1710,6 +1743,7 @@ done:
                        *ap->a_cookies = cookies;
                }
        }
+       lwkt_reltoken(&hmp->fs_token);
        return(error);
 }
 
@@ -1723,12 +1757,16 @@ hammer_vop_readlink(struct vop_readlink_args *ap)
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
        char buf[32];
        u_int32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;
 
        ip = VTOI(ap->a_vp);
+       hmp = ip->hmp;
+
+       lwkt_gettoken(&hmp->fs_token);
 
        /*
         * Shortcut if the symlink data was stuffed into ino_data.
@@ -1747,7 +1785,7 @@ hammer_vop_readlink(struct vop_readlink_args *ap)
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == 0 &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
-                       hammer_simple_transaction(&trans, ip->hmp);
+                       hammer_simple_transaction(&trans, hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = strtoul(buf, NULL, 10) << 16;
@@ -1777,17 +1815,18 @@ hammer_vop_readlink(struct vop_readlink_args *ap)
                                bytes = strlen(buf);
                        }
                        if (pfsm)
-                               hammer_rel_pseudofs(trans.hmp, pfsm);
+                               hammer_rel_pseudofs(hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
+               lwkt_reltoken(&hmp->fs_token);
                return(error);
        }
 
        /*
         * Long version
         */
-       hammer_simple_transaction(&trans, ip->hmp);
+       hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
 
@@ -1820,6 +1859,7 @@ hammer_vop_readlink(struct vop_readlink_args *ap)
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return(error);
 }
 
@@ -1832,21 +1872,25 @@ hammer_vop_nremove(struct vop_nremove_args *ap)
 {
        struct hammer_transaction trans;
        struct hammer_inode *dip;
+       hammer_mount_t hmp;
        int error;
 
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (hammer_nohistory(dip) == 0 &&
-           (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
+           (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }
 
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -1863,6 +1907,7 @@ hammer_vop_nrename(struct vop_nrename_args *ap)
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        u_int32_t max_iterations;
@@ -1880,6 +1925,8 @@ hammer_vop_nrename(struct vop_nrename_args *ap)
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);
 
+       hmp = ip->hmp;
+
        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
@@ -1891,10 +1938,11 @@ hammer_vop_nrename(struct vop_nrename_args *ap)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
-       hammer_start_transaction(&trans, fdip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -1999,6 +2047,7 @@ retry:
 
 failed:
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -2011,21 +2060,25 @@ hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
 {
        struct hammer_transaction trans;
        struct hammer_inode *dip;
+       hammer_mount_t hmp;
        int error;
 
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (hammer_nohistory(dip) == 0 &&
-           (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
+           (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }
 
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -2038,21 +2091,25 @@ hammer_vop_markatime(struct vop_markatime_args *ap)
 {
        struct hammer_transaction trans;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
 
        ip = VTOI(ap->a_vp);
        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
+       hmp = ip->hmp;
+       if (hmp->mp->mnt_flag & MNT_NOATIME)
                return (0);
-       hammer_start_transaction(&trans, ip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        ip->ino_data.atime = trans.time;
        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_ATTRIB);
+       lwkt_reltoken(&hmp->fs_token);
        return (0);
 }
 
@@ -2064,8 +2121,9 @@ int
 hammer_vop_setattr(struct vop_setattr_args *ap)
 {
        struct hammer_transaction trans;
-       struct vattr *vap;
        struct hammer_inode *ip;
+       struct vattr *vap;
+       hammer_mount_t hmp;
        int modflags;
        int error;
        int truncating;
@@ -2080,17 +2138,19 @@ hammer_vop_setattr(struct vop_setattr_args *ap)
        ip = ap->a_vp->v_data;
        modflags = 0;
        kflags = 0;
+       hmp = ip->hmp;
 
        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return(EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
-           (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
+           (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }
 
-       hammer_start_transaction(&trans, ip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = 0;
 
@@ -2296,6 +2356,7 @@ done:
                hammer_modify_inode(&trans, ip, modflags);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -2309,8 +2370,9 @@ hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
-       struct nchandle *nch;
        hammer_record_t record;
+       struct nchandle *nch;
+       hammer_mount_t hmp;
        int error;
        int bytes;
 
@@ -2318,16 +2380,18 @@ hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
 
        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
-       if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
+       if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);
 
        /*
         * Create a transaction to cover the operations we perform.
         */
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
 
        /*
@@ -2341,6 +2405,7 @@ hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
+               lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
 
@@ -2395,6 +2460,7 @@ hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
                }
        }
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return (error);
 }
 
@@ -2407,20 +2473,24 @@ hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
 {
        struct hammer_transaction trans;
        struct hammer_inode *dip;
+       hammer_mount_t hmp;
        int error;
 
        dip = VTOI(ap->a_dvp);
+       hmp = dip->hmp;
 
        if (hammer_nohistory(dip) == 0 &&
-           (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
+           (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
                return (error);
        }
 
-       hammer_start_transaction(&trans, dip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, ap->a_flags, -1);
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
 
        return (error);
 }
@@ -2433,10 +2503,15 @@ int
 hammer_vop_ioctl(struct vop_ioctl_args *ap)
 {
        struct hammer_inode *ip = ap->a_vp->v_data;
+       hammer_mount_t hmp = ip->hmp;
+       int error;
 
        ++hammer_stats_file_iopsr;
-       return(hammer_ioctl(ip, ap->a_command, ap->a_data,
-                           ap->a_fflag, ap->a_cred));
+       lwkt_gettoken(&hmp->fs_token);
+       error = hammer_ioctl(ip, ap->a_command, ap->a_data,
+                            ap->a_fflag, ap->a_cred);
+       lwkt_reltoken(&hmp->fs_token);
+       return (error);
 }
 
 static
@@ -2460,8 +2535,9 @@ hammer_vop_mountctl(struct vop_mountctl_args *ap)
        KKASSERT(mp->mnt_data != NULL);
        hmp = (struct hammer_mount *)mp->mnt_data;
 
-       switch(ap->a_op) {
+       lwkt_gettoken(&hmp->fs_token);
 
+       switch(ap->a_op) {
        case MOUNTCTL_SET_EXPORT:
                if (ap->a_ctllen != sizeof(struct export_args))
                        error = EINVAL;
@@ -2482,7 +2558,8 @@ hammer_vop_mountctl(struct vop_mountctl_args *ap)
                usedbytes = *ap->a_res;
 
                if (usedbytes > 0 && usedbytes < ap->a_buflen) {
-                       usedbytes += vfs_flagstostr(hmp->hflags, extraopt, ap->a_buf,
+                       usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
+                                                   ap->a_buf,
                                                    ap->a_buflen - usedbytes,
                                                    &error);
                }
@@ -2494,6 +2571,7 @@ hammer_vop_mountctl(struct vop_mountctl_args *ap)
                error = vop_stdmountctl(ap);
                break;
        }
+       lwkt_reltoken(&hmp->fs_token);
        return(error);
 }
 
@@ -2550,6 +2628,7 @@ hammer_vop_strategy_read(struct vop_strategy_args *ap)
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode *dip;
+       hammer_mount_t hmp;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        hammer_off_t disk_offset;
@@ -2567,6 +2646,7 @@ hammer_vop_strategy_read(struct vop_strategy_args *ap)
        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;
+       hmp = ip->hmp;
 
        /*
         * The zone-2 disk offset may have been set by the cluster code via
@@ -2577,7 +2657,9 @@ hammer_vop_strategy_read(struct vop_strategy_args *ap)
        nbio = push_bio(bio);
        if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
            HAMMER_ZONE_LARGE_DATA) {
-               error = hammer_io_direct_read(ip->hmp, nbio, NULL);
+               lwkt_gettoken(&hmp->fs_token);
+               error = hammer_io_direct_read(hmp, nbio, NULL);
+               lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
 
@@ -2585,7 +2667,8 @@ hammer_vop_strategy_read(struct vop_strategy_args *ap)
         * Well, that sucked.  Do it the hard way.  If all the stars are
         * aligned we may still be able to issue a direct-read.
         */
-       hammer_simple_transaction(&trans, ip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_simple_transaction(&trans, hmp);
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
 
        /*
@@ -2719,8 +2802,7 @@ hammer_vop_strategy_read(struct vop_strategy_args *ap)
                        KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
                                 HAMMER_ZONE_LARGE_DATA);
                        nbio->bio_offset = disk_offset;
-                       error = hammer_io_direct_read(trans.hmp, nbio,
-                                                     cursor.leaf);
+                       error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
                        goto done;
                } else if (n) {
                        error = hammer_ip_resolve_data(&cursor);
@@ -2781,6 +2863,7 @@ done:
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
        return(error);
 }
 
@@ -2805,6 +2888,7 @@ hammer_vop_bmap(struct vop_bmap_args *ap)
 {
        struct hammer_transaction trans;
        struct hammer_inode *ip;
+       hammer_mount_t hmp;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        int64_t rec_offset;
@@ -2821,6 +2905,7 @@ hammer_vop_bmap(struct vop_bmap_args *ap)
 
        ++hammer_stats_file_iopsr;
        ip = ap->a_vp->v_data;
+       hmp = ip->hmp;
 
        /*
         * We can only BMAP regular files.  We can't BMAP database files,
@@ -2840,7 +2925,8 @@ hammer_vop_bmap(struct vop_bmap_args *ap)
         * Scan the B-Tree to acquire blockmap addresses, then translate
         * to raw addresses.
         */
-       hammer_simple_transaction(&trans, ip->hmp);
+       lwkt_gettoken(&hmp->fs_token);
+       hammer_simple_transaction(&trans, hmp);
 #if 0
        kprintf("bmap_beg %016llx ip->cache %p\n",
                (long long)ap->a_loffset, ip->cache[1]);
@@ -2965,6 +3051,7 @@ hammer_vop_bmap(struct vop_bmap_args *ap)
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
+       lwkt_reltoken(&hmp->fs_token);
 
        /*
         * If we couldn't find any records or the records we did find were
@@ -3057,6 +3144,8 @@ hammer_vop_strategy_write(struct vop_strategy_args *ap)
                return(EROFS);
        }
 
+       lwkt_gettoken(&hmp->fs_token);
+
        /*
         * Interlock with inode destruction (no in-kernel or directory
         * topology visibility).  If we queue new IO while trying to
@@ -3071,6 +3160,7 @@ hammer_vop_strategy_write(struct vop_strategy_args *ap)
            (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
                bp->b_resid = 0;
                biodone(ap->a_bio);
+               lwkt_reltoken(&hmp->fs_token);
                return(0);
        }
 
@@ -3120,6 +3210,7 @@ hammer_vop_strategy_write(struct vop_strategy_args *ap)
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
        }
+       lwkt_reltoken(&hmp->fs_token);
        return(error);
 }
 
@@ -3136,6 +3227,7 @@ hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
+       hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        u_int32_t max_iterations;
@@ -3150,6 +3242,7 @@ hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
         */
        dip = VTOI(dvp);
        ncp = nch->ncp;
+       hmp = dip->hmp;
 
        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
@@ -3206,7 +3299,7 @@ retry:
        if (error == 0) {
                hammer_unlock(&cursor.ip->lock);
                ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
-                                     dip->hmp->asof,
+                                     hmp->asof,
                                      cursor.data->entry.localization,
                                      0, &error);
                hammer_lock_sh(&cursor.ip->lock);
@@ -3304,7 +3397,6 @@ retry:
  ************************************************************************
  *
  */
-
 static int
 hammer_vop_fifoclose (struct vop_close_args *ap)
 {
@@ -3402,14 +3494,17 @@ filt_hammerread(struct knote *kn, long hint)
 {
        struct vnode *vp = (void *)kn->kn_hook;
        hammer_inode_t ip = VTOI(vp);
+       hammer_mount_t hmp = ip->hmp;
        off_t off;
 
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return(1);
        }
+       lwkt_gettoken(&hmp->fs_token);  /* XXX use per-ip-token */
        off = ip->ino_data.size - kn->kn_fp->f_offset;
        kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
+       lwkt_reltoken(&hmp->fs_token);
        if (kn->kn_sfflags & NOTE_OLDAPI)
                return(1);
        return (kn->kn_data != 0);