hammer2 - refactor filesystem sync 3/N
author     Matthew Dillon <dillon@apollo.backplane.com>
Sat, 10 Nov 2018 02:05:14 +0000 (18:05 -0800)
committer  Matthew Dillon <dillon@apollo.backplane.com>
Wed, 5 Dec 2018 18:28:39 +0000 (10:28 -0800)
* Attempt to guarantee filesystem consistency at all sync points.

* Pretty severely hacked, and at the moment this can result in
  syncs which never end if the filesystem is busy.
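
The core mechanism in the hammer2_flush.c changes below is an interlock between
normal modifying transactions and the COPYQ phase of a flush: the syncer sets
COPYQ, waits for the active transaction count to drop to 1 (its own flush
transaction), moves the dirty-inode sideq onto the syncq, and then clears COPYQ
so modifying operations can resume while the flush runs.  The following is a
rough, runnable userspace analogue of that interlock.  All names and flag
values are invented, and a pthread mutex/condvar stands in for the kernel's
lock-free atomic_cmpset_int()/tsleep_interlock() loop; this is an illustration,
not the kernel code.

/*
 * Userspace analogue of the COPYQ interlock (illustrative names/values).
 * Compile with: cc -pthread copyq.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define T_MASK          0x00ffffffu     /* low bits: active transaction count */
#define T_ISFLUSH       0x80000000u     /* a flush transaction is in progress */
#define T_COPYQ         0x40000000u     /* flush is moving sideq -> syncq */

static unsigned trans_flags;
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER;

/* Normal modifying transaction: stalls only while COPYQ is set. */
static void
trans_init_normal(void)
{
        pthread_mutex_lock(&mtx);
        while (trans_flags & T_COPYQ)
                pthread_cond_wait(&cv, &mtx);
        ++trans_flags;
        pthread_mutex_unlock(&mtx);
}

static void
trans_done(void)
{
        pthread_mutex_lock(&mtx);
        --trans_flags;
        pthread_cond_broadcast(&cv);    /* may be the 2->1 transition */
        pthread_mutex_unlock(&mtx);
}

/*
 * Flush transaction with a COPYQ phase: block new modifying transactions,
 * wait until ours is the only transaction left, do the queue copy, then
 * let normal operations resume while the flush itself keeps running.
 */
static void
trans_init_flush_copyq(void)
{
        pthread_mutex_lock(&mtx);
        trans_flags |= T_ISFLUSH | T_COPYQ;
        ++trans_flags;
        while ((trans_flags & T_MASK) != 1)
                pthread_cond_wait(&cv, &mtx);
        /* ... sideq inodes would be moved to syncq here ... */
        trans_flags &= ~T_COPYQ;
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&mtx);
}

static void *
writer(void *arg)
{
        (void)arg;
        for (int i = 0; i < 3; ++i) {
                trans_init_normal();
                printf("modifying op %d\n", i);
                usleep(1000);
                trans_done();
        }
        return NULL;
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, writer, NULL);
        trans_init_flush_copyq();
        printf("COPYQ phase complete, flush proceeds concurrently\n");
        trans_done();                   /* end of the flush transaction */
        pthread_join(t, NULL);
        return 0;
}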

sys/vfs/hammer2/hammer2.h
sys/vfs/hammer2/hammer2_flush.c
sys/vfs/hammer2/hammer2_inode.c
sys/vfs/hammer2/hammer2_ioctl.c
sys/vfs/hammer2/hammer2_ioctl.h
sys/vfs/hammer2/hammer2_vfsops.c
sys/vfs/hammer2/hammer2_vnops.c

diff --git a/sys/vfs/hammer2/hammer2.h b/sys/vfs/hammer2/hammer2.h
index 99f3901..d3589f6 100644
@@ -1467,6 +1467,7 @@ void hammer2_inode_lock(hammer2_inode_t *ip, int how);
 void hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
                        hammer2_inode_t *ip3, hammer2_inode_t *ip4);
 void hammer2_inode_unlock(hammer2_inode_t *ip);
+void hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
                        int clindex, hammer2_chain_t **parentp, int how);
diff --git a/sys/vfs/hammer2/hammer2_flush.c b/sys/vfs/hammer2/hammer2_flush.c
index 258096f..7df3e9d 100644
@@ -98,11 +98,15 @@ hammer2_trans_manage_init(hammer2_pfs_t *pmp)
  * Transaction support for any modifying operation.  Transactions are used
  * in the pmp layer by the frontend and in the spmp layer by the backend.
  *
- * 0                   - Normal transaction.  No interlock currently.
+ * 0                   - Normal transaction.  Interlocks against just the
+ *                       COPYQ portion of an ISFLUSH transaction.
  *
  * TRANS_ISFLUSH       - Flush transaction.  Interlocks against other flush
  *                       transactions.
  *
+ *                       When COPYQ is also specified, waits for the count
+ *                       to drop to 1.
+ *
  * TRANS_BUFCACHE      - Buffer cache transaction.  No interlock.
  *
  * TRANS_SIDEQ         - Run the sideq (only tested in trans_done())
@@ -177,6 +181,30 @@ hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
                }
                /* retry */
        }
+
+       /*
+        * When entering a FLUSH transaction with COPYQ set, wait for the
+        * transaction count to drop to 1 (our flush transaction only)
+        * before proceeding.
+        *
+        * This waits for all non-flush transactions to complete and blocks
+        * new non-flush transactions from starting until COPYQ is cleared
+        * (the flush then proceeds once COPYQ is clear).  This should
+        * be a very short stall on modifying operations.
+        */
+       while ((flags & HAMMER2_TRANS_ISFLUSH) &&
+              (flags & HAMMER2_TRANS_COPYQ)) {
+               oflags = pmp->trans.flags;
+               cpu_ccfence();
+               if ((oflags & HAMMER2_TRANS_MASK) == 1)
+                       break;
+               nflags = oflags | HAMMER2_TRANS_WAITING;
+               tsleep_interlock(&pmp->trans.sync_wait, 0);
+               if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
+                       tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
+                              "h2trans2", hz);
+               }
+       }
 }
 
 /*
@@ -206,6 +234,11 @@ hammer2_trans_setflags(hammer2_pfs_t *pmp, uint32_t flags)
        atomic_set_int(&pmp->trans.flags, flags);
 }
 
+/*
+ * Typically used to clear trans flags asynchronously.  If TRANS_WAITING
+ * is in the mask, and was previously set, this function will wake up
+ * any waiters.
+ */
 void
 hammer2_trans_clearflags(hammer2_pfs_t *pmp, uint32_t flags)
 {
@@ -245,7 +278,9 @@ hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags)
        }
 
        /*
-        * Clean-up the transaction
+        * Clean up the transaction.  Wake up any waiters when finishing
+        * a flush transaction or transitioning the non-flush transaction
+        * count from 2->1 while a flush transaction is pending.
         */
        for (;;) {
                oflags = pmp->trans.flags;
@@ -256,6 +291,10 @@ hammer2_trans_done(hammer2_pfs_t *pmp, uint32_t flags)
                if (flags & HAMMER2_TRANS_ISFLUSH) {
                        nflags &= ~HAMMER2_TRANS_WAITING;
                }
+               if ((oflags & (HAMMER2_TRANS_ISFLUSH|HAMMER2_TRANS_MASK)) ==
+                   (HAMMER2_TRANS_ISFLUSH|2)) {
+                       nflags &= ~HAMMER2_TRANS_WAITING;
+               }
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if ((oflags ^ nflags) & HAMMER2_TRANS_WAITING)
                                wakeup(&pmp->trans.sync_wait);
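
The new condition in hammer2_trans_done() wakes the waiter exactly when a
non-flush transaction finishes while a flush is pending and the count is still
2, i.e. the finishing transaction is the last one other than the flush itself.
A tiny worked example of that bitmask test, using illustrative flag values
rather than the real header constants:

/* Worked example of the 2->1 wakeup test (illustrative constants). */
#include <assert.h>

#define T_MASK          0x00ffffffu
#define T_ISFLUSH       0x80000000u

int
main(void)
{
        /* A flush transaction plus one normal transaction are active. */
        unsigned oflags = T_ISFLUSH | 2;

        /*
         * The normal transaction is finishing; after it drops the count
         * only the flusher remains, so the waiter must be woken.
         */
        assert((oflags & (T_ISFLUSH | T_MASK)) == (T_ISFLUSH | 2));

        /* With two normal transactions still active there is no wakeup. */
        oflags = T_ISFLUSH | 3;
        assert((oflags & (T_ISFLUSH | T_MASK)) != (T_ISFLUSH | 2));
        return 0;
}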
diff --git a/sys/vfs/hammer2/hammer2_inode.c b/sys/vfs/hammer2/hammer2_inode.c
index 12044c7..825c244 100644
@@ -181,6 +181,8 @@ hammer2_inode_lock(hammer2_inode_t *ip, int how)
  * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
  * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
  * NULL then ip4 must also be NULL.
+ *
+ * This function will also ensure that if any of the inodes is part of
+ * the current filesystem sync, all of them are made part of it.
  */
 void
 hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
@@ -191,7 +193,7 @@ hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
        hammer2_pfs_t *pmp;
        size_t count;
        size_t i;
-       size_t j;
+       int dosyncq;
 
        pmp = ip1->pmp;                 /* may be NULL */
        KKASSERT(pmp == ip2->pmp);
@@ -216,23 +218,72 @@ hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
                hammer2_inode_ref(ips[i]);
 
 restart:
+       dosyncq = 0;
+
+       /*
+        * Lock the inodes in order
+        */
        for (i = 0; i < count; ++i) {
                iptmp = ips[i];
                hammer2_mtx_ex(&iptmp->lock);
-               if (hammer2_mtx_refs(&iptmp->lock) > 1)
-                       continue;
-               if ((iptmp->flags & HAMMER2_INODE_SYNCQ) == 0 || pmp == NULL)
-                       continue;
-               tsleep_interlock(&iptmp->flags, 0);
+               if (iptmp->flags & HAMMER2_INODE_SYNCQ)
+                       dosyncq |= 1;
+               if (iptmp->flags & HAMMER2_INODE_SYNCQ_PASS2)
+                       dosyncq |= 2;
+       }
+
+       /*
+        * If any of the inodes are part of a filesystem sync then we
+        * have to make sure they ALL are, because their modifications
+        * depend on each other (e.g. inode vs dirent).
+        */
+       for (i = 0; (dosyncq & 3) && i < count; ++i) {
+               iptmp = ips[i];
                hammer2_spin_ex(&pmp->list_spin);
-               if ((iptmp->flags & HAMMER2_INODE_SYNCQ) == 0) {
-                       hammer2_spin_unex(&pmp->list_spin);
-                       continue;
-               }
                atomic_set_int(&iptmp->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
-               TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
-               TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
+               if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
+                       TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
+                       TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
+               } else if (iptmp->flags & HAMMER2_INODE_SIDEQ) {
+                       atomic_set_int(&iptmp->flags, HAMMER2_INODE_SYNCQ);
+                       atomic_clear_int(&iptmp->flags, HAMMER2_INODE_SIDEQ);
+                       TAILQ_REMOVE(&pmp->sideq, iptmp, entry);
+                       TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
+               } else {
+                       atomic_set_int(&iptmp->flags, HAMMER2_INODE_SYNCQ);
+                       hammer2_inode_ref(iptmp);
+                       TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
+               }
                hammer2_spin_unex(&pmp->list_spin);
+       }
+
+       /*
+        * Block and retry if any of the inodes are on SYNCQ.  It is
+        * important that we allow the operation to proceed in the
+        * PASS2 case, to avoid deadlocking against the vnode.
+        */
+       if (dosyncq & 1) {
+               for (i = 0; i < count; ++i)
+                       hammer2_mtx_unlock(&ips[i]->lock);
+               tsleep(&iptmp->flags, 0, "h2sync", 2);
+               goto restart;
+       }
+#if 0
+               if (pmp == NULL ||
+                   ((iptmp->flags & (HAMMER2_INODE_SYNCQ |
+                                     HAMMER2_INODE_SYNCQ_PASS2)) == 0 &&
+                    dosyncq == 0)) {
+                       continue;
+               }
+               dosyncq = 1;
+               tsleep_interlock(&iptmp->flags, 0);
+
+               /*
+                * We have to accept the inode if it's got more than one
+                * exclusive count because we can't safely unlock it.
+                */
+               if (hammer2_mtx_refs(&iptmp->lock) > 1)
+                       continue;
 
                /*
                 * Unlock everything (including the current index) and wait
@@ -245,6 +296,7 @@ restart:
 
                goto restart;
        }
+#endif
 }
 
 /*
@@ -264,6 +316,63 @@ hammer2_inode_unlock(hammer2_inode_t *ip)
        hammer2_inode_drop(ip);
 }
 
+/*
+ * If either ip1 or ip2 is on SYNCQ, make sure the other one is too.
+ * This ensures that dependencies (e.g. directory-v-inode) are flushed
+ * together.
+ *
+ * We must also check SYNCQ_PASS2, which occurs when the syncer cannot
+ * immediately lock the inode on SYNCQ and must temporarily move it to
+ * SIDEQ to retry again in another pass (but part of the same flush).
+ *
+ * Both ip1 and ip2 must be locked by the caller.  This also ensures
+ * that we can't race the end of the syncer's queue run.
+ */
+void
+hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
+{
+       hammer2_pfs_t *pmp;
+
+       pmp = ip1->pmp;
+       if (((ip1->flags | ip2->flags) & HAMMER2_INODE_SYNCQ) == 0)
+               return;
+       if ((ip1->flags & (HAMMER2_INODE_SYNCQ |
+                          HAMMER2_INODE_SYNCQ_PASS2)) &&
+           (ip2->flags & (HAMMER2_INODE_SYNCQ |
+                          HAMMER2_INODE_SYNCQ_PASS2))) {
+               return;
+       }
+       KKASSERT(pmp == ip2->pmp);
+       hammer2_spin_ex(&pmp->list_spin);
+       if ((ip1->flags & (HAMMER2_INODE_SYNCQ |
+                          HAMMER2_INODE_SYNCQ_PASS2)) == 0) {
+               if (ip1->flags & HAMMER2_INODE_SIDEQ) {
+                       atomic_set_int(&ip1->flags, HAMMER2_INODE_SYNCQ);
+                       atomic_clear_int(&ip1->flags, HAMMER2_INODE_SIDEQ);
+                       TAILQ_REMOVE(&pmp->sideq, ip1, entry);
+                       TAILQ_INSERT_TAIL(&pmp->syncq, ip1, entry);
+               } else {
+                       atomic_set_int(&ip1->flags, HAMMER2_INODE_SYNCQ);
+                       hammer2_inode_ref(ip1);
+                       TAILQ_INSERT_TAIL(&pmp->syncq, ip1, entry);
+               }
+       }
+       if ((ip2->flags & (HAMMER2_INODE_SYNCQ |
+                          HAMMER2_INODE_SYNCQ_PASS2)) == 0) {
+               if (ip2->flags & HAMMER2_INODE_SIDEQ) {
+                       atomic_set_int(&ip2->flags, HAMMER2_INODE_SYNCQ);
+                       atomic_clear_int(&ip2->flags, HAMMER2_INODE_SIDEQ);
+                       TAILQ_REMOVE(&pmp->sideq, ip2, entry);
+                       TAILQ_INSERT_TAIL(&pmp->syncq, ip2, entry);
+               } else {
+                       atomic_set_int(&ip2->flags, HAMMER2_INODE_SYNCQ);
+                       hammer2_inode_ref(ip2);
+                       TAILQ_INSERT_TAIL(&pmp->syncq, ip2, entry);
+               }
+       }
+       hammer2_spin_unex(&pmp->list_spin);
+}
+
 
 /*
  * Select a chain out of an inode's cluster and lock it.
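
hammer2_inode_depend() encodes a simple rule: if either inode of a dependent
pair is already part of the running filesystem sync (SYNCQ, or deferred via
SYNCQ_PASS2), the other one must be pulled onto the syncq too so both
modifications land in the same flush.  Below is a minimal, runnable userspace
analogue of that rule; flag names are invented, and the real queue and
refcount handling under pmp->list_spin is omitted.

#include <assert.h>

#define F_SYNCQ         0x1     /* on the current sync's queue */
#define F_PASS2         0x2     /* deferred to a later pass of the same sync */
#define F_SIDEQ         0x4     /* dirty, queued for a future sync */

struct inode {
        unsigned flags;
};

static void
join_syncq(struct inode *ip)
{
        if (ip->flags & (F_SYNCQ | F_PASS2))
                return;                 /* already part of the sync */
        ip->flags &= ~F_SIDEQ;          /* would be removed from sideq */
        ip->flags |= F_SYNCQ;           /* and inserted on syncq */
}

static void
inode_depend(struct inode *ip1, struct inode *ip2)
{
        if (((ip1->flags | ip2->flags) & F_SYNCQ) == 0)
                return;                 /* neither is being synced */
        join_syncq(ip1);
        join_syncq(ip2);
}

int
main(void)
{
        struct inode dir = { F_SYNCQ }; /* dirent change already queued */
        struct inode tgt = { F_SIDEQ }; /* inode change not yet queued */

        inode_depend(&dir, &tgt);
        assert(tgt.flags & F_SYNCQ);    /* both now flush together */
        return 0;
}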
diff --git a/sys/vfs/hammer2/hammer2_ioctl.c b/sys/vfs/hammer2/hammer2_ioctl.c
index 600c725..960d6da 100644
@@ -660,7 +660,6 @@ hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
                hammer2_inode_chain_sync(nip);
                hammer2_inode_chain_flush(nip, HAMMER2_XOP_INODE_STOP |
                                               HAMMER2_XOP_FSSYNC);
-               KKASSERT(nip->refs == 1);
                hammer2_inode_drop(nip);
 
                /* 
@@ -813,9 +812,19 @@ hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
 
        lockmgr(&hmp->bulklk, LK_EXCLUSIVE);
 
-       hammer2_vfs_sync(pmp->mp, MNT_WAIT);
-
-       hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
+       /*
+        * NOSYNC is for debugging.  We skip the filesystem sync and use
+        * a normal transaction (which is less likely to stall).  This is
+        * used to test filesystem consistency.
+        *
+        * In normal mode we sync the filesystem and use a flush transaction.
+        */
+       if (pfs->pfs_flags & HAMMER2_PFSFLAGS_NOSYNC) {
+               hammer2_trans_init(pmp, 0);
+       } else {
+               hammer2_vfs_sync(pmp->mp, MNT_WAIT);
+               hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
+       }
        mtid = hammer2_trans_sub(pmp);
        hammer2_inode_lock(ip, 0);
        hammer2_inode_modify(ip);
@@ -902,7 +911,6 @@ hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
                hammer2_inode_chain_sync(nip);
                hammer2_inode_chain_flush(nip, HAMMER2_XOP_INODE_STOP |
                                               HAMMER2_XOP_FSSYNC);
-               KKASSERT(nip->refs == 1);
                hammer2_inode_drop(nip);
 
                force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
@@ -922,7 +930,12 @@ hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
        hammer2_chain_drop(chain);
 
        hammer2_inode_unlock(ip);
-       hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_SIDEQ);
+       if (pfs->pfs_flags & HAMMER2_PFSFLAGS_NOSYNC) {
+               hammer2_trans_done(pmp, 0);
+       } else {
+               hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH |
+                                       HAMMER2_TRANS_SIDEQ);
+       }
 
        lockmgr(&hmp->bulklk, LK_RELEASE);
 
diff --git a/sys/vfs/hammer2/hammer2_ioctl.h b/sys/vfs/hammer2/hammer2_ioctl.h
index 8d9e392..5178750 100644
@@ -102,6 +102,8 @@ struct hammer2_ioc_pfs {
 
 typedef struct hammer2_ioc_pfs hammer2_ioc_pfs_t;
 
+#define HAMMER2_PFSFLAGS_NOSYNC                0x00000001
+
 /*
  * Ioctls to manage inodes
  */
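
The new HAMMER2_PFSFLAGS_NOSYNC flag lets an ioctl caller skip the pre-snapshot
filesystem sync for consistency testing.  A hypothetical userland caller could
look like the sketch below.  It assumes the existing HAMMER2IOC_PFS_SNAPSHOT
ioctl, the pfs_flags field of struct hammer2_ioc_pfs, and the DragonFly include
path for the header; the include set and error handling are approximate.

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uuid.h>                   /* hammer2 headers use uuid_t */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <vfs/hammer2/hammer2_ioctl.h>

int
main(int argc, char **argv)
{
        hammer2_ioc_pfs_t pfs;
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <mountpoint> <snapshot-label>\n",
                        argv[0]);
                return 1;
        }
        if ((fd = open(argv[1], O_RDONLY)) < 0) {
                perror("open");
                return 1;
        }

        memset(&pfs, 0, sizeof(pfs));
        snprintf(pfs.name, sizeof(pfs.name), "%s", argv[2]);
        pfs.pfs_flags = HAMMER2_PFSFLAGS_NOSYNC;        /* debug: skip sync */

        if (ioctl(fd, HAMMER2IOC_PFS_SNAPSHOT, &pfs) < 0) {
                perror("HAMMER2IOC_PFS_SNAPSHOT");
                close(fd);
                return 1;
        }
        printf("snapshot %s created without the pre-snapshot sync\n", pfs.name);
        close(fd);
        return 0;
}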
diff --git a/sys/vfs/hammer2/hammer2_vfsops.c b/sys/vfs/hammer2/hammer2_vfsops.c
index 0284826..88cac68 100644
@@ -2437,22 +2437,40 @@ hammer2_vfs_sync_pmp(hammer2_pfs_t *pmp, int waitfor)
         * op using the same inode.  Either it has already locked the
         * inode and we will block, or it has not yet locked the inode
         * and it will block until we are finished flushing that inode.
+        *
+        * When restarting, only move the inodes flagged as PASS2.
         */
        hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
 restart:
        hammer2_trans_setflags(pmp, HAMMER2_TRANS_COPYQ);
-       dorestart = 0;
        hammer2_spin_ex(&pmp->list_spin);
-       TAILQ_FOREACH(ip, &pmp->sideq, entry) {
-               KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
-               atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
-               atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
+       if (dorestart == 0) {
+               TAILQ_FOREACH(ip, &pmp->sideq, entry) {
+                       KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
+                       atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
+                       atomic_clear_int(&ip->flags, HAMMER2_INODE_SIDEQ);
+               }
+               TAILQ_CONCAT(&pmp->syncq, &pmp->sideq, entry);
+               pmp->sideq_count = 0;
+       } else {
+               ipdrop = TAILQ_FIRST(&pmp->sideq);
+               while ((ip = ipdrop) != NULL) {
+                       ipdrop = TAILQ_NEXT(ip, entry);
+                       KKASSERT(ip->flags & HAMMER2_INODE_SIDEQ);
+                       if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2) {
+                               TAILQ_REMOVE(&pmp->sideq, ip, entry);
+                               TAILQ_INSERT_TAIL(&pmp->syncq, ip, entry);
+                               atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ);
+                               atomic_clear_int(&ip->flags,
+                                                HAMMER2_INODE_SIDEQ);
+                               --pmp->sideq_count;
+                       }
+               }
        }
-       TAILQ_CONCAT(&pmp->syncq, &pmp->sideq, entry);
-       pmp->sideq_count = 0;
        hammer2_spin_unex(&pmp->list_spin);
        hammer2_trans_clearflags(pmp, HAMMER2_TRANS_COPYQ |
                                      HAMMER2_TRANS_WAITING);
+       dorestart = 0;
 
        /*
         * Now run through all inodes on syncq.
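
On the first pass the sync splices the entire sideq onto the syncq; on a
restart only inodes the syncer had to defer (flagged SYNCQ_PASS2 and pushed
back to the sideq) are requeued, so inodes dirtied during the flush wait for
the next sync instead of extending this one.  Below is a runnable sketch of
that two-case queue handling with <sys/queue.h> TAILQs; structure and flag
names are invented, and the kernel does this under pmp->list_spin with atomic
flag updates.

#include <sys/queue.h>
#include <stdio.h>

#define F_SYNCQ         0x1     /* queued for the current sync */
#define F_SIDEQ         0x2     /* dirty, waiting for a future sync */
#define F_PASS2         0x4     /* deferred by the syncer, must be retried */

struct inode {
        int inum;
        unsigned flags;
        TAILQ_ENTRY(inode) entry;
};
TAILQ_HEAD(iqueue, inode);

static void
collect_dirty(struct iqueue *syncq, struct iqueue *sideq, int dorestart)
{
        struct inode *ip, *next;

        if (dorestart == 0) {
                /* First pass: everything dirty at sync start joins syncq. */
                TAILQ_FOREACH(ip, sideq, entry)
                        ip->flags = (ip->flags & ~F_SIDEQ) | F_SYNCQ;
                TAILQ_CONCAT(syncq, sideq, entry);
        } else {
                /* Restart: only requeue inodes the syncer had to defer. */
                ip = TAILQ_FIRST(sideq);
                while (ip != NULL) {
                        next = TAILQ_NEXT(ip, entry);
                        if (ip->flags & F_PASS2) {
                                TAILQ_REMOVE(sideq, ip, entry);
                                ip->flags = (ip->flags & ~F_SIDEQ) | F_SYNCQ;
                                TAILQ_INSERT_TAIL(syncq, ip, entry);
                        }
                        ip = next;
                }
        }
}

int
main(void)
{
        struct iqueue syncq = TAILQ_HEAD_INITIALIZER(syncq);
        struct iqueue sideq = TAILQ_HEAD_INITIALIZER(sideq);
        struct inode a = { 1, F_SIDEQ }, b = { 2, F_SIDEQ | F_PASS2 };
        struct inode *ip;

        TAILQ_INSERT_TAIL(&sideq, &a, entry);
        TAILQ_INSERT_TAIL(&sideq, &b, entry);
        collect_dirty(&syncq, &sideq, 1);       /* restart case */
        TAILQ_FOREACH(ip, &syncq, entry)
                printf("inode %d requeued for this sync pass\n", ip->inum);
        return 0;
}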
diff --git a/sys/vfs/hammer2/hammer2_vnops.c b/sys/vfs/hammer2/hammer2_vnops.c
index 645e470..c932fcf 100644
@@ -1398,7 +1398,10 @@ hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
         * Create the actual inode as a hidden file in the iroot, then
         * create the directory entry.  The creation of the actual inode
         * sets its nlinks to 1 which is the value we desire.
+        *
+        * dip must be locked before nip to avoid deadlock.
         */
+       hammer2_inode_lock(dip, 0);
        nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
                                          inum, &error);
        if (error) {
@@ -1429,12 +1432,13 @@ hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
 
@@ -1601,7 +1605,10 @@ hammer2_vop_ncreate(struct vop_ncreate_args *ap)
         * Create the actual inode as a hidden file in the iroot, then
         * create the directory entry.  The creation of the actual inode
         * sets its nlinks to 1 which is the value we desire.
+        *
+        * dip must be locked before nip to avoid deadlock.
         */
+       hammer2_inode_lock(dip, 0);
        nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
                                          inum, &error);
 
@@ -1629,12 +1636,13 @@ hammer2_vop_ncreate(struct vop_ncreate_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
 
@@ -1675,8 +1683,12 @@ hammer2_vop_nmknod(struct vop_nmknod_args *ap)
 
        /*
         * Create the device inode and then create the directory entry.
+        *
+        * dip must be locked before nip to avoid deadlock.
         */
        inum = hammer2_trans_newinum(dip->pmp);
+
+       hammer2_inode_lock(dip, 0);
        nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
                                          inum, &error);
        if (error == 0) {
@@ -1701,12 +1713,13 @@ hammer2_vop_nmknod(struct vop_nmknod_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
 
@@ -1750,9 +1763,12 @@ hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
        /*
         * Create the softlink as an inode and then create the directory
         * entry.
+        *
+        * dip must be locked before nip to avoid deadlock.
         */
        inum = hammer2_trans_newinum(dip->pmp);
 
+       hammer2_inode_lock(dip, 0);
        nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
                                          inum, &error);
        if (error == 0) {
@@ -1766,6 +1782,7 @@ hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
                        nip = NULL;
                }
                *ap->a_vpp = NULL;
+               hammer2_inode_unlock(dip);
                hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
                return error;
        }
@@ -1805,12 +1822,13 @@ hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
 
@@ -1882,12 +1900,12 @@ hammer2_vop_nremove(struct vop_nremove_args *ap)
         */
        error = hammer2_xop_collect(&xop->head, 0);
        error = hammer2_error_to_errno(error);
-       hammer2_inode_unlock(dip);
 
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
+                       hammer2_inode_depend(dip, ip);
                        hammer2_inode_unlink_finisher(ip, isopen);
                        hammer2_inode_unlock(ip);
                }
@@ -1901,12 +1919,13 @@ hammer2_vop_nremove(struct vop_nremove_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
        if (error == 0) {
@@ -1959,12 +1978,12 @@ hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
         */
        error = hammer2_xop_collect(&xop->head, 0);
        error = hammer2_error_to_errno(error);
-       hammer2_inode_unlock(dip);
 
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
+                       hammer2_inode_depend(dip, ip);
                        hammer2_inode_unlink_finisher(ip, isopen);
                        hammer2_inode_unlock(ip);
                }
@@ -1978,12 +1997,13 @@ hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
        if (error == 0) {
                uint64_t mtime;
 
-               hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
+               /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
-               hammer2_inode_unlock(dip);
+               /*hammer2_inode_unlock(dip);*/
        }
+       hammer2_inode_unlock(dip);
 
        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
        if (error == 0) {