* related buffer. The data is assumed to be all-zeros. It
* is primarily used for indirect blocks.
*
- * MODIFIED - The chain's media data has been modified.
+ * MODIFIED - The chain's media data has been modified. Prevents chain
+ * free on lastdrop if still in the topology.
*
- * UPDATE - Chain might not be modified but parent blocktable needs update
+ * UPDATE - Chain might not be modified but parent blocktable needs
+ * an update. Prevents chain free on lastdrop if still in
+ * the topology.
*
* FICTITIOUS - Faked chain as a placeholder for an error condition. This
* chain is unsuitable for I/O.
struct hammer2_cluster_item {
hammer2_chain_t *chain;
int cache_index;
+ int error;
uint32_t flags;
};
hammer2_chain_drop(hammer2_chain_t *chain)
{
u_int refs;
- u_int need = 0;
if (hammer2_debug & 0x200000)
Debugger("drop");
print_backtrace(8);
#endif
- if (chain->flags & HAMMER2_CHAIN_UPDATE)
- ++need;
- if (chain->flags & HAMMER2_CHAIN_MODIFIED)
- ++need;
- KKASSERT(chain->refs > need);
+ KKASSERT(chain->refs > 0);
while (chain) {
refs = chain->refs;
* the kernel stack.
*
* The chain cannot be freed if it has any children.
+ * The chain cannot be freed if flagged MODIFIED unless we can dispose of that.
+ * The chain cannot be freed if flagged UPDATE unless we can dispose of that.
*
* The core spinlock is allowed nest child-to-parent (not parent-to-child).
*/
hammer2_chain_t *rdrop;
/*
- * Spinlock the core and check to see if it is empty. If it is
- * not empty we leave chain intact with refs == 0. The elements
- * in core->rbtree are associated with other chains contemporary
- * with ours but not with our chain directly.
+ * Critical field access.
*/
hammer2_spin_ex(&chain->core.spin);
+ if (chain->parent) {
+ /*
+ * If the chain has a parent the UPDATE bit prevents scrapping
+ * as the chain is needed to properly flush the parent. Try
+ * to complete the 1->0 transition and return NULL. Retry
+ * (return chain) if we are unable to complete the 1->0
+ * transition, else return NULL (nothing more to do).
+ *
+ * If the chain has a parent the MODIFIED bit prevents
+ * scrapping.
+ */
+ if (chain->flags & (HAMMER2_CHAIN_UPDATE |
+ HAMMER2_CHAIN_MODIFIED)) {
+ if (atomic_cmpset_int(&chain->refs, 1, 0)) {
+ hammer2_spin_unex(&chain->core.spin);
+ chain = NULL;
+ } else {
+ hammer2_spin_unex(&chain->core.spin);
+ }
+ return (chain);
+ }
+ /* spinlock still held */
+ } else {
+ /*
+ * The chain has no parent and can be flagged for destruction.
+ * Since it has no parent, UPDATE can also be cleared.
+ */
+ atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
+ if (chain->flags & HAMMER2_CHAIN_UPDATE)
+ atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
+
+ /*
+ * If the chain has children or if it has been MODIFIED and
+ * also recorded for DEDUP, we must still flush the chain.
+ *
+ * In the case where it has children, the DESTROY flag test
+ * in the flush code will prevent unnecessary flushes of
+ * MODIFIED chains that are not flagged DEDUP so don't worry
+ * about that here.
+ */
+ if (chain->core.chain_count ||
+ (chain->flags & (HAMMER2_CHAIN_MODIFIED |
+ HAMMER2_CHAIN_DEDUP)) ==
+ (HAMMER2_CHAIN_MODIFIED |
+ HAMMER2_CHAIN_DEDUP)) {
+ /*
+ * Put on flushq (should ensure refs > 1), retry
+ * the drop.
+ */
+ hammer2_spin_unex(&chain->core.spin);
+ hammer2_delayed_flush(chain);
+ return(chain); /* retry drop */
+ }
+
+ /*
+ * Otherwise we can scrap the MODIFIED bit if it is set,
+ * and continue along the freeing path.
+ */
+ if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
+ atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
+ atomic_add_long(&hammer2_count_modified_chains, -1);
+ }
+ /* spinlock still held */
+ }
+
/*
- * We can't free non-stale chains with children until we are
- * able to free the children because there might be a flush
- * dependency. Flushes of stale children (which should also
- * have their deleted flag set) short-cut recursive flush
- * dependencies and can be freed here. Any flushes which run
- * through stale children due to the flush synchronization
- * point should have a FLUSH_* bit set in the chain and not
- * reach lastdrop at this time.
+ * If any children exist we must leave the chain intact with refs == 0.
+ * They exist because chains are retained below us which have refs or
+ * may require flushing. This case can occur when parent != NULL.
*
- * NOTE: We return (chain) on failure to retry.
+ * Retry (return chain) if we fail to transition the refs to 0, else
+	 * return NULL indicating nothing more to do.
*/
if (chain->core.chain_count) {
if (atomic_cmpset_int(&chain->refs, 1, 0)) {
hammer2_spin_unex(&chain->core.spin);
- chain = NULL; /* success */
+ chain = NULL;
} else {
hammer2_spin_unex(&chain->core.spin);
}
- return(chain);
+ return (chain);
}
+ /* spinlock still held */
/* no chains left under us */
/*
if ((parent = chain->parent) != NULL) {
hammer2_spin_ex(&parent->core.spin);
if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
- /* 1->0 transition failed */
+ /* 1->0 transition failed, retry */
hammer2_spin_unex(&parent->core.spin);
hammer2_spin_unex(&chain->core.spin);
- return(chain); /* retry */
+ return(chain);
}
/*
- * 1->0 transition successful, remove chain from its
- * above core.
+ * 1->0 transition successful, remove chain from the
+ * parent.
*/
if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
RB_REMOVE(hammer2_chain_tree,
*/
if (parent->core.chain_count == 0) {
rdrop = parent;
- if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0) {
+ atomic_add_int(&rdrop->refs, 1);
+ /*
+ if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0)
rdrop = NULL;
- }
+ */
}
hammer2_spin_unex(&parent->core.spin);
parent = NULL; /* safety */
chain->core.chain_count == 0);
/*
- * All spin locks are gone, finish freeing stuff.
+ * All spin locks are gone, no pointers remain to the chain, finish
+ * freeing it.
*/
KKASSERT((chain->flags & (HAMMER2_CHAIN_UPDATE |
HAMMER2_CHAIN_MODIFIED)) == 0);
/*
* Shortcut the case if the data is embedded or not resolved.
+ * Only drop non-DIO-based data if the chain is not modified.
*
* Do NOT NULL out chain->data (e.g. inode data), it might be
* dirty.
newmod = 1;
} else if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
/*
- * Must set modified bit. If the chain has been deleted
- * it must be placed on the delayed-flush queue to prevent
- * it from possibly being lost (a normal flush of the topology
- * will no longer see it).
+ * Must set modified bit.
*/
atomic_add_long(&hammer2_count_modified_chains, 1);
atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
- hammer2_chain_ref(chain);
hammer2_pfs_memory_inc(chain->pmp); /* can be NULL */
- if ((chain->flags & HAMMER2_CHAIN_DELETED) &&
- (chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
- hammer2_delayed_flush(chain);
- }
newmod = 1;
} else {
/*
*/
newmod = 0;
}
- if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
+
+ /*
+ * Flag parent update required, clear DEDUP flag (already processed
+ * above).
+ */
+ if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0)
atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
- hammer2_chain_ref(chain);
- }
atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
/*
if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
atomic_add_long(&hammer2_count_modified_chains, 1);
atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
- hammer2_chain_ref(&hmp->vchain);
hammer2_pfs_memory_inc(hmp->vchain.pmp);
}
}
* setflush so the flush recognizes that it must update
* the bref in the parent.
*/
- if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
- hammer2_chain_ref(chain);
+ if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0)
atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
- }
}
/*
/*
* Chain is blockmapped, so there must be a parent.
* Atomically remove the chain from the parent and remove
- * the blockmap entry.
+ * the blockmap entry. The parent must be set modified
+ * to remove the blockmap entry.
*/
hammer2_blockref_t *base;
int count;
}
/*
- * To avoid losing track of a permanent deletion we add the chain
- * to the delayed flush queue. If we were to flush it right now the
- * parent would end up in a modified state and generate I/O.
- * The delayed queue gives the parent a chance to be deleted to
- * (e.g. rm -rf).
	 * Permanent deletions mark the chain as destroyed.
*/
if (flags & HAMMER2_DELETE_PERMANENT) {
atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
- hammer2_delayed_flush(chain);
} else {
/* XXX might not be needed */
hammer2_chain_setflush(chain);
* structure representing the inode locked to prevent
* consolidation/deconsolidation races.
*
+ * The flags passed in are LOOKUP flags, not RESOLVE flags. Only
+ * HAMMER2_LOOKUP_SHARED is supported.
+ *
* We locate the hardlink in the current or a common parent directory.
*
* If we are unable to locate the hardlink, EIO is returned and
hammer2_key_t key_dummy;
hammer2_key_t lhc;
int cache_index = -1;
+ int resolve_flags;
+
+ resolve_flags = (flags & HAMMER2_LOOKUP_SHARED) ?
+ HAMMER2_RESOLVE_SHARED : 0;
/*
* Obtain the key for the hardlink from *chainp.
hammer2_chain_ref(parent);
hammer2_chain_unlock(*parentp);
hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
- flags);
+ resolve_flags);
if ((*parentp)->parent == parent) {
hammer2_chain_drop(*parentp);
*parentp = parent;
hammer2_chain_drop(parent);
hammer2_chain_lock(*parentp,
HAMMER2_RESOLVE_ALWAYS |
- flags);
+ resolve_flags);
parent = NULL; /* safety */
/* retry */
}
int nquorum;
int umasters; /* unknown masters (still in progress) */
int smpresent;
+ int error;
int i;
cluster->error = 0;
cluster->array[i].flags |= HAMMER2_CITEM_INVALID;
chain = cluster->array[i].chain;
- if (chain && chain->error) {
+ error = cluster->array[i].error;
+ if (chain && error) {
if (cluster->focus == NULL || cluster->focus == chain) {
/* error will be overridden by valid focus */
- cluster->error = chain->error;
+ /* XXX */
}
/*
nflags |= HAMMER2_CLUSTER_RDHARD;
cluster->focus_index = i;
cluster->focus = chain;
- cluster->error = chain ? chain->error : 0;
+ cluster->error = error;
break;
default:
break;
}
chain = cluster->array[i].chain;
+ error = cluster->array[i].error;
/*
* Skip elements still in progress. umasters keeps
if (chain == NULL) {
++nmasters;
++nmasters_keymatch;
+ if (cluster->error == 0)
+ cluster->error = error;
}
} else if (chain &&
(key == (hammer2_key_t)-1 ||
if (quorum_tid == chain->bref.modify_tid) {
/*
* TID matches current collection.
+ *
+ * (error handled in next pass)
*/
++nmasters;
if (chain->error == 0) {
/*
* Validated end of scan.
*/
- if (flags & HAMMER2_CHECK_NULL)
- return ENOENT;
+ if (flags & HAMMER2_CHECK_NULL) {
+ if (cluster->error == 0)
+ cluster->error = ENOENT;
+ return cluster->error;
+ }
/*
* If we have a NULL focus at this point the agreeing quorum all
*
* We have quorum agreement, validate elements, not end of scan.
*/
+ cluster->error = 0;
for (i = 0; i < cluster->nchains; ++i) {
chain = cluster->array[i].chain;
+ error = cluster->array[i].error;
if (chain == NULL ||
chain->bref.key != key ||
chain->bref.modify_tid != quorum_tid) {
continue;
}
+ /*
+ * Quorum Match
+ *
+ * XXX for now, cumulative error.
+ */
+ if (cluster->error == 0)
+ cluster->error = error;
+
switch (cluster->pmp->pfs_types[i]) {
case HAMMER2_PFSTYPE_MASTER:
cluster->array[i].flags |= HAMMER2_CITEM_FEMOD;
atomic_set_int(&cluster->flags, nflags);
atomic_clear_int(&cluster->flags, HAMMER2_CLUSTER_ZFLAGS & ~nflags);
- return 0;
+ return cluster->error;
}
/*
/*
* Dispose of the modified bit.
*
+ * If parent is present, the UPDATE bit should already be set.
* UPDATE should already be set.
* bref.mirror_tid should already be set.
*/
KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
- chain == &hmp->vchain);
+ chain->parent == NULL);
+ if (hammer2_debug & 0x800000) {
+ hammer2_chain_t *pp;
+
+ for (pp = chain; pp->parent; pp = pp->parent)
+ ;
+ kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d FLAGS %08x (%s)\n",
+ chain, chain->parent, pp, pp->bref.type,
+ chain->bref.type, chain->flags,
+ (chain->bref.type == 1 ? (const char *)chain->data->ipdata.filename : "?")
+
+ );
+ print_backtrace(10);
+ }
atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
atomic_add_long(&hammer2_count_modified_chains, -1);
if (chain->pmp)
hammer2_pfs_memory_wakeup(chain->pmp);
- if ((chain->flags & HAMMER2_CHAIN_UPDATE) ||
- chain == &hmp->vchain ||
- chain == &hmp->fchain) {
- /*
- * Drop the ref from the MODIFIED bit we cleared,
- * net -1 ref.
- */
- hammer2_chain_drop(chain);
- } else {
+#if 0
+ if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
+ chain != &hmp->vchain &&
+ chain != &hmp->fchain) {
/*
- * Drop the ref from the MODIFIED bit we cleared and
- * set a ref for the UPDATE bit we are setting. Net
- * 0 refs.
+ * Set UPDATE bit indicating that the parent block
+ * table requires updating.
*/
atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
}
+#endif
/*
* Issue the flush. This is indirect via the DIO.
* chain gets reattached later on the bit will simply get set
* again.
*/
- if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL) {
+ if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
- hammer2_chain_drop(chain);
- }
/*
* The chain may need its blockrefs updated in the parent. This
* Clear UPDATE flag, mark parent modified, update its
* modify_tid if necessary, and adjust the parent blockmap.
*/
- if (chain->flags & HAMMER2_CHAIN_UPDATE) {
+ if (chain->flags & HAMMER2_CHAIN_UPDATE)
atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
- hammer2_chain_drop(chain);
- }
/*
* (optional code)
HAMMER2_RESOLVE_ALWAYS);
if (chain) {
hmp = chain->hmp;
- if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
+ if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
+ TAILQ_FIRST(&hmp->flushq) != NULL) {
hammer2_flush(chain, HAMMER2_FLUSH_TOP);
parent = chain->parent;
KKASSERT(chain->pmp != parent->pmp);
return(0);
}
+/*
+ * Place the inode on the PFS sideq for delayed handling.  While queued
+ * the inode holds an extra ref (taken below); per the surrounding code
+ * the list is handled later on — presumably at sync/flush time, confirm
+ * against the sideq consumer.
+ *
+ * The unlocked ONSIDEQ pre-test avoids the allocation entirely in the
+ * common already-queued case.  The allocation is made speculatively
+ * before acquiring list_spin (kmalloc with M_WAITOK may block and must
+ * not be called while holding the spinlock), then the flag is re-tested
+ * under the lock to close the race.  If another thread queued the inode
+ * first we lose the race and simply free our allocation.
+ */
+static
+void
+hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
+{
+	hammer2_inode_sideq_t *ipul;
+	hammer2_pfs_t *pmp = ip->pmp;
+
+	if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
+		ipul = kmalloc(sizeof(*ipul), pmp->minode,
+			       M_WAITOK | M_ZERO);
+		ipul->ip = ip;
+		hammer2_spin_ex(&pmp->list_spin);
+		/* re-test under list_spin; another thread may have won */
+		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
+			/* ref persists while the inode sits on the queue */
+			hammer2_inode_ref(ip);
+			atomic_set_int(&ip->flags,
+				       HAMMER2_INODE_ONSIDEQ);
+			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
+			hammer2_spin_unex(&pmp->list_spin);
+		} else {
+			/* lost the race, discard our speculative allocation */
+			hammer2_spin_unex(&pmp->list_spin);
+			kfree(ipul, pmp->minode);
+		}
+	}
+}
+
/*
* HAMMER2 inode locks
*
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
- hammer2_inode_sideq_t *ipul;
hammer2_pfs_t *pmp;
atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
if (ip->vp) {
vsetisdirty(ip->vp);
} else if ((pmp = ip->pmp) != NULL) {
- if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
- ipul = kmalloc(sizeof(*ipul), pmp->minode,
- M_WAITOK | M_ZERO);
- ipul->ip = ip;
- hammer2_inode_ref(ip);
- hammer2_spin_ex(&pmp->list_spin);
- if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
- atomic_set_int(&ip->flags,
- HAMMER2_INODE_ONSIDEQ);
- TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
- hammer2_spin_unex(&pmp->list_spin);
- } else {
- hammer2_spin_unex(&pmp->list_spin);
- hammer2_inode_drop(ip);
- kfree(ipul, pmp->minode);
- }
- }
+ hammer2_inode_delayed_sideq(ip);
}
}
* The flush code doesn't calculate check codes for
* file data (doing so can result in excessive I/O),
* so we do it here.
+ *
+ * Record for dedup only after the DIO's buffer cache
+ * buffer has been updated.
*/
hammer2_chain_setcheck(chain, bdata);
hammer2_dedup_record(chain, bdata);
* The flush code doesn't calculate check codes for
* file data (doing so can result in excessive I/O),
* so we do it here.
+ *
+ * Record for dedup only after the DIO's buffer cache
+ * buffer has been updated.
*/
hammer2_chain_setcheck(chain, bdata);
hammer2_dedup_record(chain, bdata);
hammer2_chain_ref(chain);
hammer2_chain_push_shared_lock(chain);
}
+ if (error == 0 && chain)
+ error = chain->error;
fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
cpu_sfence();
if (fifo->ri != fifo->wi) {
cpu_lfence();
chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
+ error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
++fifo->ri;
xop->cluster.array[i].chain = chain;
+ xop->cluster.array[i].error = error;
if (chain == NULL) {
/* XXX */
xop->cluster.array[i].flags |=
&hammer2_synchronous_flush, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
&hammer2_limit_dirty_chains, 0, "");
-SYSCTL_LONG(_vfs_hammer2, OID_AUTO, hammer2_count_modified_chains, CTLFLAG_RW,
+SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
&hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
&hammer2_dio_count, 0, "");
*/
if (hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) {
atomic_add_long(&hammer2_count_modified_chains, -1);
- atomic_clear_int(&hmp->vchain.flags,
- HAMMER2_CHAIN_MODIFIED);
+ atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
hammer2_pfs_memory_wakeup(hmp->vchain.pmp);
- hammer2_chain_drop(&hmp->vchain);
}
if (hmp->vchain.flags & HAMMER2_CHAIN_UPDATE) {
- atomic_clear_int(&hmp->vchain.flags,
- HAMMER2_CHAIN_UPDATE);
- hammer2_chain_drop(&hmp->vchain);
+ atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_UPDATE);
}
if (hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) {
atomic_add_long(&hammer2_count_modified_chains, -1);
- atomic_clear_int(&hmp->fchain.flags,
- HAMMER2_CHAIN_MODIFIED);
+ atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_MODIFIED);
hammer2_pfs_memory_wakeup(hmp->fchain.pmp);
- hammer2_chain_drop(&hmp->fchain);
}
if (hmp->fchain.flags & HAMMER2_CHAIN_UPDATE) {
- atomic_clear_int(&hmp->fchain.flags,
- HAMMER2_CHAIN_UPDATE);
- hammer2_chain_drop(&hmp->fchain);
+ atomic_clear_int(&hmp->fchain.flags, HAMMER2_CHAIN_UPDATE);
}
/*
* the ip is left with a reference and placed on a linked list and
* handled later on.
*/
+
if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
HAMMER2_INODE_MODIFIED |
HAMMER2_INODE_RESIZED)) &&
KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
ip->osize = ip->meta.size;
ip->meta.size = nsize;
- atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
- HAMMER2_INODE_RESIZED);
+ atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
+ hammer2_inode_modify(ip);
LOCKSTOP;
}
LOCKSTART;
KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
+ hammer2_inode_modify(ip);
osize = ip->meta.size;
ip->osize = osize;
ip->meta.size = nsize;
- atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
* inode to cdip.
*/
hammer2_inode_lock(ip, 0);
+
if (fdip != cdip &&
(ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
hammer2_xop_nlink_t *xop1;
#include "hammer2.h"
+/*
+ * Check whether the directory represented by orig_parent contains any
+ * visible entries (keys at or above HAMMER2_DIRHASH_VISIBLE).
+ *
+ * Returns 0 if the directory is empty, ENOTEMPTY otherwise.
+ */
+static
+int
+checkdirempty(hammer2_chain_t *orig_parent)
+{
+	hammer2_chain_t *parent;
+	hammer2_chain_t *scan;
+	hammer2_key_t key_next;
+	int cache_index = -1;
+	int error = 0;
+
+	parent = hammer2_chain_lookup_init(orig_parent, 0);
+	scan = hammer2_chain_lookup(&parent, &key_next,
+				    HAMMER2_DIRHASH_VISIBLE,
+				    HAMMER2_KEY_MAX,
+				    &cache_index, 0);
+	if (scan) {
+		/* found at least one visible entry */
+		error = ENOTEMPTY;
+		hammer2_chain_unlock(scan);
+		hammer2_chain_drop(scan);
+	}
+	hammer2_chain_lookup_done(parent);
+
+	return error;
+}
+
/*
* Backend for hammer2_vfs_root()
*
error = hammer2_chain_hardlink_find(
xop->head.ip1,
&parent, &chain,
- HAMMER2_RESOLVE_SHARED);
+ HAMMER2_LOOKUP_SHARED);
}
}
done:
}
if (type == HAMMER2_OBJTYPE_DIRECTORY &&
+ checkdirempty(chain) != 0) {
+ error = ENOTEMPTY;
+ } else if (type == HAMMER2_OBJTYPE_DIRECTORY &&
xop->isdir == 0) {
error = ENOTDIR;
} else
* also the inode when nlinks == 1. Hardlink targets
* are handled in the next conditional.
*/
+ error = chain->error;
hammer2_chain_delete(parent, chain,
xop->head.mtid, dopermanent);
}