From 623d43d4bac0d56862c31c924389cd6db3c35114 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Fri, 15 Nov 2013 00:21:23 -0800 Subject: [PATCH] hammer2 - Stabilization Yet more stabilization. Generally speaking the code is starting to look pretty good, but finding the corner cases for the in-memory chain state has taken more time than I had hoped. All of this complexity exists solely in order to allow flushes to run concurrently with modifying operations by the front-end (which is a pretty significant performance feature). * Fix a bug in the hammer2_chain_get() race detection code. * Refactor hammer2_freemap_alloc() a bit. * Save debug info in dead block array fields (temporary), and add some debugging fields to the chain structure. * Refactor the flush code that checks whether a deleted chain should be ignored or not, fixing a bug at the same time. * Refactor the transaction sequencing. Flushes now allocate an extra transaction id so a distinction can be made between chains whose block arrays have not been updated and chains whose block arrays have been updated. * Refactor the MOVED flag removal algorithm a little, fixing a bug or two. * Adjust the freemap to detect forked chains and ensure that a different reserved block is calculated for the two possible forks. * Fix a few mount failure cases that were panicking. * Update bref.mirror_tid in scan1's skip cases for already-clean children. * Do not update bref.mirror_tid at scan1's generic skip label, as this can confuse how a child's block array is interpreted in a future flush. * Augment chain code with maxloops counters for debugging infinite loops. * Remove the CHAIN_DUPLICATED tests in hammer2_chain_find() as this can catch a delete-duplicate in-progress and incorrectly cause the search to fail. * Remove looping on CHAIN_DUPLICATED detection. Instead, what we do now is lock the chain before testing the CHAIN_DELETED bit. Locking the chain will resolve delete-duplicate races and place the chain in a deterministic state. The CHAIN_DELETED test is then sufficient. --- sys/vfs/hammer2/TODO | 22 +++ sys/vfs/hammer2/hammer2.h | 16 +- sys/vfs/hammer2/hammer2_chain.c | 272 ++++++++++++++++++---------- sys/vfs/hammer2/hammer2_disk.h | 7 + sys/vfs/hammer2/hammer2_flush.c | 289 ++++++++++++++++-------------- sys/vfs/hammer2/hammer2_freemap.c | 57 +++++- sys/vfs/hammer2/hammer2_vfsops.c | 240 ++++++++++++++----------- sys/vfs/hammer2/hammer2_vnops.c | 4 +- 8 files changed, 554 insertions(+), 353 deletions(-) diff --git a/sys/vfs/hammer2/TODO b/sys/vfs/hammer2/TODO index d5feeedbe1..8ec02d4c3c 100644 --- a/sys/vfs/hammer2/TODO +++ b/sys/vfs/hammer2/TODO @@ -1,4 +1,26 @@ + + * The block freeing code. At the very least a bulk scan is needed + to implement freeing blocks. + + * Crash stability. Right now the allocation table on-media is not + properly synchronized with the flush. This needs to be adjusted + such that H2 can do an incremental scan on mount to fix up + allocations as part of its crash recovery mechanism. + + * We actually have to start checking and acting upon the CRCs being + generated. + + * Remaining known hardlink issues need to be addressed. + + * Core 'copies' mechanism needs to be implemented to support multiple + copies on the same media. + + * Core clustering mechanism needs to be implemented to support + mirroring and basic multi-master operation from a single host + (multi-host requires additional network protocols and won't + be as easy). + * make sure we aren't using a shared lock during RB_SCAN's? 
* overwrite in write_file case w/compression - if device block size changes diff --git a/sys/vfs/hammer2/hammer2.h b/sys/vfs/hammer2/hammer2.h index 3c4d6f059f..ca147253b8 100644 --- a/sys/vfs/hammer2/hammer2.h +++ b/sys/vfs/hammer2/hammer2.h @@ -194,6 +194,7 @@ struct hammer2_chain { struct hammer2_state *state; /* if active cache msg */ struct hammer2_mount *hmp; struct hammer2_pfsmount *pmp; /* can be NULL */ + struct hammer2_chain *debug_previous; hammer2_tid_t modify_tid; /* snapshot/flush filter */ hammer2_tid_t delete_tid; @@ -205,7 +206,8 @@ struct hammer2_chain { u_int refs; u_int lockcnt; int debug_reason; - int duplicate_reason; + int src_reason; + int dst_reason; hammer2_media_data_t *data; /* data pointer shortcut */ TAILQ_ENTRY(hammer2_chain) flush_node; /* flush deferral list */ }; @@ -471,8 +473,8 @@ struct hammer2_trans { TAILQ_ENTRY(hammer2_trans) entry; struct hammer2_pfsmount *pmp; /* might be NULL */ struct hammer2_mount *hmp_single; /* if single-targetted */ - hammer2_tid_t real_tid; hammer2_tid_t sync_tid; + hammer2_tid_t real_tid; hammer2_tid_t inode_tid; thread_t td; /* pointer */ int flags; @@ -777,7 +779,7 @@ void hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip, void hammer2_chain_unlock(hammer2_chain_t *chain); void hammer2_chain_wait(hammer2_chain_t *chain); hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, - hammer2_blockref_t *bref); + hammer2_blockref_t *bref, int generation); hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags); void hammer2_chain_lookup_done(hammer2_chain_t *parent); hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp, @@ -823,10 +825,10 @@ int hammer2_base_find(hammer2_chain_t *chain, hammer2_blockref_t *base, int count, int *cache_indexp, hammer2_key_t *key_nextp, hammer2_key_t key_beg, hammer2_key_t key_end); -void hammer2_base_delete(hammer2_chain_t *chain, +void hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *chain, hammer2_blockref_t *base, int count, int *cache_indexp, hammer2_chain_t *child); -void hammer2_base_insert(hammer2_chain_t *chain, +void hammer2_base_insert(hammer2_trans_t *trans, hammer2_chain_t *chain, hammer2_blockref_t *base, int count, int *cache_indexp, hammer2_chain_t *child); @@ -897,8 +899,8 @@ void hammer2_lwinprog_wait(hammer2_pfsmount_t *pmp); /* * hammer2_freemap.c */ -int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, - hammer2_blockref_t *bref, size_t bytes); +int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain, + size_t bytes); void hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp, hammer2_blockref_t *bref, int how); diff --git a/sys/vfs/hammer2/hammer2_chain.c b/sys/vfs/hammer2/hammer2_chain.c index b07c53fe55..792b18c3cd 100644 --- a/sys/vfs/hammer2/hammer2_chain.c +++ b/sys/vfs/hammer2/hammer2_chain.c @@ -161,14 +161,6 @@ hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain) { hammer2_chain_core_t *above; -#if 0 - if ((trans->flags & - (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_ISALLOCATING)) == - HAMMER2_TRANS_ISFLUSH) { - return; - } -#endif - while ((above = chain->above) != NULL) { spin_lock(&above->cst.spin); /* XXX optimize */ @@ -373,22 +365,24 @@ hammer2_chain_ref(hammer2_chain_t *chain) #define HAMMER2_CHAIN_INSERT_RACE 0x0004 static -void +int hammer2_chain_insert(hammer2_chain_core_t *above, hammer2_chain_layer_t *layer, - hammer2_chain_t *chain, int flags) + hammer2_chain_t *chain, int flags, int generation) { hammer2_chain_t *xchain; 
hammer2_chain_layer_t *nlayer; + int error = 0; if (flags & HAMMER2_CHAIN_INSERT_SPIN) spin_lock(&above->cst.spin); chain->above = above; /* - * Special case, place chain in a more recent layer than the specified - * layer. + * Special case, place the chain in the next most-recent layer as the + * specified layer, inserting a layer inbetween if necessary. */ if (layer) { + KKASSERT((flags & HAMMER2_CHAIN_INSERT_RACE) == 0); nlayer = TAILQ_PREV(layer, h2_layer_list, entry); if (nlayer && RB_INSERT(hammer2_chain_tree, &nlayer->rbtree, chain) == NULL) { @@ -410,27 +404,25 @@ hammer2_chain_insert(hammer2_chain_core_t *above, hammer2_chain_layer_t *layer, goto done; } - layer = TAILQ_FIRST(&above->layerq); - xchain = NULL; + /* + * Interlocked by spinlock, check for race + */ + if ((flags & HAMMER2_CHAIN_INSERT_RACE) && + above->generation != generation) { + error = EAGAIN; + goto failed; + } /* - * Try to insert + * Try to insert, allocate a new layer if a nominal collision + * occurs (a collision is different from a SMP race). */ + layer = TAILQ_FIRST(&above->layerq); + xchain = NULL; + if (layer == NULL || (xchain = RB_INSERT(hammer2_chain_tree, &layer->rbtree, chain)) != NULL) { - /* - * Either no layers have been allocated or the insertion - * failed. This is fatal if the conflicted xchain is not - * flagged as deleted. Caller may or may allow the failure. - */ - if ((flags & HAMMER2_CHAIN_INSERT_RACE) && - xchain && (xchain->flags & HAMMER2_CHAIN_DELETED) == 0) { - chain->above = NULL; - chain->inlayer = NULL; - kprintf("insertion race against %p\n", xchain); - goto failed; - } /* * Allocate a new layer to resolve the issue. @@ -441,6 +433,16 @@ hammer2_chain_insert(hammer2_chain_core_t *above, hammer2_chain_layer_t *layer, RB_INIT(&layer->rbtree); layer->good = 0xABCD; spin_lock(&above->cst.spin); + + if ((flags & HAMMER2_CHAIN_INSERT_RACE) && + above->generation != generation) { + spin_unlock(&above->cst.spin); + kfree(layer, chain->hmp->mchain); + spin_lock(&above->cst.spin); + error = EAGAIN; + goto failed; + } + TAILQ_INSERT_HEAD(&above->layerq, layer, entry); RB_INSERT(hammer2_chain_tree, &layer->rbtree, chain); } @@ -457,6 +459,7 @@ done: failed: if (flags & HAMMER2_CHAIN_INSERT_SPIN) spin_unlock(&above->cst.spin); + return error; } /* @@ -1309,7 +1312,7 @@ hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip, * Relocate the block, even if making it smaller (because different * block sizes may be in different regions). 
*/ - hammer2_freemap_alloc(trans, chain->hmp, &chain->bref, nbytes); + hammer2_freemap_alloc(trans, chain, nbytes); chain->bytes = nbytes; atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW); /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */ @@ -1379,6 +1382,18 @@ hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp, chain = *chainp; hmp = chain->hmp; +#if 0 + if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP || + chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE || + chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { + kprintf("trans %04jx/%08x MODIFY1 %p.%d [%08x] %016jx/%d %016jx C/D %016jx/%016jx\n", + trans->sync_tid, trans->flags, + chain, chain->bref.type, chain->flags, + chain->bref.key, chain->bref.keybits, + chain->bref.data_off, + chain->modify_tid, chain->delete_tid); + } +#endif #if 0 kprintf("MODIFY %p.%d flags %08x mod=%016jx del=%016jx\n", chain, chain->bref.type, chain->flags, chain->modify_tid, chain->delete_tid); #endif @@ -1417,8 +1432,20 @@ hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp, if (chain != &hmp->fchain && chain != &hmp->vchain) { KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0); hammer2_chain_delete_duplicate(trans, chainp, 0); + chain = *chainp; +#if 0 + if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP || + chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE || + chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { + kprintf("trans %04jx/%08x MODIFY2 %p.%d [%08x] %016jx/%d %016jx\n", + trans->sync_tid, trans->flags, + chain, chain->bref.type, chain->flags, + chain->bref.key, chain->bref.keybits, + chain->bref.data_off); return; } +#endif + } /* * Fall through if fchain or vchain, clearing the CHAIN_FLUSHED @@ -1450,12 +1477,10 @@ hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp, ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 && chain->modify_tid != trans->sync_tid) ) { - hammer2_freemap_alloc(trans, chain->hmp, - &chain->bref, chain->bytes); + hammer2_freemap_alloc(trans, chain, chain->bytes); /* XXX failed allocation */ } else if (chain->flags & HAMMER2_CHAIN_FORCECOW) { - hammer2_freemap_alloc(trans, chain->hmp, - &chain->bref, chain->bytes); + hammer2_freemap_alloc(trans, chain, chain->bytes); /* XXX failed allocation */ } atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW); @@ -1671,14 +1696,13 @@ hammer2_chain_find_callback(hammer2_chain_t *child, void *data) hammer2_key_t child_end; /* - * This is a live-view find. Ignore chains which have been - * delete-duplicated. - */ - if (child->flags & HAMMER2_CHAIN_DUPLICATED) - return(0); - - /* - * General cases + * WARNING! Do not discard DUPLICATED chains, it is possible that + * we are catching an insertion half-way done. If a + * duplicated chain turns out to be the best choice the + * caller will re-check its flags after locking it. + * + * WARNING! Layerq is scanned forwards, exact matches should keep + * the existing info->best. */ if ((best = info->best) == NULL) { /* @@ -1745,23 +1769,29 @@ hammer2_chain_find_callback(hammer2_chain_t *child, void *data) * in-memory chain structure which reflects it. modify_tid will be * left 0 which forces any modifications to issue a delete-duplicate. * - * NULL is returned if the insertion races. + * To handle insertion races pass the INSERT_RACE flag along with the + * generation number of the core. NULL will be returned if the generation + * number changes before we have a chance to insert the chain. Insert + * races can occur because the parent might be held shared. 
* * Caller must hold the parent locked shared or exclusive since we may * need the parent's bref array to find our block. */ hammer2_chain_t * -hammer2_chain_get(hammer2_chain_t *parent, hammer2_blockref_t *bref) +hammer2_chain_get(hammer2_chain_t *parent, hammer2_blockref_t *bref, + int generation) { hammer2_mount_t *hmp = parent->hmp; hammer2_chain_core_t *above = parent->core; hammer2_chain_t *chain; + int error; /* * Allocate a chain structure representing the existing media * entry. Resulting chain has one ref and is not locked. */ chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref); + chain->dst_reason = 100; hammer2_chain_core_alloc(NULL, chain, NULL); /* ref'd chain returned */ chain->modify_tid = chain->bref.mirror_tid; @@ -1773,17 +1803,22 @@ hammer2_chain_get(hammer2_chain_t *parent, hammer2_blockref_t *bref) * a shared lock on the parent. */ KKASSERT(parent->refs > 0); - hammer2_chain_insert(above, NULL, chain, - HAMMER2_CHAIN_INSERT_SPIN | - HAMMER2_CHAIN_INSERT_RACE); - if ((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0) { - kprintf("chain %p not on RBTREE\n", chain); + error = hammer2_chain_insert(above, NULL, chain, + HAMMER2_CHAIN_INSERT_SPIN | + HAMMER2_CHAIN_INSERT_RACE, + generation); + if (error) { + KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0); + kprintf("chain %p get race\n", chain); hammer2_chain_drop(chain); chain = NULL; + } else { + KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE); } /* - * Return our new chain referenced but not locked. + * Return our new chain referenced but not locked, or NULL if + * a race occurred. */ return (chain); } @@ -1917,6 +1952,8 @@ hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp, int how_always = HAMMER2_RESOLVE_ALWAYS; int how_maybe = HAMMER2_RESOLVE_MAYBE; int how; + int generation; + int maxloops = 300000; if (flags & HAMMER2_LOOKUP_ALWAYS) { how_maybe = how_always; @@ -1954,6 +1991,8 @@ hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp, } again: + if (--maxloops == 0) + panic("hammer2_chain_lookup: maxloops"); /* * Locate the blockref array. Currently we do a fully associative * search through the array. @@ -2042,6 +2081,7 @@ again: chain = hammer2_combined_find(parent, base, count, cache_indexp, key_nextp, key_beg, key_end, &bref); + generation = above->generation; /* * Exhausted parent chain, iterate. @@ -2061,7 +2101,7 @@ again: if (chain == NULL) { bcopy = *bref; spin_unlock(&above->cst.spin); - chain = hammer2_chain_get(parent, &bcopy); + chain = hammer2_chain_get(parent, &bcopy, generation); if (chain == NULL) { kprintf("retry lookup parent %p keys %016jx:%016jx\n", parent, key_beg, key_end); @@ -2075,16 +2115,30 @@ again: hammer2_chain_ref(chain); spin_unlock(&above->cst.spin); } - /* chain is referenced but not locked */ + + /* + * chain is referenced but not locked. We must lock the chain + * to obtain definitive DUPLICATED/DELETED state + */ + if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT || + chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) { + hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF); + } else { + hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF); + } /* * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX) * - * NOTE: chain's key range is not relevant as there might be + * NOTE: Chain's key range is not relevant as there might be * one-offs within the range that are not deleted. 
+ * + * NOTE: Ignore the DUPLICATED flag, the lock above resolves + * the chain's terminal state so if it is duplicated it + * is virtually certain to be either deleted or live. */ if (chain->flags & HAMMER2_CHAIN_DELETED) { - hammer2_chain_drop(chain); + hammer2_chain_unlock(chain); key_beg = *key_nextp; if (key_beg == 0 || key_beg > key_end) return(NULL); @@ -2109,13 +2163,10 @@ again: */ if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT || chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) { - hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF); hammer2_chain_unlock(parent); *parentp = parent = chain; goto again; } - - hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF); done: /* * All done, return the chain @@ -2223,6 +2274,8 @@ hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain, int how_always = HAMMER2_RESOLVE_ALWAYS; int how_maybe = HAMMER2_RESOLVE_MAYBE; int how; + int generation; + int maxloops = 300000; hmp = parent->hmp; @@ -2259,6 +2312,8 @@ hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain, } again: + if (--maxloops == 0) + panic("hammer2_chain_scan: maxloops"); /* * Locate the blockref array. Currently we do a fully associative * search through the array. @@ -2321,6 +2376,7 @@ again: chain = hammer2_combined_find(parent, base, count, cache_indexp, &next_key, key, HAMMER2_MAX_KEY, &bref); + generation = above->generation; /* * Exhausted parent chain, we're done. @@ -2337,7 +2393,7 @@ again: if (chain == NULL) { bcopy = *bref; spin_unlock(&above->cst.spin); - chain = hammer2_chain_get(parent, &bcopy); + chain = hammer2_chain_get(parent, &bcopy, generation); if (chain == NULL) { kprintf("retry scan parent %p keys %016jx\n", parent, key); @@ -2352,7 +2408,12 @@ again: hammer2_chain_ref(chain); spin_unlock(&above->cst.spin); } - /* chain is referenced but not locked */ + + /* + * chain is referenced but not locked. We must lock the chain + * to obtain definitive DUPLICATED/DELETED state + */ + hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF); /* * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX) @@ -2362,9 +2423,13 @@ again: * * NOTE: XXX this could create problems with scans used in * situations other than mount-time recovery. + * + * NOTE: Ignore the DUPLICATED flag, the lock above resolves + * the chain's terminal state so if it is duplicated it + * is virtually certain to be either deleted or live. */ if (chain->flags & HAMMER2_CHAIN_DELETED) { - hammer2_chain_drop(chain); + hammer2_chain_unlock(chain); chain = NULL; key = next_key; @@ -2373,11 +2438,6 @@ again: goto again; } - /* - * Lock as requested - */ - hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF); - done: /* * All done, return the chain or NULL @@ -2423,6 +2483,7 @@ hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp, int allocated = 0; int error = 0; int count; + int maxloops = 300000; above = parent->core; KKASSERT(ccms_thread_lock_owned(&above->cst)); @@ -2443,6 +2504,7 @@ hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp, dummy.data_off = hammer2_getradix(bytes); dummy.methods = parent->bref.methods; chain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy); + chain->dst_reason = 101; hammer2_chain_core_alloc(trans, chain, NULL); /* @@ -2530,6 +2592,8 @@ hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp, * determine if an indirect block is required. 
*/ again: + if (--maxloops == 0) + panic("hammer2_chain_create: maxloops"); above = parent->core; switch(parent->bref.type) { @@ -2613,7 +2677,8 @@ again: KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0); hammer2_chain_insert(above, NULL, chain, HAMMER2_CHAIN_INSERT_SPIN | - HAMMER2_CHAIN_INSERT_LIVE); + HAMMER2_CHAIN_INSERT_LIVE, + 0); if (allocated) { /* @@ -2723,21 +2788,8 @@ hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t **parentp, ochain->debug_reason += 0x10000; else ochain->debug_reason += 0x100000; - ochain->duplicate_reason = (ochain->duplicate_reason << 8) | - (duplicate_reason | 0x10); + ochain->src_reason = duplicate_reason; -#if 0 - if (ochain->bref.type == HAMMER2_BREF_TYPE_DATA) { - hammer2_chain_modify(trans, &ochain, - HAMMER2_MODIFY_OPTDATA | - HAMMER2_MODIFY_NOREALLOC); - } else if (ochain->flags & HAMMER2_CHAIN_INITIAL) { - hammer2_chain_modify(trans, &ochain, - HAMMER2_MODIFY_OPTDATA); - } else { - hammer2_chain_modify(trans, &ochain, 0); - } -#endif atomic_set_int(&ochain->flags, HAMMER2_CHAIN_FORCECOW); /* @@ -2756,16 +2808,18 @@ hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t **parentp, bref = &ochain->bref; if (snapshot) { nchain = hammer2_chain_alloc(hmp, NULL, trans, bref); + nchain->dst_reason = 102; atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SNAPSHOT); } else { nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, bref); + nchain->dst_reason = 103; } + nchain->debug_previous = ochain; hammer2_chain_core_alloc(trans, nchain, ochain); bytes = (hammer2_off_t)1 << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX); nchain->bytes = bytes; nchain->modify_tid = ochain->modify_tid; - nchain->duplicate_reason = duplicate_reason; if (ochain->flags & HAMMER2_CHAIN_INITIAL) atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL); @@ -2882,6 +2936,7 @@ hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp, hmp = ochain->hmp; ochain->debug_reason += 0x1000; + ochain->src_reason = 99; if ((ochain->debug_reason & 0xF000) > 0x4000) { kprintf("ochain %p\n", ochain); Debugger("shit2"); @@ -2899,6 +2954,8 @@ hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp, * core), until we can give nchain its own official ock. */ nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, &ochain->bref); + nchain->dst_reason = 104; + nchain->debug_previous = ochain; if (flags & HAMMER2_DELDUP_RECORE) hammer2_chain_core_alloc(trans, nchain, NULL); else @@ -2964,6 +3021,9 @@ hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp, nchain->modify_tid = ochain->modify_tid; if (ochain->flags & HAMMER2_CHAIN_DELETED) { + /* + * ochain was deleted + */ atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DELETED); if (ochain->delete_tid > trans->sync_tid) { /* @@ -3001,17 +3061,24 @@ hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp, * ochain was deleted in a prior transaction. * create and delete nchain in the current * transaction. + * + * (delete_tid might represent a deleted inode + * which still has an open descriptor). */ nchain->delete_tid = trans->sync_tid; } - hammer2_chain_insert(above, ochain->inlayer, nchain, 0); + hammer2_chain_insert(above, ochain->inlayer, nchain, 0, 0); } else { + /* + * ochain was not deleted, delete it in the current + * transaction. 
+ */ KKASSERT(trans->sync_tid >= ochain->modify_tid); ochain->delete_tid = trans->sync_tid; atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED); atomic_add_int(&above->live_count, -1); hammer2_chain_insert(above, NULL, nchain, - HAMMER2_CHAIN_INSERT_LIVE); + HAMMER2_CHAIN_INSERT_LIVE, 0); } if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) { @@ -3030,6 +3097,9 @@ hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp, /* * Finishing fixing up nchain. A new block will be allocated if * crossing a synchronization point (meta-data only). + * + * Calling hammer2_chain_modify() will update modify_tid to + * (typically) trans->sync_tid. */ if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) { hammer2_chain_modify(trans, &nchain, @@ -3246,6 +3316,8 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, int cache_index; int loops; int reason; + int generation; + int maxloops = 300000; /* * Calculate the base blockref pointer or NULL if the chain @@ -3367,6 +3439,7 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, dummy.bref.methods = parent->bref.methods; ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref); + ichain->dst_reason = 105; atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL); hammer2_chain_core_alloc(trans, ichain, NULL); icore = ichain->core; @@ -3413,6 +3486,7 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, &cache_index, &key_next, key_beg, key_end, &bref); + generation = above->generation; if (bref == NULL) break; key_next = bref->key + ((hammer2_key_t)1 << bref->keybits); @@ -3431,6 +3505,9 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, * chains (potentially from media as it might not be * in-memory). Then move it to the new parent (ichain) * via DELETE-DUPLICATE. + * + * chain is referenced but not locked. We must lock the + * chain to obtain definitive DUPLICATED/DELETED state */ if (chain) { /* @@ -3447,7 +3524,7 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, */ bcopy = *bref; spin_unlock(&above->cst.spin); - chain = hammer2_chain_get(parent, bref); + chain = hammer2_chain_get(parent, &bcopy, generation); if (chain == NULL) { reason = 1; spin_lock(&above->cst.spin); @@ -3467,15 +3544,10 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, * This is always live so if the chain has been delete- * duplicated we raced someone and we have to retry. * - * If a terminal (i.e. not duplicated) chain has been - * deleted we move on to the next key. + * NOTE: Ignore the DUPLICATED flag, the lock above resolves + * the chain's terminal state so if it is duplicated it + * is virtually certain to be either deleted or live. 
*/ - if (chain->flags & HAMMER2_CHAIN_DUPLICATED) { - reason = 3; - hammer2_chain_unlock(chain); - spin_lock(&above->cst.spin); - continue; - } if (chain->flags & HAMMER2_CHAIN_DELETED) { hammer2_chain_unlock(chain); goto next_key; @@ -3492,6 +3564,8 @@ hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent, next_key: spin_lock(&above->cst.spin); next_key_spinlocked: + if (--maxloops == 0) + panic("hammer2_chain_create_indirect: maxloops"); reason = 4; if (key_next == 0 || key_next > key_end) break; @@ -3514,7 +3588,8 @@ next_key_spinlocked: KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0); hammer2_chain_insert(above, NULL, ichain, HAMMER2_CHAIN_INSERT_SPIN | - HAMMER2_CHAIN_INSERT_LIVE); + HAMMER2_CHAIN_INSERT_LIVE, + 0); /* * Mark the new indirect block modified after insertion, which @@ -3584,7 +3659,7 @@ hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp, int cache_index; int locount; int hicount; - int loops = 0; + int maxloops = 300000; key = *keyp; above = parent->core; @@ -3602,7 +3677,7 @@ hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp, spin_lock(&above->cst.spin); for (;;) { - if (++loops == 100000) { + if (--maxloops == 0) { panic("indkey_freemap shit %p %p:%d\n", parent, base, count); } @@ -3690,7 +3765,7 @@ hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp, int locount; int hicount; int cache_index; - int loops = 0; + int maxloops = 300000; key = *keyp; above = parent->core; @@ -3709,7 +3784,7 @@ hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp, spin_lock(&above->cst.spin); for (;;) { - if (++loops == 100000) { + if (--maxloops == 0) { panic("indkey_freemap shit %p %p:%d\n", parent, base, count); } @@ -4128,7 +4203,7 @@ found: * need to be adjusted when we commit the media change. */ void -hammer2_base_delete(hammer2_chain_t *parent, +hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *parent, hammer2_blockref_t *base, int count, int *cache_indexp, hammer2_chain_t *child) { @@ -4153,6 +4228,9 @@ hammer2_base_delete(hammer2_chain_t *parent, return; } bzero(&base[i], sizeof(*base)); + base[i].mirror_tid = (intptr_t)parent; + base[i].modify_tid = (intptr_t)child; + base[i].check.debug.sync_tid = trans->sync_tid; /* * We can only optimize core->live_zero for live chains. @@ -4176,7 +4254,7 @@ hammer2_base_delete(hammer2_chain_t *parent, * need to be adjusted when we commit the media change. 
*/ void -hammer2_base_insert(hammer2_chain_t *parent, +hammer2_base_insert(hammer2_trans_t *trans __unused, hammer2_chain_t *parent, hammer2_blockref_t *base, int count, int *cache_indexp, hammer2_chain_t *child) { diff --git a/sys/vfs/hammer2/hammer2_disk.h b/sys/vfs/hammer2/hammer2_disk.h index 9e5e13adfe..7167e17206 100644 --- a/sys/vfs/hammer2/hammer2_disk.h +++ b/sys/vfs/hammer2/hammer2_disk.h @@ -428,6 +428,13 @@ struct hammer2_blockref { /* MUST BE EXACTLY 64 BYTES */ uint64_t avail; /* total available bytes */ uint64_t unused; /* unused must be 0 */ } freemap; + + /* + * Debugging + */ + struct { + hammer2_tid_t sync_tid; + } debug; } check; }; diff --git a/sys/vfs/hammer2/hammer2_flush.c b/sys/vfs/hammer2/hammer2_flush.c index ea37b3df89..280d6c61a9 100644 --- a/sys/vfs/hammer2/hammer2_flush.c +++ b/sys/vfs/hammer2/hammer2_flush.c @@ -72,6 +72,19 @@ static int hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data); static void hammer2_rollup_stats(hammer2_chain_t *parent, hammer2_chain_t *child, int how); +/* + * Can we ignore a chain for the purposes of flushing modifications + * to the media? + */ +static __inline +int +h2ignore_deleted(hammer2_flush_info_t *info, hammer2_chain_t *chain) +{ + return (chain->delete_tid <= info->sync_tid && + (chain->bref.type != HAMMER2_BREF_TYPE_INODE || + (chain->flags & HAMMER2_CHAIN_DESTROYED))); +} + #if 0 static __inline void @@ -100,24 +113,29 @@ hammer2_updatestats(hammer2_flush_info_t *info, hammer2_blockref_t *bref, * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single * media target. The latter mode is used by the recovery code. * - * We don't bother marking the volume header MODIFIED. Instead, the volume - * will be synchronized at a later time as part of a larger flush sequence. + * TWO TRANSACTION IDs can run concurrently, where one is a flush and the + * other is a set of any number of concurrent filesystem operations. We + * can either have (running fs_ops) + (flush) + (blocked fs_ops), + * or we can have (flush) + (concurrent fs_ops). * - * Non-flush transactions can typically run concurrently. However if - * there are non-flush transaction both before AND after a flush trans, - * the transactions after stall until the ones before finish. + * During a flush, new fs_ops are only blocked until the fs_ops prior to + * the flush complete. The new fs_ops can then run concurrent with the flush. * - * Non-flush transactions occuring after a flush pointer can run concurrently - * with that flush. They only have to wait for transactions prior to the - * flush trans to complete before they unstall. + * Buffer-cache transactions operate as fs_ops but never block. A + * buffer-cache flush will run either before or after the current pending + * flush depending on its state. * - * WARNING! Transaction ids are only allocated when the transaction becomes - * active, which allows other transactions to insert ahead of us - * if we are forced to block (only bioq transactions do that). + * sync_tid vs real_tid. For flush transactions ONLY, the flush operation + * actually uses two transaction ids, one for the flush operation itself, + * and the next for any freemap allocations made as a side-effect. real_tid + * is fixed at the flush's own id, sync_tid is adjusted dynamically as-needed. * - * WARNING! Modifications to the root volume cannot dup the root volume - * header to handle synchronization points, so alloc_tid can - * wind up (harmlessly) more advanced on flush. + * NOTE: The sync_tid for a flush's freemap allocation will match the + * sync_tid of the following transaction(s). 
+ * The freemap topology will be out-of-step by one transaction id + * in order to give the flusher a stable freemap topology to flush + * out. This is fixed up at mount-time using a quick incremental + * scan. */ void hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp, @@ -147,10 +165,15 @@ hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp, * * We queue ourselves and then wait to become the head * of the queue, allowing all prior flushes to complete. + * + * A unique transaction id is required to avoid confusion + * when updating the block tables. */ ++hmp->flushcnt; - trans->sync_tid = hmp->voldata.alloc_tid++; + ++hmp->voldata.alloc_tid; + trans->sync_tid = hmp->voldata.alloc_tid; trans->real_tid = trans->sync_tid; + ++hmp->voldata.alloc_tid; TAILQ_INSERT_TAIL(&hmp->transq, trans, entry); if (TAILQ_FIRST(&hmp->transq) != trans) { trans->blocked = 1; @@ -413,7 +436,6 @@ hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t **chainp) /* * Check if we even have any work to do. * - * We do not update bref.mirror_tid if nothing is being modified. * We do not update core->update_lo because there might be other * paths to the core and we haven't actually checked it. * @@ -424,6 +446,9 @@ hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t **chainp) (core->update_lo >= info->sync_tid || chain->bref.mirror_tid >= info->sync_tid || chain->bref.mirror_tid >= core->update_hi)) { + KKASSERT(chain->modify_tid <= info->sync_tid); + /* don't update update_lo, there may be other paths to core */ + /* don't update bref.mirror_tid, scan2 is not called */ return; } @@ -446,7 +471,7 @@ hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t **chainp) if (chain->modify_tid > info->sync_tid && chain != &hmp->fchain && chain != &hmp->vchain) { chain->debug_reason = (chain->debug_reason & ~255) | 5; - /* do not update bref.mirror_tid */ + /* do not update bref.mirror_tid, scan2 ignores chain */ /* do not update core->update_lo, there may be another path */ return; } @@ -467,9 +492,10 @@ retry: (chain->flags & HAMMER2_CHAIN_DUPLICATED)) { chain->debug_reason = (chain->debug_reason & ~255) | 9; if (chain->flags & HAMMER2_CHAIN_MODIFIED) { -#if 1 +#if 0 /* - * XXX what if we have a snapshot? + * XXX should be able to invalidate the buffer here. + * XXX problem if reused, snapshotted, or reactivated. */ if (chain->dio) { hammer2_io_setinval(chain->dio, chain->bytes); @@ -486,7 +512,8 @@ retry: /* * Update mirror_tid, indicating that chain is synchronized - * on its modification and block table. + * on its modification and block table. This probably isn't + * needed since scan2 should ignore deleted chains anyway. */ if (chain->bref.mirror_tid < info->sync_tid) chain->bref.mirror_tid = info->sync_tid; @@ -601,7 +628,11 @@ retry: /* * We unlock the parent during the scan1 recursion, parent * may have been deleted out from under us. + * * parent may have been destroyed out from under us + * + * parent may have been synchronously flushed due to aliasing + * via core (is this possible?). */ if (chain->delete_tid <= info->sync_tid && (chain->flags & HAMMER2_CHAIN_DUPLICATED)) { @@ -629,14 +660,13 @@ retry: * state before we scan the children to update the parent's * block table. This must essentially be done as an atomic * operation (the parent must remain locked throughout the - * operation). + * operation), otherwise other transactions can squeeze a + * delete-duplicate in and create block table havoc. 
* * Care must be taken to not try to update the parent twice - * during the current flush cycle, which would likely - * result in an assertion getting hit. The MOVED bit on - * the children does not add any measure of safety since it - * cannot be immediately cleared (there might be other - * parents that require action XXX). + * during the current flush cycle, which would cause more + * havoc. It's so important that we assert that we haven't + * double-flushed a parent below by testing modify_tid. * * NOTE: Blockrefs are only updated on live chains. * @@ -650,19 +680,29 @@ retry: * parent's delete_tid it will still appear to be * 'live' for the purposes of the flush. */ - if (info->domodify && chain->delete_tid > info->sync_tid) { - KKASSERT(chain->modify_tid < info->sync_tid || - (chain->flags & HAMMER2_CHAIN_FLUSHED) == 0); + if (info->domodify && !h2ignore_deleted(info, chain)) { + KKASSERT(chain->modify_tid < info->sync_tid); + + /* + * The scan1 loop and/or flush_core is reentrant, + * particularly when core->generation changes. To + * avoid havoc we have to prevent repetitive + * delete-duplicates of the same chain. + * + * After executing the modify set the original chain's + * bref.mirror_tid to prevent any reentrancy during + * the current flush cycle. + */ hammer2_chain_modify(info->trans, &info->parent, HAMMER2_MODIFY_NO_MODIFY_TID); if (info->parent != chain) { + if (chain->bref.mirror_tid < info->sync_tid) + chain->bref.mirror_tid = info->sync_tid; hammer2_chain_drop(chain); hammer2_chain_ref(info->parent); } chain = info->parent; } - if (info->domodify) - KKASSERT(chain->modify_tid == info->sync_tid); chain->debug_reason = (chain->debug_reason & ~255) | 7; KKASSERT(chain == info->parent); @@ -738,14 +778,14 @@ retry: KKASSERT(chain->refs > 1); } else { /* - * chain is now considered up-to-date, adjust - * bref.mirror_tid and update_lo. + * There is no deferral in this path. Chain is now + * considered up-to-date. * - * (no deferral in this path) + * Adjust update_lo now and bref.mirror_tid will be + * updated a bit later on the fall-through. */ if (core->update_lo < info->sync_tid) core->update_lo = info->sync_tid; - } #if FLUSH_DEBUG @@ -765,30 +805,36 @@ retry: chain, chain->refs, chain->flags); } /* do not update core->update_lo */ + /* do not update bref.mirror_tid */ return; } /* - * non-deferred path - mirror_tid and update_lo have been updated - * at this point. + * Non-deferral path, chain is now deterministically being flushed. + * We've finished running the recursion and the blockref update. * - * Deal with deleted chains on the way back up. Deleted inodes may - * still be active due to open descriptors so test whether the inode - * has been DESTROYED (aka deactivated after being unlinked) or not. - * Clear the modified bit if it is set. + * update bref.mirror_tid. update_lo has already been updated. + */ + if (chain->bref.mirror_tid < info->sync_tid) + chain->bref.mirror_tid = info->sync_tid; + + /* + * Deal with deleted and destroyed chains on the way back up. + * + * Deleted inodes may still be active due to open descriptors so + * test whether the inode has been DESTROYED (aka deactivated after + * being unlinked) or not. + * + * Otherwise a delted chain can be optimized by clearing MODIFIED + * without bothering to write it out. * * NOTE: We optimize this by noting that only 'inode' chains require * this treatment. When a file with an open descriptor is * deleted only its inode is marked deleted. 
Other deletions, * such as indirect block deletions, will no longer be visible * to the live filesystem and do not need to be updated. - * - * NOTE: scan2 has already executed above so statistics have - * already been rolled up. */ - if (chain->delete_tid <= info->sync_tid && - (chain->bref.type != HAMMER2_BREF_TYPE_INODE || - (chain->flags & HAMMER2_CHAIN_DESTROYED))) { + if (h2ignore_deleted(info, chain)) { /* * At the moment we unconditionally set the MOVED bit because * there are situations where it might not have been set due @@ -802,11 +848,10 @@ retry: } chain->debug_reason = (chain->debug_reason & ~255) | 9; if (chain->flags & HAMMER2_CHAIN_MODIFIED) { -#if 1 +#if 0 /* - * XXX what if we have a snapshot? - * Can only destroy the buffer if the chain represents - * the entire contents of the buffer. + * XXX should be able to invalidate the buffer here. + * XXX problem if reused, snapshotted, or reactivated. */ if (chain->dio) { hammer2_io_setinval(chain->dio, chain->bytes); @@ -815,19 +860,15 @@ retry: atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED); hammer2_chain_drop(chain); } - - /* - * Update mirror_tid, indicating that chain is synchronized - * on its modification and block table. - */ - if (chain->bref.mirror_tid < info->sync_tid) - chain->bref.mirror_tid = info->sync_tid; return; } /* * A degenerate flush might not have flushed anything and thus not * processed modified blocks on the way back up. Detect the case. + * + * This case can occur when modifications cross flush boundaries + * and cause the submodified recursion to run up multiple parents (?). */ if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) { kprintf("chain %p.%d %08x recursed but wasn't " @@ -843,13 +884,6 @@ retry: } #endif chain->debug_reason = (chain->debug_reason & ~255) | 10; - - /* - * Update mirror_tid, indicating that chain is synchronized - * on its modification and block table. - */ - if (chain->bref.mirror_tid < info->sync_tid) - chain->bref.mirror_tid = info->sync_tid; return; } @@ -870,10 +904,10 @@ retry: * to adjust. */ if (hammer2_debug & 0x1000) { - kprintf("Flush %p.%d %016jx/%d sync_tid %016jx\n", + kprintf("Flush %p.%d %016jx/%d sync_tid=%016jx data=%016jx\n", chain, chain->bref.type, chain->bref.key, chain->bref.keybits, - info->sync_tid); + info->sync_tid, chain->bref.data_off); } if (hammer2_debug & 0x2000) { Debugger("Flush hell"); @@ -897,15 +931,6 @@ retry: atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED); } - /* - * We are writing out the parent (writing out of the volume root is - * deferred but we still do some hand-waving). - * - * Update mirror_tid on the parent. 
- */ - if (chain->bref.mirror_tid < info->sync_tid) - chain->bref.mirror_tid = info->sync_tid; - /* * If this is part of a recursive flush we can go ahead and write * out the buffer cache buffer and pass a new bref back up the chain @@ -1110,6 +1135,7 @@ hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data) child->debug_reason = (child->debug_reason & ~255) | 1; /* do not update child->core->update_lo, core not flushed */ /* do not update core->update_lo, there may be another path */ + /* do not update mirror_tid, scan2 will ignore chain */ return (0); } @@ -1179,6 +1205,8 @@ hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data) if ((child->flags & HAMMER2_CHAIN_MODIFIED) == 0 && child->core->update_lo >= info->sync_tid) { child->debug_reason = (child->debug_reason & ~255) | 2; + if (child->bref.mirror_tid < info->sync_tid) + child->bref.mirror_tid = info->sync_tid; goto skip; } @@ -1197,6 +1225,8 @@ hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data) if ((child->flags & HAMMER2_CHAIN_MODIFIED) == 0 && child->core->update_lo >= info->sync_tid) { child->debug_reason = (child->debug_reason & ~255) | 4; + if (child->bref.mirror_tid < info->sync_tid) + child->bref.mirror_tid = info->sync_tid; goto skip; } @@ -1208,16 +1238,6 @@ hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data) --info->depth; skip: - /* - * Consider us flushed if there was no deferral. This will have - * already been handled by hammer2_chain_flush_core() but we also - * have to deal with anyone who goto'd skip. - */ - if (diddeferral == info->diddeferral) { - if (child->bref.mirror_tid < info->sync_tid) - child->bref.mirror_tid = info->sync_tid; - } - /* * Check the conditions that could cause SCAN2 to modify the parent. * Modify the parent here instead of in SCAN2, which would cause @@ -1226,10 +1246,13 @@ skip: * Scan2 is expected to update bref.mirror_tid in the domodify case, * but will skip the child otherwise giving us the responsibility to * update bref.mirror_tid. + * + * WARNING! Do NOT update the child's bref.mirror_tid right here, + * even if there was no deferral. Doing so would cause + * confusion with the child's block array state in a + * future flush. */ - if (parent->delete_tid <= trans->sync_tid && - (parent->bref.type != HAMMER2_BREF_TYPE_INODE || - (parent->flags & HAMMER2_CHAIN_DESTROYED))) { + if (h2ignore_deleted(info, parent)) { /* * Special optimization matching similar tests done in * flush_core, scan1, and scan2. Avoid updating the block @@ -1450,11 +1473,8 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) base, info->pass, parent, parent->bref.type, parent->delete_tid, trans->sync_tid); #endif - if (parent->delete_tid <= trans->sync_tid && - ((parent->flags & HAMMER2_CHAIN_DESTROYED) || - parent->bref.type != HAMMER2_BREF_TYPE_INODE)) { + if (h2ignore_deleted(info, parent)) base = NULL; - } /* * Update the parent's blockref table and propagate mirror_tid. @@ -1492,11 +1512,11 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) * last block table synchronization. 
*/ #if FLUSH_DEBUG - kprintf("S2A %p b=%p d/b=%016jx/%016jx m/b=%016jx/%016jx\n", - child, base, child->delete_tid, parent->bref.mirror_tid, + kprintf("S2A %p.%d b=%p d/b=%016jx/%016jx m/b=%016jx/%016jx\n", + child, child->bref.type, + base, child->delete_tid, parent->bref.mirror_tid, child->modify_tid, parent->bref.mirror_tid); #endif - ok = 1; if (base && child->delete_tid > parent->bref.mirror_tid && child->modify_tid <= parent->bref.mirror_tid) { @@ -1516,7 +1536,7 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) child->flags, child->bref.key, child->bref.keybits); #endif - hammer2_base_delete(parent, base, count, + hammer2_base_delete(trans, parent, base, count, &info->cache_index, child); spin_unlock(&above->cst.spin); } @@ -1531,7 +1551,12 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) * (2) The child is not being deleted in the same * transaction. */ - ok = 1; +#if FLUSH_DEBUG + kprintf("S2B %p.%d b=%p d/b=%016jx/%016jx m/b=%016jx/%016jx\n", + child, child->bref.type, + base, child->delete_tid, parent->bref.mirror_tid, + child->modify_tid, parent->bref.mirror_tid); +#endif if (base && child->modify_tid > parent->bref.mirror_tid) { KKASSERT(child->flags & HAMMER2_CHAIN_MOVED); @@ -1550,18 +1575,22 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) child->flags, child->bref.key, child->bref.keybits); #endif - hammer2_base_insert(parent, base, count, + hammer2_base_insert(trans, parent, base, count, &info->cache_index, child); spin_unlock(&above->cst.spin); } - } else if (info->pass == 3 && (child->flags & HAMMER2_CHAIN_MOVED)) { + } else if (info->pass == 3 && + (child->delete_tid == HAMMER2_MAX_TID || + child->delete_tid <= trans->sync_tid) && + (child->flags & HAMMER2_CHAIN_MOVED)) { /* - * Only clear MOVED once all possible parents have been - * flushed. When can we safely clear the MOVED flag? - * Flushes down duplicate paths can occur out of order, - * for example if an inode is moved as part of a hardlink - * consolidation or if an inode is moved into an indirect - * block indexed before the inode. + * We can't clear the MOVED bit on children whos modify_tid + * is beyond our current trans (was tested at top of scan2), + * or on deleted children which have not yet been flushed + * (handled above). + * + * Scan all parents of this child and determine if any of + * them still need the child's MOVED bit. */ hammer2_chain_t *scan; @@ -1575,30 +1604,28 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) * Can't clear child's MOVED until all parent's have * synchronized with it. * - * ignore any parents which have been deleted as-of - * our transaction id (their block array doesn't get - * updated). + * Ignore deleted parents as-of this flush TID. + * Ignore the current parent being flushed. */ - if (scan->delete_tid <= trans->sync_tid) + if (h2ignore_deleted(info, scan)) + continue; + if (scan == parent) continue; /* - * parent not synchronized if child modified or - * deleted after the parent's last sync point. - * - * (For the purpose of clearing the MOVED bit - * we do not restrict the tests to just flush - * transactions). + * For parents not already synchronized check to see + * if the flush has gotten past them yet or not. 
*/ - if (scan->bref.mirror_tid < child->modify_tid || - ((child->flags & HAMMER2_CHAIN_DELETED) && - scan->bref.mirror_tid < child->delete_tid)) { - if (hammer2_debug & 0x4000) - kprintf("(fail scan %p %016jx/%016jx)", - scan, scan->bref.mirror_tid, - child->modify_tid); - ok = 0; + if (scan->bref.mirror_tid >= trans->sync_tid) + continue; + + if (hammer2_debug & 0x4000) { + kprintf("(fail scan %p %016jx/%016jx)", + scan, scan->bref.mirror_tid, + child->modify_tid); } + ok = 0; + break; } if (hammer2_debug & 0x4000) kprintf("\n"); @@ -1612,17 +1639,9 @@ hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data) kprintf("clear moved %p.%d %016jx/%d\n", child, child->bref.type, child->bref.key, child->bref.keybits); - if (child->modify_tid <= trans->sync_tid && - (child->delete_tid == HAMMER2_MAX_TID || - child->delete_tid <= trans->sync_tid)) { - atomic_clear_int(&child->flags, - HAMMER2_CHAIN_MOVED); - hammer2_chain_drop(child); /* flag */ - KKASSERT((child->flags & - HAMMER2_CHAIN_MODIFIED) == 0); - } else { - kprintf("ok problem child %p %016jx/%016jx vs %016jx\n", child, child->modify_tid, child->delete_tid, trans->sync_tid); - } + atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED); + hammer2_chain_drop(child); /* moved cleared */ + KKASSERT((child->flags & HAMMER2_CHAIN_MODIFIED) == 0); } else { if (hammer2_debug & 0x4000) kprintf("keep moved %p.%d %016jx/%d\n", diff --git a/sys/vfs/hammer2/hammer2_freemap.c b/sys/vfs/hammer2/hammer2_freemap.c index d219045396..8da2ecec1a 100644 --- a/sys/vfs/hammer2/hammer2_freemap.c +++ b/sys/vfs/hammer2/hammer2_freemap.c @@ -87,9 +87,10 @@ hammer2_freemapradix(int radix) static int -hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, +hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain, int radix) { + hammer2_blockref_t *bref = &chain->bref; hammer2_off_t off; size_t bytes; @@ -106,6 +107,13 @@ hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, * Adjust by HAMMER2_ZONE_FREEMAP_{A,B,C,D} using the existing * offset as a basis. Start in zone A if previously unallocated. */ +#if 0 + kprintf("trans %04jx/%08x freemap chain %p.%d [%08x] %016jx/%d %016jx", + trans->sync_tid, trans->flags, + chain, chain->bref.type, chain->flags, + chain->bref.key, chain->bref.keybits, + bref->data_off); +#endif if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) { off = HAMMER2_ZONE_FREEMAP_A; } else { @@ -114,7 +122,35 @@ hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, off = off / HAMMER2_PBUFSIZE; KKASSERT(off >= HAMMER2_ZONE_FREEMAP_A); KKASSERT(off < HAMMER2_ZONE_FREEMAP_D + 4); + } + if ((trans->flags & + (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_ISALLOCATING)) == + HAMMER2_TRANS_ISFLUSH) { + /* + * Delete-Duplicates while flushing the fchain topology + * itself. + */ +#if 0 + kprintf(" flush "); +#endif + if (off >= HAMMER2_ZONE_FREEMAP_D) + off = HAMMER2_ZONE_FREEMAP_B; + else if (off >= HAMMER2_ZONE_FREEMAP_C) + off = HAMMER2_ZONE_FREEMAP_A; + else if (off >= HAMMER2_ZONE_FREEMAP_B) + off = HAMMER2_ZONE_FREEMAP_D; + else + off = HAMMER2_ZONE_FREEMAP_C; + } else { + /* + * Allocations from the freemap via a normal transaction + * or a flush whos sync_tid has been bumped (so effectively + * done as a normal transaction). 
+ */ +#if 0 + kprintf(" alloc "); +#endif if (off >= HAMMER2_ZONE_FREEMAP_D) off = HAMMER2_ZONE_FREEMAP_A; else if (off >= HAMMER2_ZONE_FREEMAP_C) @@ -124,6 +160,8 @@ hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, else off = HAMMER2_ZONE_FREEMAP_B; } + + off = off * HAMMER2_PBUFSIZE; /* @@ -163,6 +201,9 @@ hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, break; } bref->data_off = off | radix; +#if 0 + kprintf("-> %016jx\n", bref->data_off); +#endif return (0); } @@ -184,9 +225,11 @@ hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, * any lost allocations. */ int -hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, - hammer2_blockref_t *bref, size_t bytes) +hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain, + size_t bytes) { + hammer2_mount_t *hmp = chain->hmp; + hammer2_blockref_t *bref = &chain->bref; hammer2_chain_t *parent; int radix; int error; @@ -208,7 +251,7 @@ hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, */ if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE || bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { - return(hammer2_freemap_reserve(hmp, bref, radix)); + return (hammer2_freemap_reserve(trans, chain, radix)); } /* @@ -342,6 +385,7 @@ hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp, HAMMER2_LOOKUP_FREEMAP | HAMMER2_LOOKUP_ALWAYS | HAMMER2_LOOKUP_MATCHIND); + if (chain == NULL) { /* * Create the missing leaf, be sure to initialize @@ -814,8 +858,7 @@ hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp, if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) { kprintf("hammer2_freemap_adjust: %016jx: no chain\n", (intmax_t)bref->data_off); - hammer2_chain_unlock(parent); - return; + goto done; } /* @@ -972,8 +1015,8 @@ again: chain->bref.check.freemap.bigmask |= 1 << radix; hammer2_chain_unlock(chain); +done: hammer2_chain_unlock(parent); - atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING); if (trans->flags & HAMMER2_TRANS_ISFLUSH) --trans->sync_tid; diff --git a/sys/vfs/hammer2/hammer2_vfsops.c b/sys/vfs/hammer2/hammer2_vfsops.c index 095ab61cb2..d141664159 100644 --- a/sys/vfs/hammer2/hammer2_vfsops.c +++ b/sys/vfs/hammer2/hammer2_vfsops.c @@ -190,6 +190,9 @@ static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data); static void hammer2_write_thread(void *arg); +static void hammer2_vfs_unmount_hmp1(struct mount *mp, hammer2_mount_t *hmp); +static void hammer2_vfs_unmount_hmp2(struct mount *mp, hammer2_mount_t *hmp); + /* * Functions for compression in threads, * from hammer2_vnops.c @@ -515,6 +518,9 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, */ error = hammer2_install_volume_header(hmp); if (error) { + ++hmp->pmp_count; + hammer2_vfs_unmount_hmp1(mp, hmp); + hammer2_vfs_unmount_hmp2(mp, hmp); hammer2_vfs_unmount(mp, MNT_FORCE); return error; } @@ -524,7 +530,6 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, hmp->fchain.bref.mirror_tid = hmp->voldata.freemap_tid; hmp->fchain.modify_tid = hmp->voldata.freemap_tid; - /* * First locate the super-root inode, which is key 0 * relative to the volume header's blockset. 
@@ -539,6 +544,9 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, hammer2_chain_lookup_done(parent); if (schain == NULL) { kprintf("hammer2_mount: invalid super-root\n"); + ++hmp->pmp_count; + hammer2_vfs_unmount_hmp1(mp, hmp); + hammer2_vfs_unmount_hmp2(mp, hmp); hammer2_vfs_unmount(mp, MNT_FORCE); return EINVAL; } @@ -627,25 +635,32 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, if (rchain == NULL) { kprintf("hammer2_mount: PFS label not found\n"); - --hmp->pmp_count; + hammer2_vfs_unmount_hmp1(mp, hmp); + hammer2_vfs_unmount_hmp2(mp, hmp); hammer2_vfs_unmount(mp, MNT_FORCE); return EINVAL; } if (rchain->flags & HAMMER2_CHAIN_MOUNTED) { hammer2_chain_unlock(rchain); kprintf("hammer2_mount: PFS label already mounted!\n"); - --hmp->pmp_count; + hammer2_vfs_unmount_hmp1(mp, hmp); + hammer2_vfs_unmount_hmp2(mp, hmp); hammer2_vfs_unmount(mp, MNT_FORCE); return EBUSY; } #if 0 if (rchain->flags & HAMMER2_CHAIN_RECYCLE) { kprintf("hammer2_mount: PFS label currently recycling\n"); - --hmp->pmp_count; + hammer2_vfs_unmount_hmp1(mp, hmp); + hammer2_vfs_unmount_hmp2(mp, hmp); hammer2_vfs_unmount(mp, MNT_FORCE); return EBUSY; } #endif + /* + * After this point hammer2_vfs_unmount() has visibility on hmp + * and manual hmp1/hmp2 calls are not needed on fatal errors. + */ atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED); @@ -1370,13 +1385,13 @@ hammer2_vfs_unmount(struct mount *mp, int mntflags) hammer2_chain_t *rchain; int flags; int error = 0; - int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); - int dumpcnt; int i; - struct vnode *devvp; pmp = MPTOPMP(mp); + if (pmp == NULL) + return(0); + ccms_domain_uninit(&pmp->ccms_dom); kdmsg_iocom_uninit(&pmp->iocom); /* XXX chain dependency */ @@ -1412,46 +1427,7 @@ hammer2_vfs_unmount(struct mount *mp, int mntflags) for (i = 0; i < pmp->cluster.nchains; ++i) { hmp = pmp->cluster.chains[i]->hmp; - hammer2_mount_exlock(hmp); - - --hmp->pmp_count; - kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", - hmp, hmp->pmp_count); - - /* - * Flush any left over chains. The voldata lock is only used - * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX. - * - * Flush twice to ensure that the freemap is completely - * synchronized. If we only do it once the next mount's - * recovery scan will have to do some fixups (which isn't - * bad, but we don't want it to have to do it except when - * recovering from a crash). - */ - hammer2_voldata_lock(hmp); - if (((hmp->vchain.flags | hmp->fchain.flags) & - HAMMER2_CHAIN_MODIFIED) || - hmp->vchain.core->update_hi > hmp->voldata.mirror_tid || - hmp->fchain.core->update_hi > hmp->voldata.freemap_tid) { - hammer2_voldata_unlock(hmp, 0); - hammer2_vfs_sync(mp, MNT_WAIT); - /*hammer2_vfs_sync(mp, MNT_WAIT);*/ - } else { - hammer2_voldata_unlock(hmp, 0); - } - if (hmp->pmp_count == 0) { - if (((hmp->vchain.flags | hmp->fchain.flags) & - HAMMER2_CHAIN_MODIFIED) || - (hmp->vchain.core->update_hi > - hmp->voldata.mirror_tid) || - (hmp->fchain.core->update_hi > - hmp->voldata.freemap_tid)) { - kprintf("hammer2_unmount: chains left over " - "after final sync\n"); - if (hammer2_debug & 0x0010) - Debugger("entered debugger"); - } - } + hammer2_vfs_unmount_hmp1(mp, hmp); /* * Cleanup the root and super-root chain elements @@ -1484,63 +1460,7 @@ hammer2_vfs_unmount(struct mount *mp, int mntflags) pmp->cluster.chains[i] = NULL; } - /* - * If no PFS's left drop the master hammer2_mount for the - * device. 
- */ - if (hmp->pmp_count == 0) { - if (hmp->sroot) { - hammer2_inode_drop(hmp->sroot); - hmp->sroot = NULL; - } - - /* - * Finish up with the device vnode - */ - if ((devvp = hmp->devvp) != NULL) { - vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); - vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0); - hmp->devvp = NULL; - VOP_CLOSE(devvp, - (ronly ? FREAD : FREAD|FWRITE)); - vn_unlock(devvp); - vrele(devvp); - devvp = NULL; - } - - /* - * Final drop of embedded freemap root chain to - * clean up fchain.core (fchain structure is not - * flagged ALLOCATED so it is cleaned out and then - * left to rot). - */ - hammer2_chain_drop(&hmp->fchain); - - /* - * Final drop of embedded volume root chain to clean - * up vchain.core (vchain structure is not flagged - * ALLOCATED so it is cleaned out and then left to - * rot). - */ - dumpcnt = 50; - hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt); - dumpcnt = 50; - hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt); - hammer2_mount_unlock(hmp); - hammer2_chain_drop(&hmp->vchain); - - hammer2_io_cleanup(hmp, &hmp->iotree); - if (hmp->iofree_count) { - kprintf("io_cleanup: %d I/O's left hanging\n", - hmp->iofree_count); - } - - TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); - kmalloc_destroy(&hmp->mchain); - kfree(hmp, M_HAMMER2); - } else { - hammer2_mount_unlock(hmp); - } + hammer2_vfs_unmount_hmp2(mp, hmp); } pmp->mp = NULL; @@ -1558,6 +1478,118 @@ failed: return (error); } +static +void +hammer2_vfs_unmount_hmp1(struct mount *mp, hammer2_mount_t *hmp) +{ + hammer2_mount_exlock(hmp); + --hmp->pmp_count; + + kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count); + + /* + * Flush any left over chains. The voldata lock is only used + * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX. + * + * Flush twice to ensure that the freemap is completely + * synchronized. If we only do it once the next mount's + * recovery scan will have to do some fixups (which isn't + * bad, but we don't want it to have to do it except when + * recovering from a crash). + */ + hammer2_voldata_lock(hmp); + if (((hmp->vchain.flags | hmp->fchain.flags) & + HAMMER2_CHAIN_MODIFIED) || + hmp->vchain.core->update_hi > hmp->voldata.mirror_tid || + hmp->fchain.core->update_hi > hmp->voldata.freemap_tid) { + hammer2_voldata_unlock(hmp, 0); + hammer2_vfs_sync(mp, MNT_WAIT); + /*hammer2_vfs_sync(mp, MNT_WAIT);*/ + } else { + hammer2_voldata_unlock(hmp, 0); + } + if (hmp->pmp_count == 0) { + if (((hmp->vchain.flags | hmp->fchain.flags) & + HAMMER2_CHAIN_MODIFIED) || + (hmp->vchain.core->update_hi > + hmp->voldata.mirror_tid) || + (hmp->fchain.core->update_hi > + hmp->voldata.freemap_tid)) { + kprintf("hammer2_unmount: chains left over " + "after final sync\n"); + if (hammer2_debug & 0x0010) + Debugger("entered debugger"); + } + } +} + +static +void +hammer2_vfs_unmount_hmp2(struct mount *mp, hammer2_mount_t *hmp) +{ + struct vnode *devvp; + int dumpcnt; + int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); + + /* + * If no PFS's left drop the master hammer2_mount for the + * device. + */ + if (hmp->pmp_count == 0) { + if (hmp->sroot) { + hammer2_inode_drop(hmp->sroot); + hmp->sroot = NULL; + } + + /* + * Finish up with the device vnode + */ + if ((devvp = hmp->devvp) != NULL) { + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); + vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0); + hmp->devvp = NULL; + VOP_CLOSE(devvp, + (ronly ? 
FREAD : FREAD|FWRITE)); + vn_unlock(devvp); + vrele(devvp); + devvp = NULL; + } + + /* + * Final drop of embedded freemap root chain to + * clean up fchain.core (fchain structure is not + * flagged ALLOCATED so it is cleaned out and then + * left to rot). + */ + hammer2_chain_drop(&hmp->fchain); + + /* + * Final drop of embedded volume root chain to clean + * up vchain.core (vchain structure is not flagged + * ALLOCATED so it is cleaned out and then left to + * rot). + */ + dumpcnt = 50; + hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt); + dumpcnt = 50; + hammer2_dump_chain(&hmp->fchain, 0, &dumpcnt); + hammer2_mount_unlock(hmp); + hammer2_chain_drop(&hmp->vchain); + + hammer2_io_cleanup(hmp, &hmp->iotree); + if (hmp->iofree_count) { + kprintf("io_cleanup: %d I/O's left hanging\n", + hmp->iofree_count); + } + + TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); + kmalloc_destroy(&hmp->mchain); + kfree(hmp, M_HAMMER2); + } else { + hammer2_mount_unlock(hmp); + } +} + static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, diff --git a/sys/vfs/hammer2/hammer2_vnops.c b/sys/vfs/hammer2/hammer2_vnops.c index 5747337789..04ffe39031 100644 --- a/sys/vfs/hammer2/hammer2_vnops.c +++ b/sys/vfs/hammer2/hammer2_vnops.c @@ -378,10 +378,8 @@ hammer2_vop_fsync(struct vop_fsync_args *ap) vp = ap->a_vp; ip = VTOI(vp); - /* - * WARNING: Cannot use TRANS_ISFLUSH for partial syncs. - */ #if 0 + /* XXX can't do this yet */ hammer2_trans_init(&trans, ip->pmp, NULL, HAMMER2_TRANS_ISFLUSH); vfsync(vp, ap->a_waitfor, 1, NULL, NULL); hammer2_trans_clear_invfsync(&trans); -- 2.41.0
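
For illustration only, a minimal user-space sketch of the generation-counter pattern behind the hammer2_chain_get()/hammer2_chain_insert() race detection in this patch: the caller samples the parent core's generation while holding the lock, drops the lock to perform the expensive allocation, and the insert fails with EAGAIN if the generation moved in the meantime, forcing a retry. The names (core_t, chain_t, core_insert, get_or_create), the pthread mutex standing in for the core spinlock, and the singly-linked list standing in for the layered red-black trees are all simplifications, not the actual HAMMER2 structures.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the chain core and chain. */
typedef struct core {
	pthread_mutex_t lock;		/* stands in for the core spinlock */
	int		generation;	/* bumped on every topology change */
	struct chain	*first;		/* toy singly-linked "block table" */
} core_t;

typedef struct chain {
	struct chain	*next;
	int		key;
} chain_t;

/*
 * Insert 'chain' only if the core's generation still matches the value
 * the caller sampled earlier; otherwise return EAGAIN so the caller can
 * retry its lookup (mirrors the HAMMER2_CHAIN_INSERT_RACE handling).
 */
static int
core_insert(core_t *core, chain_t *chain, int generation)
{
	int error = 0;

	pthread_mutex_lock(&core->lock);
	if (core->generation != generation) {
		error = EAGAIN;		/* somebody changed the core */
	} else {
		chain->next = core->first;
		core->first = chain;
		++core->generation;	/* our insert is a change too */
	}
	pthread_mutex_unlock(&core->lock);
	return (error);
}

/* Caller pattern: sample the generation, drop the lock, allocate, retry. */
static chain_t *
get_or_create(core_t *core, int key)
{
	chain_t *scan;
	chain_t *nchain;
	int generation;

	for (;;) {
		pthread_mutex_lock(&core->lock);
		generation = core->generation;
		for (scan = core->first; scan; scan = scan->next) {
			if (scan->key == key) {
				pthread_mutex_unlock(&core->lock);
				return (scan);	/* already present */
			}
		}
		pthread_mutex_unlock(&core->lock);

		nchain = calloc(1, sizeof(*nchain));	/* slow path */
		nchain->key = key;
		if (core_insert(core, nchain, generation) == 0)
			return (nchain);
		free(nchain);				/* raced, retry */
	}
}

int
main(void)
{
	core_t core = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	chain_t *chain = get_or_create(&core, 42);

	printf("inserted key %d, generation is now %d\n",
	       chain->key, core.generation);
	return (0);
}

The retry-on-EAGAIN design lets lookups proceed while the parent is held only shared: the generation check detects a concurrent insertion without having to hold the spinlock across the allocation.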
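
A similar illustration-only sketch of the reworked transaction sequencing: flushes now consume two consecutive transaction ids so that freemap allocations made as a side effect of a flush carry a different id than the flush's own block-table updates. volume_t, trans_t and freemap_alloc_tid() are hypothetical simplifications; the real code manages this through hmp->voldata.alloc_tid in hammer2_trans_init() and by temporarily bumping trans->sync_tid around freemap allocations.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified transaction state. */
typedef struct volume {
	uint64_t	next_tid;	/* stands in for voldata.alloc_tid */
} volume_t;

typedef struct trans {
	uint64_t	sync_tid;	/* id used to stamp modifications */
	uint64_t	real_tid;	/* fixed id of the operation itself */
	int		is_flush;
} trans_t;

/*
 * A flush consumes two consecutive ids: N for the flush proper and N+1
 * reserved for freemap allocations made as a side effect of the flush,
 * so those allocations remain distinguishable from block-table updates
 * performed by the flush itself.
 */
static void
trans_init(volume_t *vol, trans_t *trans, int is_flush)
{
	trans->is_flush = is_flush;
	trans->sync_tid = ++vol->next_tid;	/* id N */
	trans->real_tid = trans->sync_tid;
	if (is_flush)
		++vol->next_tid;		/* reserve id N+1 */
}

/* Id under which a freemap allocation would be stamped. */
static uint64_t
freemap_alloc_tid(const trans_t *trans)
{
	return (trans->is_flush ? trans->real_tid + 1 : trans->sync_tid);
}

int
main(void)
{
	volume_t vol = { 1000 };
	trans_t fs_op;
	trans_t flush;

	trans_init(&vol, &fs_op, 0);
	trans_init(&vol, &flush, 1);
	printf("fs_op tid %ju, flush tid %ju, flush freemap tid %ju\n",
	       (uintmax_t)fs_op.sync_tid, (uintmax_t)flush.real_tid,
	       (uintmax_t)freemap_alloc_tid(&flush));
	return (0);
}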
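
Finally, a toy sketch of the forked-chain handling in the freemap reservation code: when a chain forks (one copy touched by the freemap flush itself, the other by a concurrent transaction), the two paths must compute different reserved blocks. The zone names mirror HAMMER2_ZONE_FREEMAP_{A,B,C,D}, but the rotation shown here is deliberately simplified; the real hammer2_freemap_reserve() selects among the reserved zones with an explicit mapping that differs between the flush and non-flush cases.

#include <stdio.h>

/* Hypothetical stand-ins for the reserved HAMMER2_ZONE_FREEMAP_{A,B,C,D}. */
enum zone { ZONE_A, ZONE_B, ZONE_C, ZONE_D };

/*
 * Toy rotation: a normal allocating transaction steps one reserved zone
 * forward while a flush of the freemap topology itself steps two, so the
 * two possible forks of the same freemap node/leaf always end up in
 * different reserved zones.  (The real code uses a hand-chosen mapping
 * between the zones rather than modular arithmetic.)
 */
static enum zone
next_zone(enum zone cur, int flushing_freemap)
{
	return ((enum zone)((cur + (flushing_freemap ? 2 : 1)) % 4));
}

int
main(void)
{
	enum zone cur = ZONE_B;

	printf("normal fork -> zone %d, freemap-flush fork -> zone %d\n",
	       next_zone(cur, 0), next_zone(cur, 1));
	return (0);
}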