2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain structure.
39 * Chains are the in-memory version of media objects (volume header, inodes,
40 * indirect blocks, data blocks, etc). Chains represent a portion of the
43 * A chain is topologically stable once it has been inserted into the
44 * in-memory topology. Modifications which copy, move, or resize the chain
45 * are handled via the DELETE-DUPLICATE mechanic where the original chain
46 * stays intact but is marked deleted and a new chain is allocated which
47 * shares the old chain's children.
49 * This sharing is handled via the hammer2_chain_core structure.
51 * The DELETE-DUPLICATE mechanism allows the same topological level to contain
52 * many overloadings. However, our RBTREE mechanics require that there be
53 * no overlaps so we accomplish the overloading by moving conflicting chains
54 * with smaller or equal radii into a sub-RBTREE under the chain being
57 * DELETE-DUPLICATE is also used when a modification to a chain crosses a
58 * flush synchronization boundary, allowing the flush code to continue flushing
59 * the older version of the topology and not be disrupted by new frontend
64 * All lookup and iterate operations and most modifications are done on the
65 * live view. During flushes lookups are not normally done and modifications
66 * may be run on the flush view. However, flushes often need to allocate
67 * blocks and the freemap_alloc/free code issues lookups. This code is
68 * special-cased to use the live view when called from a flush.
70 * General chain lookup/iteration functions are NOT aware of the flush view,
71 * they only know about live views.
73 #include <sys/cdefs.h>
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/types.h>
78 #include <sys/kern_syscall.h>
83 static int hammer2_indirect_optimize; /* XXX SYSCTL */
85 static hammer2_chain_t *hammer2_chain_create_indirect(
86 hammer2_trans_t *trans, hammer2_chain_t *parent,
87 hammer2_key_t key, int keybits, int for_type, int *errorp);
88 static void hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop);
89 static void adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
90 static hammer2_chain_t *hammer2_combined_find(
91 hammer2_chain_t *parent,
92 hammer2_blockref_t *base, int count,
93 int *cache_indexp, hammer2_key_t *key_nextp,
94 hammer2_key_t key_beg, hammer2_key_t key_end,
95 hammer2_blockref_t **bresp);
96 static void hammer2_chain_assert_not_present(hammer2_chain_core_t *above,
97 hammer2_chain_t *chain);
100 * Basic RBTree for chains. Chains cannot overlap within any given
101 * core->rbtree without recursing through chain->rbtree. We effectively
102 * guarantee this by checking the full range rather than just the first
103 * key element. By matching on the full range callers can detect when
104 * recursion through chain->rbtree is needed.
106 * NOTE: This also means that a delete-duplicate on the same key will
107 * overload by placing the deleted element in the new element's
108 * chain->rbtree (when doing a direct replacement).
110 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
113 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
115 hammer2_key_t c1_beg;
116 hammer2_key_t c1_end;
117 hammer2_key_t c2_beg;
118 hammer2_key_t c2_end;
120 c1_beg = chain1->bref.key;
121 c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1;
122 c2_beg = chain2->bref.key;
123 c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1;
125 if (c1_end < c2_beg) /* fully to the left */
127 if (c1_beg > c2_end) /* fully to the right */
129 return(0); /* overlap (must not cross edge boundary) */
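/*
 * Worked example (illustrative values): a chain with bref.key 0x0000 and
 * keybits 10 covers keys 0x0000-0x03ff inclusive, while one with key 0x0400
 * and keybits 10 covers 0x0400-0x07ff.  The first compares fully to the left
 * of the second, so the two can coexist in the same rbtree.
 */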
134 hammer2_isclusterable(hammer2_chain_t *chain)
136 if (hammer2_cluster_enable) {
137 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
138 chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
139 chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
147 * Recursively set the update_hi flag up to the root starting at chain's
148 * parent->core. update_hi is not set in chain's core.
150 * This controls top-down visibility for flushes. The child has just one
151 * 'above' core, but the core itself can be multi-homed with parents iterated
154 * This function is not used during a flush (except when the flush is
155 * allocating which requires the live tree). The flush keeps track of its
158 * XXX needs to be optimized to use roll-up TIDs. update_hi is only really
159 * compared against bref.mirror_tid which itself is only updated by a flush.
162 hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
164 hammer2_chain_core_t *above;
166 while ((above = chain->above) != NULL) {
167 spin_lock(&above->cst.spin);
169 if (above->update_hi < trans->sync_tid)
170 above->update_hi = trans->sync_tid;
171 chain = TAILQ_LAST(&above->ownerq, h2_core_list);
173 TAILQ_FOREACH_REVERSE(chain, &above->ownerq,
174 h2_core_list, core_entry) {
175 if (trans->sync_tid >= chain->modify_tid &&
176 trans->sync_tid <= chain->delete_tid) {
181 spin_unlock(&above->cst.spin);
186 * Allocate a new disconnected chain element representing the specified
187 * bref. chain->refs is set to 1 and the passed bref is copied to
188 * chain->bref. chain->bytes is derived from the bref.
190 * chain->core is NOT allocated and the media data and bp pointers are left
191 * NULL. The caller must call chain_core_alloc() to allocate or associate
192 * a core with the chain.
194 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
197 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_pfsmount_t *pmp,
198 hammer2_trans_t *trans, hammer2_blockref_t *bref)
200 hammer2_chain_t *chain;
201 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
204 * Construct the appropriate system structure.
207 case HAMMER2_BREF_TYPE_INODE:
208 case HAMMER2_BREF_TYPE_INDIRECT:
209 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
210 case HAMMER2_BREF_TYPE_DATA:
211 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
213 * Chains are really only associated with the hmp but we
214 * maintain a pmp association for per-mount memory tracking
215 * purposes. The pmp can be NULL.
217 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
221 case HAMMER2_BREF_TYPE_VOLUME:
222 case HAMMER2_BREF_TYPE_FREEMAP:
224 panic("hammer2_chain_alloc volume type illegal for op");
227 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
233 chain->bytes = bytes;
235 chain->flags = HAMMER2_CHAIN_ALLOCATED;
236 chain->delete_tid = HAMMER2_MAX_TID;
239 * Set modify_tid if a transaction is creating the chain. When
240 * loading a chain from backing store trans is passed as NULL and
241 * modify_tid is left set to 0.
244 chain->modify_tid = trans->sync_tid;
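/*
 * Illustrative pairing (a sketch, not part of the driver): a freshly
 * allocated chain must be given a core before use.  The variables below
 * (hmp, parent, bref) are assumed to be in scope; see hammer2_chain_get()
 * further down for a real caller.
 */
#if 0
chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
hammer2_chain_core_alloc(NULL, chain, NULL);
/* chain is now referenced but not locked */
#endif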
250 * Associate an existing core with the chain or allocate a new core.
252 * The core is not locked. No additional refs on the chain are made.
253 * (trans) must not be NULL if (core) is not NULL.
255 * When chains are delete-duplicated during flushes we insert nchain on
256 * the ownerq after ochain instead of at the end in order to give the
257 * drop code visibility in the correct order, otherwise drops can be missed.
260 hammer2_chain_core_alloc(hammer2_trans_t *trans,
261 hammer2_chain_t *nchain, hammer2_chain_t *ochain)
263 hammer2_chain_core_t *core;
265 KKASSERT(nchain->core == NULL);
267 if (ochain == NULL) {
269 * Fresh core under nchain (no multi-homing of ochain's
272 core = kmalloc(sizeof(*core), nchain->hmp->mchain,
274 TAILQ_INIT(&core->layerq);
275 TAILQ_INIT(&core->ownerq);
279 core->update_hi = trans->sync_tid;
281 core->update_hi = nchain->bref.mirror_tid;
283 ccms_cst_init(&core->cst, nchain);
284 TAILQ_INSERT_TAIL(&core->ownerq, nchain, core_entry);
287 * Propagate the PFSROOT flag which we set on all subdirs
288 * under the super-root.
290 atomic_set_int(&nchain->flags,
291 ochain->flags & HAMMER2_CHAIN_PFSROOT);
294 * Duplicating ochain -> nchain. Set the DUPLICATED flag on
295 * ochain if nchain is not a snapshot.
297 * It is possible for the DUPLICATED flag to already be
298 * set when called via a flush operation because flush
299 * operations may have to work on elements with delete_tid's
300 * beyond the flush sync_tid. In this situation we must
301 * ensure that nchain is placed just after ochain in the
302 * ownerq and that the DUPLICATED flag is set on nchain so
303 * 'live' operations skip past it to the correct chain.
305 * The flusher understands the blockref synchronization state
306 * for any stale chains by observing bref.mirror_tid, which
307 * delete-duplicate replicates.
309 * WARNING! However, the case is disallowed when the flusher
310 * is allocating freemap space because this entails
311 * more than just adjusting a block table.
313 if (ochain->flags & HAMMER2_CHAIN_DUPLICATED) {
314 KKASSERT((trans->flags &
315 (HAMMER2_TRANS_ISFLUSH |
316 HAMMER2_TRANS_ISALLOCATING)) ==
317 HAMMER2_TRANS_ISFLUSH);
318 atomic_set_int(&nchain->flags,
319 HAMMER2_CHAIN_DUPLICATED);
321 if ((nchain->flags & HAMMER2_CHAIN_SNAPSHOT) == 0) {
322 atomic_set_int(&ochain->flags,
323 HAMMER2_CHAIN_DUPLICATED);
326 atomic_add_int(&core->sharecnt, 1);
328 spin_lock(&core->cst.spin);
332 if (core->update_hi < trans->sync_tid)
333 core->update_hi = trans->sync_tid;
337 * Maintain ordering for refactor test so we don't skip over
338 * a snapshot. Also, during flushes, delete-duplications
339 * for block-table updates can occur on blocks already
340 * deleted (delete-duplicated by a later transaction). We
341 * must insert nchain after ochain but before the later
342 * transaction's copy.
344 TAILQ_INSERT_AFTER(&core->ownerq, ochain, nchain, core_entry);
346 spin_unlock(&core->cst.spin);
351 * Add a reference to a chain element, preventing its destruction.
354 hammer2_chain_ref(hammer2_chain_t *chain)
356 atomic_add_int(&chain->refs, 1);
360 * Insert the chain in the core rbtree at the first layer
361 * which accepts it (for now we don't sort layers by the transaction tid)
363 #define HAMMER2_CHAIN_INSERT_SPIN 0x0001
364 #define HAMMER2_CHAIN_INSERT_LIVE 0x0002
365 #define HAMMER2_CHAIN_INSERT_RACE 0x0004
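/*
 * Descriptive note: INSERT_SPIN acquires above->cst.spin around the
 * insertion, INSERT_LIVE bumps above->live_count for non-deleted chains,
 * and INSERT_RACE aborts the insertion if above->generation no longer
 * matches the generation snapshot supplied by the caller.
 */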
369 hammer2_chain_insert(hammer2_chain_core_t *above, hammer2_chain_layer_t *layer,
370 hammer2_chain_t *chain, int flags, int generation)
372 hammer2_chain_t *xchain;
373 hammer2_chain_layer_t *nlayer;
376 if (flags & HAMMER2_CHAIN_INSERT_SPIN)
377 spin_lock(&above->cst.spin);
380 * Special case, place the chain in the next most-recent layer relative to
381 * the specified layer, inserting a new layer in between if necessary.
384 KKASSERT((flags & HAMMER2_CHAIN_INSERT_RACE) == 0);
385 nlayer = TAILQ_PREV(layer, h2_layer_list, entry);
386 if (nlayer && RB_INSERT(hammer2_chain_tree,
387 &nlayer->rbtree, chain) == NULL) {
392 spin_unlock(&above->cst.spin);
393 KKASSERT((flags & HAMMER2_CHAIN_INSERT_LIVE) == 0);
394 nlayer = kmalloc(sizeof(*nlayer), chain->hmp->mchain,
396 RB_INIT(&nlayer->rbtree);
397 nlayer->good = 0xABCD;
398 spin_lock(&above->cst.spin);
400 if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0)
401 hammer2_chain_assert_not_present(above, chain);
403 TAILQ_INSERT_BEFORE(layer, nlayer, entry);
404 RB_INSERT(hammer2_chain_tree, &nlayer->rbtree, chain);
410 * Interlocked by spinlock, check for race
412 if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
413 above->generation != generation) {
419 * Try to insert, allocate a new layer if a nominal collision
420 * occurs (a collision is different from an SMP race).
422 layer = TAILQ_FIRST(&above->layerq);
426 (xchain = RB_INSERT(hammer2_chain_tree,
427 &layer->rbtree, chain)) != NULL) {
430 * Allocate a new layer to resolve the issue.
432 spin_unlock(&above->cst.spin);
433 layer = kmalloc(sizeof(*layer), chain->hmp->mchain,
435 RB_INIT(&layer->rbtree);
436 layer->good = 0xABCD;
437 spin_lock(&above->cst.spin);
439 if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
440 above->generation != generation) {
441 spin_unlock(&above->cst.spin);
442 kfree(layer, chain->hmp->mchain);
443 spin_lock(&above->cst.spin);
447 hammer2_chain_assert_not_present(above, chain);
449 TAILQ_INSERT_HEAD(&above->layerq, layer, entry);
450 RB_INSERT(hammer2_chain_tree, &layer->rbtree, chain);
453 chain->above = above;
454 chain->inlayer = layer;
455 ++above->chain_count;
457 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
460 * We have to keep track of the effective live-view blockref count
461 * so the create code knows when to push an indirect block.
463 if ((flags & HAMMER2_CHAIN_INSERT_LIVE) &&
464 (chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
465 atomic_add_int(&above->live_count, 1);
468 if (flags & HAMMER2_CHAIN_INSERT_SPIN)
469 spin_unlock(&above->cst.spin);
474 * Drop the caller's reference to the chain. When the ref count drops to
475 * zero this function will try to disassociate the chain from its parent and
476 * deallocate it, then recursively drop the parent using the implied ref
477 * from the chain's chain->parent.
479 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain,
480 struct h2_core_list *delayq);
483 hammer2_chain_drop(hammer2_chain_t *chain)
485 struct h2_core_list delayq;
486 hammer2_chain_t *scan;
490 if (hammer2_debug & 0x200000)
493 if (chain->flags & HAMMER2_CHAIN_MOVED)
495 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
497 KKASSERT(chain->refs > need);
507 chain = hammer2_chain_lastdrop(chain, &delayq);
509 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
511 /* retry the same chain */
515 * When we've exhausted lastdrop chaining pull off of delayq.
516 * chains on delayq are dead but are used to placehold other
517 * chains which we added a ref to for the purpose of dropping.
520 hammer2_mount_t *hmp;
522 if ((scan = TAILQ_FIRST(&delayq)) != NULL) {
523 chain = (void *)scan->data;
524 TAILQ_REMOVE(&delayq, scan, core_entry);
525 scan->flags &= ~HAMMER2_CHAIN_ALLOCATED;
528 kfree(scan, hmp->mchain);
535 * Safe handling of the 1->0 transition on chain. Returns a chain for
536 * recursive drop or NULL, possibly returning the same chain if the atomic
539 * When two chains need to be recursively dropped we use the chain
540 * we would otherwise free to placehold the additional chain. It's a bit
541 * convoluted but we can't just recurse without potentially blowing out
544 * The chain cannot be freed if it has a non-empty core (children) or
545 * it is not at the head of ownerq.
547 * The cst spinlock is allowed to nest child-to-parent (not parent-to-child).
551 hammer2_chain_lastdrop(hammer2_chain_t *chain, struct h2_core_list *delayq)
553 hammer2_pfsmount_t *pmp;
554 hammer2_mount_t *hmp;
555 hammer2_chain_core_t *above;
556 hammer2_chain_core_t *core;
557 hammer2_chain_layer_t *layer;
558 hammer2_chain_t *rdrop1;
559 hammer2_chain_t *rdrop2;
562 * Spinlock the core and check to see if it is empty. If it is
563 * not empty we leave chain intact with refs == 0. The elements
564 * in core->rbtree are associated with other chains contemporary
565 * with ours but not with our chain directly.
567 if ((core = chain->core) != NULL) {
568 spin_lock(&core->cst.spin);
571 * We can't free non-stale chains with children until we are
572 * able to free the children because there might be a flush
573 * dependency. Flushes of stale children (which should also
574 * have their deleted flag set) short-cut recursive flush
575 * dependencies and can be freed here. Any flushes which run
576 * through stale children due to the flush synchronization
577 * point should have the MOVED bit set in the chain and not
578 * reach lastdrop at this time.
580 * NOTE: We return (chain) on failure to retry.
582 if (core->chain_count &&
583 (chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0) {
584 if (atomic_cmpset_int(&chain->refs, 1, 0))
585 chain = NULL; /* success */
586 spin_unlock(&core->cst.spin);
589 /* no chains left under us */
592 * Various parts of the code might be holding a ref on a
593 * stale chain as a placemarker which must be iterated to
594 * locate a later non-stale (live) chain. We must be sure
595 * NOT to free the later non-stale chain (which might have
596 * no refs). Otherwise mass confusion may result.
598 * The DUPLICATED flag tells us whether the chain is stale
599 * or not, so the rule is that any chain whose DUPLICATED flag
600 * is NOT set must also be at the head of the ownerq.
602 * Note that the DELETED flag is not involved. That is, a
603 * live chain can represent a deletion that has not yet been
604 * flushed (or still has refs).
607 if (TAILQ_NEXT(chain, core_entry) == NULL &&
608 TAILQ_FIRST(&core->ownerq) != chain) {
610 if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0 &&
611 TAILQ_FIRST(&core->ownerq) != chain) {
612 if (atomic_cmpset_int(&chain->refs, 1, 0))
613 chain = NULL; /* success */
614 spin_unlock(&core->cst.spin);
620 * chain->core has no children left so no accessors can get to our
621 * chain from there. Now we have to lock the above core to interlock
622 * remaining possible accessors that might bump chain's refs before
623 * we can safely drop chain's refs with intent to free the chain.
626 pmp = chain->pmp; /* can be NULL */
632 * Spinlock the parent and try to drop the last ref on chain.
633 * On success remove chain from its parent, otherwise return NULL.
635 * (normal core locks are top-down recursive but we define core
636 * spinlocks as bottom-up recursive, so this is safe).
638 if ((above = chain->above) != NULL) {
639 spin_lock(&above->cst.spin);
640 if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
641 /* 1->0 transition failed */
642 spin_unlock(&above->cst.spin);
644 spin_unlock(&core->cst.spin);
645 return(chain); /* retry */
649 * 1->0 transition successful, remove chain from its
650 * above core. Track layer for removal/freeing.
652 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
653 layer = chain->inlayer;
654 RB_REMOVE(hammer2_chain_tree, &layer->rbtree, chain);
655 --above->chain_count;
656 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
658 chain->inlayer = NULL;
660 if (RB_EMPTY(&layer->rbtree) && layer->refs == 0) {
661 TAILQ_REMOVE(&above->layerq, layer, entry);
667 * If our chain was the last chain in the parent's core the
668 * core is now empty and its parents might now be droppable.
669 * Try to drop the first multi-homed parent by gaining a
670 * ref on it here and then dropping it below.
672 if (above->chain_count == 0) {
673 rdrop1 = TAILQ_FIRST(&above->ownerq);
675 atomic_cmpset_int(&rdrop1->refs, 0, 1) == 0) {
679 spin_unlock(&above->cst.spin);
680 above = NULL; /* safety */
684 * Successful 1->0 transition and the chain can be destroyed now.
686 * We still have the core spinlock (if core is non-NULL), and core's
687 * chain_count is 0. The above spinlock is gone.
689 * Remove chain from ownerq. Once core has no more owners (and no
690 * children which is already the case) we can destroy core.
692 * If core has more owners we may be able to continue a bottom-up
693 * drop with our next sibling.
698 TAILQ_REMOVE(&core->ownerq, chain, core_entry);
699 rdrop2 = TAILQ_FIRST(&core->ownerq);
700 if (rdrop2 && atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0)
702 spin_unlock(&core->cst.spin);
705 * We can do the final 1->0 transition with an atomic op
706 * after releasing core's spinlock.
708 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
710 * On the 1->0 transition of core we can destroy
711 * it. Any remaining layers should no longer be
712 * referenced or visible to other threads.
714 KKASSERT(TAILQ_EMPTY(&core->ownerq));
716 layer->good = 0xEF00;
717 kfree(layer, hmp->mchain);
719 while ((layer = TAILQ_FIRST(&core->layerq)) != NULL) {
720 KKASSERT(layer->refs == 0 &&
721 RB_EMPTY(&layer->rbtree));
722 TAILQ_REMOVE(&core->layerq, layer, entry);
723 layer->good = 0xEF01;
724 kfree(layer, hmp->mchain);
727 KKASSERT(core->cst.count == 0);
728 KKASSERT(core->cst.upgrade == 0);
730 kfree(core, hmp->mchain);
732 core = NULL; /* safety */
736 * All spin locks are gone, finish freeing stuff.
738 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
739 HAMMER2_CHAIN_MODIFIED)) == 0);
740 hammer2_chain_drop_data(chain, 1);
742 KKASSERT(chain->dio == NULL);
745 * Free saved empty layer and return chained drop.
748 layer->good = 0xEF02;
749 kfree(layer, hmp->mchain);
753 * Once chain resources are gone we can use the now dead chain
754 * structure to placehold what might otherwise require a recursive
755 * drop, because we have potentially two things to drop and can only
756 * return one directly.
758 if (rdrop1 && rdrop2) {
759 KKASSERT(chain->flags & HAMMER2_CHAIN_ALLOCATED);
760 chain->data = (void *)rdrop1;
761 TAILQ_INSERT_TAIL(delayq, chain, core_entry);
763 } else if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
764 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
766 kfree(chain, hmp->mchain);
770 * Either or both can be NULL. We already handled the case where
771 * both might not have been NULL.
780 * On either last lock release or last drop
783 hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop)
785 /*hammer2_mount_t *hmp = chain->hmp;*/
787 switch(chain->bref.type) {
788 case HAMMER2_BREF_TYPE_VOLUME:
789 case HAMMER2_BREF_TYPE_FREEMAP:
794 KKASSERT(chain->data == NULL);
800 * Ref and lock a chain element, acquiring its data with I/O if necessary,
801 * and specify how you would like the data to be resolved.
803 * Returns 0 on success or an error code if the data could not be acquired.
804 * The chain element is locked on return regardless of whether an error
807 * The lock is allowed to recurse, multiple locking ops will aggregate
808 * the requested resolve types. Once data is assigned it will not be
809 * removed until the last unlock.
811 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
812 * (typically used to avoid device/logical buffer
815 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
816 * the INITIAL-create state (indirect blocks only).
818 * Do not resolve data elements for DATA chains.
819 * (typically used to avoid device/logical buffer
822 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
824 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
825 * it will be locked exclusive.
827 * NOTE: Embedded elements (volume header, inodes) are always resolved
830 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
831 * element will instantiate and zero its buffer, and flush it on
834 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
835 * so as not to instantiate a device buffer, which could alias against
836 * a logical file buffer. However, if ALWAYS is specified the
837 * device buffer will be instantiated anyway.
839 * WARNING! If data must be fetched a shared lock will temporarily be
840 * upgraded to exclusive. However, a deadlock can occur if
841 * the caller owns more than one shared lock.
844 hammer2_chain_lock(hammer2_chain_t *chain, int how)
846 hammer2_mount_t *hmp;
847 hammer2_chain_core_t *core;
848 hammer2_blockref_t *bref;
854 * Ref and lock the element. Recursive locks are allowed.
856 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
857 hammer2_chain_ref(chain);
858 atomic_add_int(&chain->lockcnt, 1);
861 KKASSERT(hmp != NULL);
864 * Get the appropriate lock.
867 if (how & HAMMER2_RESOLVE_SHARED)
868 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
870 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
873 * If we already have a valid data pointer no further action is
880 * Do we have to resolve the data?
882 switch(how & HAMMER2_RESOLVE_MASK) {
883 case HAMMER2_RESOLVE_NEVER:
885 case HAMMER2_RESOLVE_MAYBE:
886 if (chain->flags & HAMMER2_CHAIN_INITIAL)
888 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
891 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
894 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
897 case HAMMER2_RESOLVE_ALWAYS:
902 * Upgrade to an exclusive lock so we can safely manipulate the
903 * buffer cache. If another thread got to it before us we
906 ostate = ccms_thread_lock_upgrade(&core->cst);
908 ccms_thread_lock_downgrade(&core->cst, ostate);
913 * We must resolve to a device buffer, either by issuing I/O or
914 * by creating a zero-fill element. We do not mark the buffer
915 * dirty when creating a zero-fill element (the hammer2_chain_modify()
916 * API must still be used to do that).
918 * The device buffer is variable-sized in powers of 2 down
919 * to HAMMER2_MIN_ALLOC (typically 1K). A 64K physical storage
920 * chunk always contains buffers of the same size. (XXX)
922 * The minimum physical IO size may be larger than the variable
928 * The getblk() optimization can only be used on newly created
929 * elements if the physical block size matches the request.
931 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
932 error = hammer2_io_new(hmp, bref->data_off, chain->bytes,
935 error = hammer2_io_bread(hmp, bref->data_off, chain->bytes,
937 adjreadcounter(&chain->bref, chain->bytes);
941 kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
942 (intmax_t)bref->data_off, error);
943 hammer2_io_bqrelse(&chain->dio);
944 ccms_thread_lock_downgrade(&core->cst, ostate);
949 * We can clear the INITIAL state now, we've resolved the buffer
950 * to zeros and marked it dirty with hammer2_io_new().
952 bdata = hammer2_io_data(chain->dio, chain->bref.data_off);
953 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
954 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
958 * Setup the data pointer, either pointing it to an embedded data
959 * structure and copying the data from the buffer, or pointing it
962 * The buffer is not retained when copying to an embedded data
963 * structure in order to avoid potential deadlocks or recursions
964 * on the same physical buffer.
966 switch (bref->type) {
967 case HAMMER2_BREF_TYPE_VOLUME:
968 case HAMMER2_BREF_TYPE_FREEMAP:
970 * Copy data from bp to embedded buffer
972 panic("hammer2_chain_lock: called on unresolved volume header");
974 case HAMMER2_BREF_TYPE_INODE:
975 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
976 case HAMMER2_BREF_TYPE_INDIRECT:
977 case HAMMER2_BREF_TYPE_DATA:
978 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
981 * Point data at the device buffer and leave dio intact.
983 chain->data = (void *)bdata;
986 ccms_thread_lock_downgrade(&core->cst, ostate);
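/*
 * Typical usage (sketch only, variables assumed in scope): force the media
 * data to be resolved and then release the lock again, as
 * hammer2_chain_modify() does when data must be present before a
 * copy-on-write.
 */
#if 0
hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
hammer2_chain_unlock(chain);
#endif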
991 * This basically calls hammer2_io_breadcb() but does some pre-processing
992 * of the chain first to handle certain cases.
995 hammer2_chain_load_async(hammer2_chain_t *chain,
996 void (*callback)(hammer2_io_t *dio,
997 hammer2_chain_t *chain,
998 void *arg_p, off_t arg_o),
999 void *arg_p, off_t arg_o)
1001 hammer2_mount_t *hmp;
1002 struct hammer2_io *dio;
1003 hammer2_blockref_t *bref;
1007 callback(NULL, chain, arg_p, arg_o);
1012 * We must resolve to a device buffer, either by issuing I/O or
1013 * by creating a zero-fill element. We do not mark the buffer
1014 * dirty when creating a zero-fill element (the hammer2_chain_modify()
1015 * API must still be used to do that).
1017 * The device buffer is variable-sized in powers of 2 down
1018 * to HAMMER2_MIN_ALLOC (typically 1K). A 64K physical storage
1019 * chunk always contains buffers of the same size. (XXX)
1021 * The minimum physical IO size may be larger than the variable
1024 bref = &chain->bref;
1028 * The getblk() optimization can only be used on newly created
1029 * elements if the physical block size matches the request.
1031 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
1032 chain->bytes == hammer2_devblksize(chain->bytes)) {
1033 error = hammer2_io_new(hmp, bref->data_off, chain->bytes, &dio);
1034 KKASSERT(error == 0);
1035 callback(dio, chain, arg_p, arg_o);
1040 * Otherwise issue a read
1042 adjreadcounter(&chain->bref, chain->bytes);
1043 hammer2_io_breadcb(hmp, bref->data_off, chain->bytes,
1044 callback, chain, arg_p, arg_o);
1048 * Unlock and deref a chain element.
1050 * On the last lock release any non-embedded data (chain->dio) will be
1054 hammer2_chain_unlock(hammer2_chain_t *chain)
1056 hammer2_chain_core_t *core = chain->core;
1057 ccms_state_t ostate;
1062 * The core->cst lock can be shared across several chains so we
1063 * need to track the per-chain lockcnt separately.
1065 * If multiple locks are present (or being attempted) on this
1066 * particular chain we can just unlock, drop refs, and return.
1068 * Otherwise fall-through on the 1->0 transition.
1071 lockcnt = chain->lockcnt;
1072 KKASSERT(lockcnt > 0);
1075 if (atomic_cmpset_int(&chain->lockcnt,
1076 lockcnt, lockcnt - 1)) {
1077 ccms_thread_unlock(&core->cst);
1078 hammer2_chain_drop(chain);
1082 if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
1089 * On the 1->0 transition we upgrade the core lock (if necessary)
1090 * to exclusive for terminal processing. If after upgrading we find
1091 * that lockcnt is non-zero, another thread is racing us and will
1092 * handle the unload for us later on, so just cleanup and return
1093 * leaving the data/io intact
1095 * Otherwise if lockcnt is still 0 it is possible for it to become
1096 * non-zero and race, but since we hold the core->cst lock
1097 * exclusively all that will happen is that the chain will be
1098 * reloaded after we unload it.
1100 ostate = ccms_thread_lock_upgrade(&core->cst);
1101 if (chain->lockcnt) {
1102 ccms_thread_unlock_upgraded(&core->cst, ostate);
1103 hammer2_chain_drop(chain);
1108 * Shortcut the case if the data is embedded or not resolved.
1110 * Do NOT NULL out chain->data (e.g. inode data), it might be
1113 if (chain->dio == NULL) {
1114 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
1115 hammer2_chain_drop_data(chain, 0);
1116 ccms_thread_unlock_upgraded(&core->cst, ostate);
1117 hammer2_chain_drop(chain);
1124 if (hammer2_io_isdirty(chain->dio) == 0) {
1126 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
1127 switch(chain->bref.type) {
1128 case HAMMER2_BREF_TYPE_DATA:
1129 counterp = &hammer2_ioa_file_write;
1131 case HAMMER2_BREF_TYPE_INODE:
1132 counterp = &hammer2_ioa_meta_write;
1134 case HAMMER2_BREF_TYPE_INDIRECT:
1135 counterp = &hammer2_ioa_indr_write;
1137 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1138 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1139 counterp = &hammer2_ioa_fmap_write;
1142 counterp = &hammer2_ioa_volu_write;
1145 *counterp += chain->bytes;
1147 switch(chain->bref.type) {
1148 case HAMMER2_BREF_TYPE_DATA:
1149 counterp = &hammer2_iod_file_write;
1151 case HAMMER2_BREF_TYPE_INODE:
1152 counterp = &hammer2_iod_meta_write;
1154 case HAMMER2_BREF_TYPE_INDIRECT:
1155 counterp = &hammer2_iod_indr_write;
1157 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1158 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1159 counterp = &hammer2_iod_fmap_write;
1162 counterp = &hammer2_iod_volu_write;
1165 *counterp += chain->bytes;
1169 * Clean out the dio.
1171 * If a device buffer was used for data be sure to destroy the
1172 * buffer when we are done to avoid aliases (XXX what about the
1173 * underlying VM pages?).
1175 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
1178 * NOTE: The isdirty check tracks whether we have to bdwrite() the
1179 * buffer or not. The buffer might already be dirty. The
1180 * flag is re-set when chain_modify() is called, even if
1181 * MODIFIED is already set, allowing the OS to retire the
1182 * buffer independent of a hammer2 flush.
1185 if ((chain->flags & HAMMER2_CHAIN_IOFLUSH) &&
1186 hammer2_io_isdirty(chain->dio)) {
1187 hammer2_io_bawrite(&chain->dio);
1189 hammer2_io_bqrelse(&chain->dio);
1191 ccms_thread_unlock_upgraded(&core->cst, ostate);
1192 hammer2_chain_drop(chain);
1196 * This counts the number of live blockrefs in a block array and
1197 * also calculates the point at which all remaining blockrefs are empty.
1198 * This routine can only be called on a live chain (DUPLICATED flag not set).
1200 * NOTE: Flag is not set until after the count is complete, allowing
1201 * callers to test the flag without holding the spinlock.
1203 * NOTE: If base is NULL the related chain is still in the INITIAL
1204 * state and there are no blockrefs to count.
1206 * NOTE: live_count may already have some counts accumulated due to
1207 * creation and deletion and could even be initially negative.
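 *
 * Illustrative example (assumed blockref array): with count == 8 and only
 * base[0] and base[2] carrying a non-zero type, the backwards scan stops at
 * index 2, live_zero is set to 3, and live_count is advanced by 2.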
1210 hammer2_chain_countbrefs(hammer2_chain_t *chain,
1211 hammer2_blockref_t *base, int count)
1213 hammer2_chain_core_t *core = chain->core;
1215 KKASSERT((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0);
1217 spin_lock(&core->cst.spin);
1218 if ((core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0) {
1220 while (--count >= 0) {
1221 if (base[count].type)
1224 core->live_zero = count + 1;
1225 while (count >= 0) {
1226 if (base[count].type)
1227 atomic_add_int(&core->live_count, 1);
1231 core->live_zero = 0;
1233 /* else do not modify live_count */
1234 atomic_set_int(&core->flags, HAMMER2_CORE_COUNTEDBREFS);
1236 spin_unlock(&core->cst.spin);
1240 * Resize the chain's physical storage allocation in-place. This may
1241 * replace the passed-in chain with a new chain.
1243 * Chains can be resized smaller without reallocating the storage.
1244 * Resizing larger will reallocate the storage.
1246 * Must be passed an exclusively locked parent and chain, returns a new
1247 * exclusively locked chain at the same index and unlocks the old chain.
1248 * Flushes the buffer if necessary.
1250 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
1251 * to avoid instantiating a device buffer that conflicts with the vnode
1252 * data buffer. That is, the passed-in bp is a logical buffer, whereas
1253 * any chain-oriented bp would be a device buffer.
1255 * XXX return error if cannot resize.
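 *
 * For example (illustrative), passing nradix 16 requests a 65536-byte
 * (1 << 16) physical allocation for the chain.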
1258 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1259 hammer2_chain_t *parent, hammer2_chain_t **chainp,
1260 int nradix, int flags)
1262 hammer2_mount_t *hmp;
1263 hammer2_chain_t *chain;
1271 * Only data and indirect blocks can be resized for now.
1272 * (The volume root, inodes, and freemap elements use a fixed size).
1274 KKASSERT(chain != &hmp->vchain);
1275 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
1276 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
1279 * Nothing to do if the element is already the proper size
1281 obytes = chain->bytes;
1282 nbytes = 1U << nradix;
1283 if (obytes == nbytes)
1287 * Delete the old chain and duplicate it at the same (parent, index),
1288 * returning a new chain. This allows the old chain to still be
1289 * used by the flush code. The new chain will be returned in a
1292 * The parent does not have to be locked for the delete/duplicate call,
1293 * but is in this particular code path.
1295 * NOTE: If we are not crossing a synchronization point the
1296 * duplication code will simply reuse the existing chain
1299 hammer2_chain_delete_duplicate(trans, &chain, 0);
1302 * Relocate the block, even if making it smaller (because different
1303 * block sizes may be in different regions).
1305 hammer2_freemap_alloc(trans, chain, nbytes);
1306 chain->bytes = nbytes;
1307 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1308 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
1311 * For now just support it on DATA chains (and not on indirect
1314 KKASSERT(chain->dio == NULL);
1318 * Make sure the chain is marked MOVED and propagate the update
1319 * to the root for flush.
1321 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1322 hammer2_chain_ref(chain);
1323 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1325 hammer2_chain_setsubmod(trans, chain);
1331 * Set a chain modified, making it read-write and duplicating it if necessary.
1332 * This function will assign a new physical block to the chain if necessary
1334 * Duplication of already-modified chains is possible when the modification
1335 * crosses a flush synchronization boundary.
1337 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
1338 * level or the COW operation will not work.
1340 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
1341 * run the data through the device buffers.
1343 * This function may return a different chain than was passed, in which case
1344 * the old chain will be unlocked and the new chain will be locked.
1346 * ip->chain may be adjusted by hammer2_chain_modify_ip().
1348 hammer2_inode_data_t *
1349 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1350 hammer2_chain_t **chainp, int flags)
1352 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1353 hammer2_chain_modify(trans, chainp, flags);
1354 if (ip->chain != *chainp)
1355 hammer2_inode_repoint(ip, NULL, *chainp);
1357 vsetisdirty(ip->vp);
1358 return(&ip->chain->data->ipdata);
1362 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1365 hammer2_mount_t *hmp;
1366 hammer2_chain_t *chain;
1375 KKASSERT(chain->bref.mirror_tid != trans->sync_tid ||
1376 (chain->flags & HAMMER2_CHAIN_MODIFIED));
1379 * data is not optional for freemap chains (we must always be sure
1380 * to copy the data on COW storage allocations).
1382 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1383 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1384 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
1385 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
1389 * Determine if a delete-duplicate is needed.
1391 * (a) Modify_tid is part of a prior flush
1392 * (b) Transaction is concurrent with a flush (has higher tid)
1393 * (c) and chain is not in the initial state (freshly created)
1394 * (d) and caller didn't request an in-place modification.
1396 * The freemap and volume header special chains are never delete-duplicated.
1398 if (chain->modify_tid != trans->sync_tid && /* cross boundary */
1399 (flags & HAMMER2_MODIFY_INPLACE) == 0) { /* from d-d */
1400 if (chain != &hmp->fchain && chain != &hmp->vchain) {
1401 KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
1402 hammer2_chain_delete_duplicate(trans, chainp, 0);
1407 * Fall through if fchain or vchain, clearing the CHAIN_FLUSHED
1408 * flag. Basically other chains are delete-duplicated and so
1409 * the duplicated chains of course will not have the FLUSHED
1410 * flag set, but fchain and vchain are special-cased and the
1411 * flag must be cleared when changing modify_tid.
1413 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FLUSHED);
1417 * Data must be resolved if already assigned unless explicitly
1418 * flagged otherwise.
1420 if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1421 (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
1422 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
1423 hammer2_chain_unlock(chain);
1427 * Otherwise do initial-chain handling
1429 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1430 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1431 hammer2_chain_ref(chain);
1432 hammer2_chain_memory_inc(chain->pmp);
1435 /* shouldn't be needed */
1436 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1437 hammer2_chain_ref(chain);
1438 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1443 * The modification or re-modification requires an allocation and
1446 * We normally always allocate new storage here. If storage exists
1447 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
1449 if (chain != &hmp->vchain && chain != &hmp->fchain) {
1450 if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
1451 ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 &&
1452 chain->modify_tid != trans->sync_tid)
1454 hammer2_freemap_alloc(trans, chain, chain->bytes);
1455 /* XXX failed allocation */
1456 } else if (chain->flags & HAMMER2_CHAIN_FORCECOW) {
1457 hammer2_freemap_alloc(trans, chain, chain->bytes);
1458 /* XXX failed allocation */
1460 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1464 * Update modify_tid. XXX special-case vchain/fchain because they
1465 * are always modified in-place. Otherwise the chain being modified
1466 * must not be part of a future transaction.
1468 if (chain == &hmp->vchain || chain == &hmp->fchain) {
1469 if (chain->modify_tid <= trans->sync_tid)
1470 chain->modify_tid = trans->sync_tid;
1472 KKASSERT(chain->modify_tid <= trans->sync_tid);
1473 chain->modify_tid = trans->sync_tid;
1476 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1477 chain->bref.modify_tid = trans->sync_tid;
1480 * Do not COW BREF_TYPE_DATA when OPTDATA is set. This is because
1481 * data modifications are done via the logical buffer cache so COWing
1482 * it here would result in unnecessary extra copies (and possibly extra
1483 * block reallocations). The INITIAL flag remains unchanged in this
1486 * (This is a bit of a hack).
1488 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA &&
1489 (flags & HAMMER2_MODIFY_OPTDATA)) {
1494 * Clearing the INITIAL flag (for indirect blocks) indicates that
1495 * we've processed the uninitialized storage allocation.
1497 * If this flag is already clear we are likely in a copy-on-write
1498 * situation but we have to be sure NOT to bzero the storage if
1499 * no data is present.
1501 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
1502 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1509 * Instantiate data buffer and possibly execute COW operation
1511 switch(chain->bref.type) {
1512 case HAMMER2_BREF_TYPE_VOLUME:
1513 case HAMMER2_BREF_TYPE_FREEMAP:
1515 * The data is embedded, no copy-on-write operation is
1518 KKASSERT(chain->dio == NULL);
1520 case HAMMER2_BREF_TYPE_INODE:
1521 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1522 case HAMMER2_BREF_TYPE_DATA:
1523 case HAMMER2_BREF_TYPE_INDIRECT:
1524 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1526 * Perform the copy-on-write operation
1528 * zero-fill or copy-on-write depending on whether
1529 * chain->data exists or not and set the dirty state for
1530 * the new buffer. hammer2_io_new() will handle the
1533 KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);
1536 error = hammer2_io_new(hmp, chain->bref.data_off,
1537 chain->bytes, &dio);
1539 error = hammer2_io_bread(hmp, chain->bref.data_off,
1540 chain->bytes, &dio);
1542 adjreadcounter(&chain->bref, chain->bytes);
1543 KKASSERT(error == 0);
1545 bdata = hammer2_io_data(dio, chain->bref.data_off);
1548 KKASSERT(chain->dio != NULL);
1549 if (chain->data != (void *)bdata) {
1550 bcopy(chain->data, bdata, chain->bytes);
1552 } else if (wasinitial == 0) {
1554 * We have a problem. We were asked to COW but
1555 * we don't have any data to COW with!
1557 panic("hammer2_chain_modify: having a COW %p\n",
1562 * Retire the old buffer, replace with the new
1565 hammer2_io_brelse(&chain->dio);
1566 chain->data = (void *)bdata;
1568 hammer2_io_setdirty(dio); /* modified by bcopy above */
1571 panic("hammer2_chain_modify: illegal non-embedded type %d",
1577 hammer2_chain_setsubmod(trans, chain);
1581 * Mark the volume as having been modified. This short-cut version
1582 * does not have to lock the volume's chain, which allows the ioctl
1583 * code to make adjustments to connections without deadlocking. XXX
1585 * No ref is made on vchain when flagging it MODIFIED.
1588 hammer2_modify_volume(hammer2_mount_t *hmp)
1590 hammer2_voldata_lock(hmp);
1591 hammer2_voldata_unlock(hmp, 1);
1595 * This function returns the chain at the nearest key within the specified
1596 * range with the highest delete_tid. The core spinlock must be held on
1597 * call and the returned chain will be referenced but not locked.
1599 * The returned chain may or may not be in a deleted state. Note that
1600 * live chains have a delete_tid = MAX_TID.
1602 * This function will recurse through chain->rbtree as necessary and will
1603 * return a *key_nextp suitable for iteration. *key_nextp is only set if
1604 * the iteration value is less than the current value of *key_nextp.
1606 * The caller should use (*key_nextp) to calculate the actual range of
1607 * the returned element, which will be (key_beg to *key_nextp - 1), because
1608 * there might be another element which is superior to the returned element
1611 * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL
1612 * chains continue to be returned. On EOF (*key_nextp) may overflow since
1613 * it will wind up being (key_end + 1).
1615 struct hammer2_chain_find_info {
1616 hammer2_chain_t *best;
1617 hammer2_key_t key_beg;
1618 hammer2_key_t key_end;
1619 hammer2_key_t key_next;
1622 static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
1623 static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);
1626 * DEBUGGING - Assert that the chain will not collide.
1630 hammer2_chain_assert_not_present(hammer2_chain_core_t *core,
1631 hammer2_chain_t *chain)
1633 struct hammer2_chain_find_info info;
1634 hammer2_chain_layer_t *layer;
1636 if (chain->flags & HAMMER2_CHAIN_DELETED)
1640 info.key_beg = chain->bref.key;
1641 info.key_end = chain->bref.key +
1642 ((hammer2_key_t)1 << chain->bref.keybits) - 1;
1643 info.key_next = HAMMER2_MAX_KEY;
1645 TAILQ_FOREACH(layer, &core->layerq, entry) {
1646 KKASSERT(layer->good == 0xABCD);
1647 RB_SCAN(hammer2_chain_tree, &layer->rbtree,
1648 hammer2_chain_find_cmp, hammer2_chain_find_callback,
1651 if (info.best && (info.best->flags & HAMMER2_CHAIN_DELETED) == 0)
1652 panic("hammer2_chain_assert_not_present: %p/%p\n",
1658 hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
1659 hammer2_key_t key_beg, hammer2_key_t key_end)
1661 struct hammer2_chain_find_info info;
1662 hammer2_chain_layer_t *layer;
1665 info.key_beg = key_beg;
1666 info.key_end = key_end;
1667 info.key_next = *key_nextp;
1669 KKASSERT(parent->core->good == 0x1234);
1670 TAILQ_FOREACH(layer, &parent->core->layerq, entry) {
1671 KKASSERT(layer->good == 0xABCD);
1672 RB_SCAN(hammer2_chain_tree, &layer->rbtree,
1673 hammer2_chain_find_cmp, hammer2_chain_find_callback,
1676 *key_nextp = info.key_next;
1678 kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
1679 parent, key_beg, key_end, *key_nextp);
1687 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1689 struct hammer2_chain_find_info *info = data;
1690 hammer2_key_t child_beg;
1691 hammer2_key_t child_end;
1693 child_beg = child->bref.key;
1694 child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;
1696 if (child_end < info->key_beg)
1698 if (child_beg > info->key_end)
1705 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1707 struct hammer2_chain_find_info *info = data;
1708 hammer2_chain_t *best;
1709 hammer2_key_t child_end;
1712 * WARNING! Do not discard DUPLICATED chains, it is possible that
1713 * we are catching an insertion half-way done. If a
1714 * duplicated chain turns out to be the best choice the
1715 * caller will re-check its flags after locking it.
1717 * WARNING! Layerq is scanned forwards, exact matches should keep
1718 * the existing info->best.
1720 if ((best = info->best) == NULL) {
1722 * No previous best. Assign best
1725 } else if (best->bref.key <= info->key_beg &&
1726 child->bref.key <= info->key_beg) {
1728 * If our current best is flush with key_beg and child is
1729 * also flush with key_beg choose based on delete_tid.
1731 * key_next will automatically be limited to the smaller of
1732 * the two end-points.
1734 if (child->delete_tid > best->delete_tid)
1736 } else if (child->bref.key < best->bref.key) {
1738 * Child has a nearer key and best is not flush with key_beg.
1739 * Truncate key_next to the old best key iff it had a better
1743 if (best->delete_tid >= child->delete_tid &&
1744 (info->key_next > best->bref.key || info->key_next == 0))
1745 info->key_next = best->bref.key;
1746 } else if (child->bref.key == best->bref.key) {
1748 * If our current best is flush with the child then choose
1749 * based on delete_tid.
1751 * key_next will automatically be limited to the smaller of
1752 * the two end-points.
1754 if (child->delete_tid > best->delete_tid)
1758 * Keep the current best but truncate key_next to the child's
1759 * base iff the child has a higher delete_tid.
1761 * key_next will also automatically be limited to the smaller
1762 * of the two end-points (probably not necessary for this case
1763 * but we do it anyway).
1765 if (child->delete_tid >= best->delete_tid &&
1766 (info->key_next > child->bref.key || info->key_next == 0))
1767 info->key_next = child->bref.key;
1771 * Always truncate key_next based on child's end-of-range.
1773 child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
1774 if (child_end && (info->key_next > child_end || info->key_next == 0))
1775 info->key_next = child_end;
1781 * Retrieve the specified chain from a media blockref, creating the
1782 * in-memory chain structure which reflects it. modify_tid will be
1783 * left 0 which forces any modifications to issue a delete-duplicate.
1785 * To handle insertion races pass the INSERT_RACE flag along with the
1786 * generation number of the core. NULL will be returned if the generation
1787 * number changes before we have a chance to insert the chain. Insert
1788 * races can occur because the parent might be held shared.
1790 * Caller must hold the parent locked shared or exclusive since we may
1791 * need the parent's bref array to find our block.
1794 hammer2_chain_get(hammer2_chain_t *parent, hammer2_blockref_t *bref,
1797 hammer2_mount_t *hmp = parent->hmp;
1798 hammer2_chain_core_t *above = parent->core;
1799 hammer2_chain_t *chain;
1803 * Allocate a chain structure representing the existing media
1804 * entry. Resulting chain has one ref and is not locked.
1806 chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
1807 hammer2_chain_core_alloc(NULL, chain, NULL);
1808 /* ref'd chain returned */
1809 chain->modify_tid = chain->bref.mirror_tid;
1812 * Link the chain into its parent. A spinlock is required to safely
1813 * access the RBTREE, and it is possible to collide with another
1814 * hammer2_chain_get() operation because the caller might only hold
1815 * a shared lock on the parent.
1817 KKASSERT(parent->refs > 0);
1818 error = hammer2_chain_insert(above, NULL, chain,
1819 HAMMER2_CHAIN_INSERT_SPIN |
1820 HAMMER2_CHAIN_INSERT_RACE,
1823 KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
1824 kprintf("chain %p get race\n", chain);
1825 hammer2_chain_drop(chain);
1828 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
1832 * Return our new chain referenced but not locked, or NULL if
1839 * Lookup initialization/completion API
1842 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1844 if (flags & HAMMER2_LOOKUP_SHARED) {
1845 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1846 HAMMER2_RESOLVE_SHARED);
1848 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1854 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1857 hammer2_chain_unlock(parent);
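/*
 * Iteration sketch (names and argument lists assumed, see
 * hammer2_chain_lookup() and hammer2_chain_next() below): walk all chains
 * overlapping [key_beg, key_end] under an inode chain, letting (*parentp)
 * float as the lookup recurses.
 */
#if 0
parent = hammer2_chain_lookup_init(ip->chain, 0);
chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
			     &cache_index, 0);
while (chain) {
	/* ... examine chain ... */
	chain = hammer2_chain_next(&parent, chain, &key_next,
				   key_next, key_end,
				   &cache_index, 0);
}
hammer2_chain_lookup_done(parent);
#endif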
1862 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1864 hammer2_chain_t *oparent;
1865 hammer2_chain_t *bparent;
1866 hammer2_chain_t *nparent;
1867 hammer2_chain_core_t *above;
1870 above = oparent->above;
1872 spin_lock(&above->cst.spin);
1873 bparent = TAILQ_FIRST(&above->ownerq);
1874 hammer2_chain_ref(bparent);
1877 * Be careful of order, oparent must be unlocked before nparent
1878 * is locked below to avoid a deadlock. We might as well delay its
1879 * unlocking until we conveniently no longer have the spinlock (instead
1880 * of cycling the spinlock).
1882 * Theoretically our ref on bparent should prevent elements of the
1883 * following chain from going away and prevent above from going away,
1884 * but we still need the spinlock to safely scan the list.
1888 while (nparent->flags & HAMMER2_CHAIN_DUPLICATED)
1889 nparent = TAILQ_NEXT(nparent, core_entry);
1890 hammer2_chain_ref(nparent);
1891 spin_unlock(&above->cst.spin);
1894 hammer2_chain_unlock(oparent);
1897 hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1898 hammer2_chain_drop(bparent);
1901 * We might have raced a delete-duplicate.
1903 if ((nparent->flags & HAMMER2_CHAIN_DUPLICATED) == 0)
1906 hammer2_chain_ref(bparent);
1907 hammer2_chain_unlock(nparent);
1908 spin_lock(&above->cst.spin);
1917 * Locate the first chain whose key range overlaps (key_beg, key_end) inclusive.
1918 * (*parentp) typically points to an inode but can also point to a related
1919 * indirect block and this function will recurse upwards and find the inode
1922 * (*parentp) must be exclusively locked and referenced and can be an inode
1923 * or an existing indirect block within the inode.
1925 * On return (*parentp) will be modified to point at the deepest parent chain
1926 * element encountered during the search, as a helper for an insertion or
1927 * deletion. The new (*parentp) will be locked and referenced and the old
1928 * will be unlocked and dereferenced (no change if they are both the same).
1930 * The matching chain will be returned exclusively locked. If NOLOCK is
1931 * requested the chain will be returned only referenced.
1933 * NULL is returned if no match was found, but (*parentp) will still
1934 * potentially be adjusted.
1936 * On return (*key_nextp) will point to an iterative value for key_beg.
1937 * (If NULL is returned (*key_nextp) is set to key_end).
1939 * This function will also recurse up the chain if the key is not within the
1940 * current parent's range. (*parentp) can never be set to NULL. An iteration
1941 * can simply allow (*parentp) to float inside the loop.
1943 * NOTE! chain->data is not always resolved. By default it will not be
1944 * resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF. Use
1945 * HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1946 * BREF_TYPE_DATA as the device buffer can alias the logical file
1950 hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
1951 hammer2_key_t key_beg, hammer2_key_t key_end,
1952 int *cache_indexp, int flags)
1954 hammer2_mount_t *hmp;
1955 hammer2_chain_t *parent;
1956 hammer2_chain_t *chain;
1957 hammer2_blockref_t *base;
1958 hammer2_blockref_t *bref;
1959 hammer2_blockref_t bcopy;
1960 hammer2_key_t scan_beg;
1961 hammer2_key_t scan_end;
1962 hammer2_chain_core_t *above;
1964 int how_always = HAMMER2_RESOLVE_ALWAYS;
1965 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1968 int maxloops = 300000;
1971 if (flags & HAMMER2_LOOKUP_ALWAYS) {
1972 how_maybe = how_always;
1973 how = HAMMER2_RESOLVE_ALWAYS;
1974 } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1975 how = HAMMER2_RESOLVE_NEVER;
1977 how = HAMMER2_RESOLVE_MAYBE;
1979 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1980 how_maybe |= HAMMER2_RESOLVE_SHARED;
1981 how_always |= HAMMER2_RESOLVE_SHARED;
1982 how |= HAMMER2_RESOLVE_SHARED;
1986 * Recurse (*parentp) upward if necessary until the parent completely
1987 * encloses the key range or we hit the inode.
1989 * This function handles races against the flusher doing a delete-
1990 * duplicate above us and re-homes the parent to the duplicate in
1991 * that case, otherwise we'd wind up recursing down a stale chain.
1996 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1997 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1998 scan_beg = parent->bref.key;
1999 scan_end = scan_beg +
2000 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
2001 if (key_beg >= scan_beg && key_end <= scan_end)
2003 parent = hammer2_chain_getparent(parentp, how_maybe);
2007 if (--maxloops == 0)
2008 panic("hammer2_chain_lookup: maxloops");
2010 * Locate the blockref array. Currently we do a fully associative
2011 * search through the array.
2013 switch(parent->bref.type) {
2014 case HAMMER2_BREF_TYPE_INODE:
2016 * Special shortcut for embedded data returns the inode
2017 * itself. Callers must detect this condition and access
2018 * the embedded data (the strategy code does this for us).
2020 * This is only applicable to regular files and softlinks.
2022 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
2023 if (flags & HAMMER2_LOOKUP_NOLOCK)
2024 hammer2_chain_ref(parent);
2026 hammer2_chain_lock(parent, how_always);
2027 *key_nextp = key_end + 1;
2030 base = &parent->data->ipdata.u.blockset.blockref[0];
2031 count = HAMMER2_SET_COUNT;
2033 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2034 case HAMMER2_BREF_TYPE_INDIRECT:
2036 * Handle MATCHIND on the parent
2038 if (flags & HAMMER2_LOOKUP_MATCHIND) {
2039 scan_beg = parent->bref.key;
2040 scan_end = scan_beg +
2041 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
2042 if (key_beg == scan_beg && key_end == scan_end) {
2044 hammer2_chain_lock(chain, how_maybe);
2045 *key_nextp = scan_end + 1;
2050 * Optimize indirect blocks in the INITIAL state to avoid I/O.
2053 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2056 if (parent->data == NULL)
2057 panic("parent->data is NULL");
2058 base = &parent->data->npdata[0];
2060 count = parent->bytes / sizeof(hammer2_blockref_t);
2062 case HAMMER2_BREF_TYPE_VOLUME:
2063 base = &hmp->voldata.sroot_blockset.blockref[0];
2064 count = HAMMER2_SET_COUNT;
2066 case HAMMER2_BREF_TYPE_FREEMAP:
2067 base = &hmp->voldata.freemap_blockset.blockref[0];
2068 count = HAMMER2_SET_COUNT;
2071 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
2073 base = NULL; /* safety */
2074 count = 0; /* safety */
2078 * Merged scan to find next candidate.
2080 * hammer2_base_*() functions require the above->live_* fields
2081 * to be synchronized.
2083 * We need to hold the spinlock to access the block array and RB tree
2084 * and to interlock chain creation.
2086 above = parent->core;
2087 if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2088 hammer2_chain_countbrefs(parent, base, count);
2093 spin_lock(&above->cst.spin);
2094 chain = hammer2_combined_find(parent, base, count,
2095 cache_indexp, key_nextp,
2096 key_beg, key_end, &bref);
2097 generation = above->generation;
2100 * Exhausted parent chain, iterate.
2103 spin_unlock(&above->cst.spin);
2104 if (key_beg == key_end) /* short cut single-key case */
2106 return (hammer2_chain_next(parentp, NULL, key_nextp,
2108 cache_indexp, flags));
2112 * Selected from blockref or in-memory chain.
2114 if (chain == NULL) {
2116 spin_unlock(&above->cst.spin);
2117 chain = hammer2_chain_get(parent, &bcopy, generation);
2118 if (chain == NULL) {
2119 kprintf("retry lookup parent %p keys %016jx:%016jx\n",
2120 parent, key_beg, key_end);
2123 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2124 hammer2_chain_drop(chain);
2129 hammer2_chain_ref(chain);
2130 wasdup = ((chain->flags & HAMMER2_CHAIN_DUPLICATED) != 0);
2131 spin_unlock(&above->cst.spin);
2135 * chain is referenced but not locked. We must lock the chain
2136 * to obtain definitive DUPLICATED/DELETED state
2138 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
2139 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2140 hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF);
2142 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
2146 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2148 * NOTE: Chain's key range is not relevant as there might be
2149 * one-offs within the range that are not deleted.
2151 * NOTE: Lookups can race delete-duplicate because
2152 * delete-duplicate does not lock the parent's core
2153 * (they just use the spinlock on the core). We must
2154 * check for races by comparing the DUPLICATED flag before
2155 * releasing the spinlock with the flag after locking the
2158 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2159 hammer2_chain_unlock(chain);
2160 if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0 || wasdup) {
2161 key_beg = *key_nextp;
2162 if (key_beg == 0 || key_beg > key_end)
2169 * If the chain element is an indirect block it becomes the new
2170 * parent and we loop on it. We must maintain our top-down locks
2171 * to prevent the flusher from interfering (i.e. doing a
2172 * delete-duplicate and leaving us recursing down a deleted chain).
2174 * The parent always has to be locked with at least RESOLVE_MAYBE
2175 * so we can access its data. It might need a fixup if the caller
2176 * passed incompatible flags. Be careful not to cause a deadlock
2177 * as a data-load requires an exclusive lock.
2179 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
2180 * range is within the requested key range we return the indirect
2181 * block and do NOT loop. This is usually only used to acquire freemap nodes.
2184 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
2185 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2186 hammer2_chain_unlock(parent);
2187 *parentp = parent = chain;
2192 * All done, return the chain
2198 * After having issued a lookup we can iterate all matching keys.
2200 * If chain is non-NULL we continue the iteration from just after its index.
2202 * If chain is NULL we assume the parent was exhausted and continue the
2203 * iteration at the next parent.
2205 * parent must be locked on entry and remains locked throughout. chain's
2206 * lock status must match flags. Chain is always at least referenced.
2208 * WARNING! The MATCHIND flag does not apply to this function.
2211 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
2212 hammer2_key_t *key_nextp,
2213 hammer2_key_t key_beg, hammer2_key_t key_end,
2214 int *cache_indexp, int flags)
2216 hammer2_chain_t *parent;
2220 * Calculate locking flags for upward recursion.
2222 how_maybe = HAMMER2_RESOLVE_MAYBE;
2223 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
2224 how_maybe |= HAMMER2_RESOLVE_SHARED;
2229 * Calculate the next index and recalculate the parent if necessary.
2232 key_beg = chain->bref.key +
2233 ((hammer2_key_t)1 << chain->bref.keybits);
2234 if (flags & HAMMER2_LOOKUP_NOLOCK)
2235 hammer2_chain_drop(chain);
2237 hammer2_chain_unlock(chain);
2240 * Any scan where the lookup returned degenerate data embedded
2241 * in the inode has an invalid index and must terminate.
2243 if (chain == parent)
2245 if (key_beg == 0 || key_beg > key_end)
2248 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
2249 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2251 * We reached the end of the iteration.
2256 * Continue iteration with next parent unless the current
2257 * parent covers the range.
2259 key_beg = parent->bref.key +
2260 ((hammer2_key_t)1 << parent->bref.keybits);
2261 if (key_beg == 0 || key_beg > key_end)
2263 parent = hammer2_chain_getparent(parentp, how_maybe);
2269 return (hammer2_chain_lookup(parentp, key_nextp,
2271 cache_indexp, flags));
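/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical consumer-side pattern for the lookup/next API documented above.
 * The parent/transaction setup and the process_chain() helper are
 * hypothetical placeholders; only the hammer2_chain_lookup()/next()
 * calls and flags come from this file.
 */
#if 0
static void
example_iterate_key_range(hammer2_chain_t **parentp,
			  hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	chain = hammer2_chain_lookup(parentp, &key_next, key_beg, key_end,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/*
		 * chain is returned locked (NODATA: media data may not be
		 * resolved).  (*parentp) may have been adjusted to float
		 * to the deepest enclosing parent.
		 */
		process_chain(chain);		/* hypothetical */
		chain = hammer2_chain_next(parentp, chain, &key_next,
					   key_next, key_end,
					   &cache_index,
					   HAMMER2_LOOKUP_NODATA);
	}
}
#endif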
2275 * The raw scan function is similar to lookup/next but does not seek to a key.
2276 * Blockrefs are iterated via first_chain = (parent, NULL) and
2277 * next_chain = (parent, chain).
2279 * The passed-in parent must be locked and its data resolved. The returned
2280 * chain will be locked. Pass chain == NULL to acquire the first sub-chain
2281 * under parent and then iterate with the passed-in chain (which this
2282 * function will unlock).
2285 hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain,
2286 int *cache_indexp, int flags)
2288 hammer2_mount_t *hmp;
2289 hammer2_blockref_t *base;
2290 hammer2_blockref_t *bref;
2291 hammer2_blockref_t bcopy;
2292 hammer2_chain_core_t *above;
2294 hammer2_key_t next_key;
2296 int how_always = HAMMER2_RESOLVE_ALWAYS;
2297 int how_maybe = HAMMER2_RESOLVE_MAYBE;
2300 int maxloops = 300000;
2306 * Scan flags borrowed from lookup
2308 if (flags & HAMMER2_LOOKUP_ALWAYS) {
2309 how_maybe = how_always;
2310 how = HAMMER2_RESOLVE_ALWAYS;
2311 } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
2312 how = HAMMER2_RESOLVE_NEVER;
2314 how = HAMMER2_RESOLVE_MAYBE;
2316 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
2317 how_maybe |= HAMMER2_RESOLVE_SHARED;
2318 how_always |= HAMMER2_RESOLVE_SHARED;
2319 how |= HAMMER2_RESOLVE_SHARED;
2323 * Calculate key to locate first/next element, unlocking the previous
2324 * element as we go. Be careful, the key calculation can overflow.
2327 key = chain->bref.key +
2328 ((hammer2_key_t)1 << chain->bref.keybits);
2329 hammer2_chain_unlock(chain);
2338 if (--maxloops == 0)
2339 panic("hammer2_chain_scan: maxloops");
2341 * Locate the blockref array. Currently we do a fully associative
2342 * search through the array.
2344 switch(parent->bref.type) {
2345 case HAMMER2_BREF_TYPE_INODE:
2347 * An inode with embedded data has no sub-chains.
2349 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
2351 base = &parent->data->ipdata.u.blockset.blockref[0];
2352 count = HAMMER2_SET_COUNT;
2354 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2355 case HAMMER2_BREF_TYPE_INDIRECT:
2357 * Optimize indirect blocks in the INITIAL state to avoid I/O.
2360 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2363 if (parent->data == NULL)
2364 panic("parent->data is NULL");
2365 base = &parent->data->npdata[0];
2367 count = parent->bytes / sizeof(hammer2_blockref_t);
2369 case HAMMER2_BREF_TYPE_VOLUME:
2370 base = &hmp->voldata.sroot_blockset.blockref[0];
2371 count = HAMMER2_SET_COUNT;
2373 case HAMMER2_BREF_TYPE_FREEMAP:
2374 base = &hmp->voldata.freemap_blockset.blockref[0];
2375 count = HAMMER2_SET_COUNT;
2378 panic("hammer2_chain_scan: unrecognized blockref type: %d",
2380 base = NULL; /* safety */
2381 count = 0; /* safety */
2385 * Merged scan to find next candidate.
2387 * hammer2_base_*() functions require the above->live_* fields
2388 * to be synchronized.
2390 * We need to hold the spinlock to access the block array and RB tree
2391 * and to interlock chain creation.
2393 if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2394 hammer2_chain_countbrefs(parent, base, count);
2396 above = parent->core;
2398 spin_lock(&above->cst.spin);
2399 chain = hammer2_combined_find(parent, base, count,
2400 cache_indexp, &next_key,
2401 key, HAMMER2_MAX_KEY, &bref);
2402 generation = above->generation;
2405 * Exhausted parent chain, we're done.
2408 spin_unlock(&above->cst.spin);
2409 KKASSERT(chain == NULL);
2414 * Selected from blockref or in-memory chain.
2416 if (chain == NULL) {
2418 spin_unlock(&above->cst.spin);
2419 chain = hammer2_chain_get(parent, &bcopy, generation);
2420 if (chain == NULL) {
2421 kprintf("retry scan parent %p keys %016jx\n",
2425 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2426 hammer2_chain_drop(chain);
2432 hammer2_chain_ref(chain);
2433 wasdup = ((chain->flags & HAMMER2_CHAIN_DUPLICATED) != 0);
2434 spin_unlock(&above->cst.spin);
2438 * chain is referenced but not locked. We must lock the chain
2439 * to obtain definitive DUPLICATED/DELETED state
2441 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
2444 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2446 * NOTE: chain's key range is not relevant as there might be
2447 * one-offs within the range that are not deleted.
2449 * NOTE: XXX this could create problems with scans used in
2450 * situations other than mount-time recovery.
2452 * NOTE: Lookups can race delete-duplicate because
2453 * delete-duplicate does not lock the parent's core
2454 * (they just use the spinlock on the core). We must
2455 * check for races by comparing the DUPLICATED flag before
2456 * releasing the spinlock with the flag after locking the
2459 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2460 hammer2_chain_unlock(chain);
2463 if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0 || wasdup) {
2473 * All done, return the chain or NULL
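/*
 * Editor's illustrative sketch (not part of the original source): raw
 * iteration of all sub-chains under a locked parent using the
 * first_chain = (parent, NULL) / next_chain = (parent, chain) convention
 * described above.  inspect_chain() is a hypothetical placeholder.
 */
#if 0
static void
example_scan_all(hammer2_chain_t *parent)
{
	hammer2_chain_t *chain = NULL;
	int cache_index = -1;

	for (;;) {
		chain = hammer2_chain_scan(parent, chain, &cache_index,
					   HAMMER2_LOOKUP_NODATA);
		if (chain == NULL)
			break;
		inspect_chain(chain);		/* hypothetical */
		/* chain is passed back in and unlocked by the next call */
	}
}
#endif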
2479 * Create and return a new hammer2 system memory structure of the specified
2480 * key, type and size and insert it under (*parentp). This is a full
2481 * insertion, based on the supplied key/keybits, and may involve creating
2482 * indirect blocks and moving other chains around via delete/duplicate.
2484 * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
2485 * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2486 * FULL. This typically means that the caller is creating the chain after
2487 * doing a hammer2_chain_lookup().
2489 * (*parentp) must be exclusive locked and may be replaced on return
2490 * depending on how much work the function had to do.
2492 * (*chainp) usually starts out NULL and returns the newly created chain,
2493 * but if the caller desires the caller may allocate a disconnected chain
2494 * and pass it in instead. (It is also possible for the caller to use
2495 * chain_duplicate() to create a disconnected chain, manipulate it, then
2496 * pass it into this function to insert it).
2498 * This function should NOT be used to insert INDIRECT blocks. It is
2499 * typically used to create/insert inodes and data blocks.
2501 * Caller must pass-in an exclusively locked parent the new chain is to
2502 * be inserted under, and optionally pass-in a disconnected, exclusively
2503 * locked chain to insert (else we create a new chain). The function will
2504 * adjust (*parentp) as necessary, create or connect the chain, and
2505 * return an exclusively locked chain in *chainp.
2508 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2509 hammer2_chain_t **chainp,
2510 hammer2_key_t key, int keybits, int type, size_t bytes)
2512 hammer2_mount_t *hmp;
2513 hammer2_chain_t *chain;
2514 hammer2_chain_t *parent = *parentp;
2515 hammer2_chain_core_t *above;
2516 hammer2_blockref_t *base;
2517 hammer2_blockref_t dummy;
2521 int maxloops = 300000;
2523 above = parent->core;
2524 KKASSERT(ccms_thread_lock_owned(&above->cst));
2528 if (chain == NULL) {
2530 * First allocate media space and construct the dummy bref,
2531 * then allocate the in-memory chain structure. Set the
2532 * INITIAL flag for fresh chains which do not have embedded
2535 bzero(&dummy, sizeof(dummy));
2538 dummy.keybits = keybits;
2539 dummy.data_off = hammer2_getradix(bytes);
2540 dummy.methods = parent->bref.methods;
2541 chain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy);
2542 hammer2_chain_core_alloc(trans, chain, NULL);
2545 * Lock the chain manually, chain_lock will load the chain
2546 * which we do NOT want to do. (note: chain->refs is set
2547 * to 1 by chain_alloc() for us, but lockcnt is not).
2550 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
2554 * We do NOT set INITIAL here (yet). INITIAL is only
2555 * used for indirect blocks.
2557 * Recalculate bytes to reflect the actual media block
2560 bytes = (hammer2_off_t)1 <<
2561 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2562 chain->bytes = bytes;
2565 case HAMMER2_BREF_TYPE_VOLUME:
2566 case HAMMER2_BREF_TYPE_FREEMAP:
2567 panic("hammer2_chain_create: called with volume type");
2569 case HAMMER2_BREF_TYPE_INDIRECT:
2570 panic("hammer2_chain_create: cannot be used to "
2571 "create indirect block");
2573 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2574 panic("hammer2_chain_create: cannot be used to "
2575 "create freemap root or node");
2577 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2578 KKASSERT(bytes == sizeof(chain->data->bmdata));
2580 case HAMMER2_BREF_TYPE_INODE:
2581 case HAMMER2_BREF_TYPE_DATA:
2584 * leave chain->data NULL, set INITIAL
2586 KKASSERT(chain->data == NULL);
2587 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2592 * We are reattaching a chain that has been duplicated and
2593 * left disconnected under a DIFFERENT parent with potentially
2594 * different key/keybits.
2596 * The chain must be modified in the current transaction
2597 * (the duplication code should have done that for us),
2598 * and its modify_tid should be greater than the parent's
2599 * bref.mirror_tid. This should cause it to be created under
2602 * If deleted in the same transaction, the create/delete TIDs
2603 * will be the same and effectively the chain will not have
2604 * existed at all from the point of view of the parent.
2606 * Do NOT mess with the current state of the INITIAL flag.
2608 KKASSERT(chain->modify_tid > parent->bref.mirror_tid);
2609 KKASSERT(chain->modify_tid == trans->sync_tid);
2610 chain->bref.key = key;
2611 chain->bref.keybits = keybits;
2612 /* chain->modify_tid = chain->bref.mirror_tid; */
2613 KKASSERT(chain->above == NULL);
2617 * Calculate how many entries we have in the blockref array and
2618 * determine if an indirect block is required.
2621 if (--maxloops == 0)
2622 panic("hammer2_chain_create: maxloops");
2623 above = parent->core;
2625 switch(parent->bref.type) {
2626 case HAMMER2_BREF_TYPE_INODE:
2627 KKASSERT((parent->data->ipdata.op_flags &
2628 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2629 KKASSERT(parent->data != NULL);
2630 base = &parent->data->ipdata.u.blockset.blockref[0];
2631 count = HAMMER2_SET_COUNT;
2633 case HAMMER2_BREF_TYPE_INDIRECT:
2634 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2635 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2638 base = &parent->data->npdata[0];
2639 count = parent->bytes / sizeof(hammer2_blockref_t);
2641 case HAMMER2_BREF_TYPE_VOLUME:
2642 KKASSERT(parent->data != NULL);
2643 base = &hmp->voldata.sroot_blockset.blockref[0];
2644 count = HAMMER2_SET_COUNT;
2646 case HAMMER2_BREF_TYPE_FREEMAP:
2647 KKASSERT(parent->data != NULL);
2648 base = &hmp->voldata.freemap_blockset.blockref[0];
2649 count = HAMMER2_SET_COUNT;
2652 panic("hammer2_chain_create: unrecognized blockref type: %d",
2660 * Make sure we've counted the brefs
2662 if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2663 hammer2_chain_countbrefs(parent, base, count);
2665 KKASSERT(above->live_count >= 0 && above->live_count <= count);
2668 * If no free blockref could be found we must create an indirect
2669 * block and move a number of blockrefs into it. With the parent
2670 * locked we can safely lock each child in order to delete+duplicate
2671 * it without causing a deadlock.
2673 * This may return the new indirect block or the old parent depending
2674 * on where the key falls. NULL is returned on error.
2676 if (above->live_count == count) {
2677 hammer2_chain_t *nparent;
2679 nparent = hammer2_chain_create_indirect(trans, parent,
2682 if (nparent == NULL) {
2684 hammer2_chain_drop(chain);
2688 if (parent != nparent) {
2689 hammer2_chain_unlock(parent);
2690 parent = *parentp = nparent;
2696 * Link the chain into its parent. Later on we will have to set
2697 * the MOVED bit in situations where we don't mark the new chain
2698 * as being modified.
2700 if (chain->above != NULL)
2701 panic("hammer2: hammer2_chain_create: chain already connected");
2702 KKASSERT(chain->above == NULL);
2703 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2704 hammer2_chain_insert(above, NULL, chain,
2705 HAMMER2_CHAIN_INSERT_SPIN |
2706 HAMMER2_CHAIN_INSERT_LIVE,
2711 * Mark the newly created chain modified.
2713 * Device buffers are not instantiated for DATA elements
2714 * as these are handled by logical buffers.
2716 * Indirect and freemap node indirect blocks are handled
2717 * by hammer2_chain_create_indirect() and not by this
2720 * Data for all other bref types is expected to be
2721 * instantiated (INODE, LEAF).
2723 switch(chain->bref.type) {
2724 case HAMMER2_BREF_TYPE_DATA:
2725 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2726 case HAMMER2_BREF_TYPE_INODE:
2727 hammer2_chain_modify(trans, &chain,
2728 HAMMER2_MODIFY_OPTDATA |
2729 HAMMER2_MODIFY_ASSERTNOCOPY);
2733 * Remaining types are not supported by this function.
2734 * In particular, INDIRECT and LEAF_NODE types are
2735 * handled by create_indirect().
2737 panic("hammer2_chain_create: bad type: %d",
2744 * When reconnecting a chain we must set MOVED and setsubmod
2745 * so the flush recognizes that it must update the bref in
2748 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2749 hammer2_chain_ref(chain);
2750 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2753 hammer2_chain_setsubmod(trans, chain);
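/*
 * Editor's illustrative sketch (not part of the original source): the
 * lookup-then-create pattern the comment above requires.  The key,
 * keybits and size values are hypothetical placeholders; only the
 * function calls and flags come from this file.
 */
#if 0
static void
example_create_data_block(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			  hammer2_key_t lbase, int keybits, size_t nbytes)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	/* Seek (*parentp) to the insertion point for lbase */
	chain = hammer2_chain_lookup(parentp, &key_next, lbase, lbase,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		/* No existing chain, insert a new DATA chain */
		hammer2_chain_create(trans, parentp, &chain,
				     lbase, keybits,
				     HAMMER2_BREF_TYPE_DATA, nbytes);
	}
	/* chain is returned exclusively locked and marked modified */
	hammer2_chain_unlock(chain);
}
#endif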
2762 * Replace (*chainp) with a duplicate in-memory chain structure which shares
2763 * the same core and media state as the original. The original *chainp is
2764 * unlocked and the replacement will be returned locked. The duplicated
2765 * chain is inserted under (*parentp).
2767 * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
2768 * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2769 * FULL. This typically means that the caller is creating the chain after
2770 * doing a hammer2_chain_lookup().
2772 * The old chain must be in a DELETED state unless snapshot is non-zero.
2774 * The new chain will be live (i.e. not deleted), and modified.
2776 * If (parent) is non-NULL then the new duplicated chain is inserted under
2779 * If (parent) is NULL then the newly duplicated chain is not inserted
2780 * anywhere, similar to if it had just been chain_alloc()'d (suitable for
2781 * passing into hammer2_chain_create() after this function returns).
2783 * WARNING! This function cannot take snapshots all by itself. The caller
2784 * needs to do other massaging for snapshots.
2786 * WARNING! This function calls create which means it can insert indirect
2787 * blocks. Callers may have to refactor locked chains held across
2788 * the call (other than the ones passed into the call).
2791 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2792 hammer2_chain_t **chainp, hammer2_blockref_t *bref,
2793 int snapshot, int duplicate_reason)
2795 hammer2_mount_t *hmp;
2796 hammer2_chain_t *parent;
2797 hammer2_chain_t *ochain;
2798 hammer2_chain_t *nchain;
2799 hammer2_chain_core_t *above;
2803 * We want nchain to be our go-to live chain, but ochain may be in
2804 * a MODIFIED state within the current flush synchronization segment.
2805 * Force any further modifications of ochain to do another COW
2806 * operation even if modify_tid indicates that one is not needed.
2808 * We don't want to set FORCECOW on nchain simply as an optimization,
2809 * as many duplication calls simply move chains into ichains and
2810 * then delete the original.
2812 * WARNING! We should never resolve DATA to device buffers
2813 * (XXX allow it if the caller did?), and since
2814 * we currently do not have the logical buffer cache
2815 * buffer in-hand to fix its cached physical offset
2816 * we also force the modify code to not COW it. XXX
2820 KKASSERT(snapshot == 1 || (ochain->flags & HAMMER2_CHAIN_DELETED));
2822 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_FORCECOW);
2825 * Now create a duplicate of the chain structure, associating
2826 * it with the same core, making it the same size, pointing it
2827 * to the same bref (the same media block).
2829 * Give the duplicate the same modify_tid that we previously
2830 * ensured was sufficiently advanced to trigger a block table
2831 * insertion on flush.
2833 * NOTE: bref.mirror_tid duplicated by virtue of bref copy in
2834 * hammer2_chain_alloc()
2837 bref = &ochain->bref;
2839 nchain = hammer2_chain_alloc(hmp, NULL, trans, bref);
2840 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SNAPSHOT);
2842 nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, bref);
2844 hammer2_chain_core_alloc(trans, nchain, ochain);
2845 bytes = (hammer2_off_t)1 <<
2846 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2847 nchain->bytes = bytes;
2848 nchain->modify_tid = ochain->modify_tid;
2849 nchain->inode_reason = ochain->inode_reason + 0x100000;
2850 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2851 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2852 if (ochain->flags & HAMMER2_CHAIN_UNLINKED)
2853 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_UNLINKED);
2856 * Switch from ochain to nchain
2858 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER |
2859 HAMMER2_RESOLVE_NOREF);
2860 /* nchain has 1 ref */
2861 hammer2_chain_unlock(ochain);
2864 * Place nchain in the modified state, instantiate media data
2865 * if necessary. Because modify_tid is already completely
2866 * synchronized this should not result in a delete-duplicate.
2868 * We want nchain at the target to look like a new insertion.
2869 * Forcing the modification to be INPLACE accomplishes this
2870 * because we get the same nchain with an updated modify_tid.
2872 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2873 hammer2_chain_modify(trans, &nchain,
2874 HAMMER2_MODIFY_OPTDATA |
2875 HAMMER2_MODIFY_NOREALLOC |
2876 HAMMER2_MODIFY_INPLACE);
2877 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2878 hammer2_chain_modify(trans, &nchain,
2879 HAMMER2_MODIFY_OPTDATA |
2880 HAMMER2_MODIFY_INPLACE);
2882 hammer2_chain_modify(trans, &nchain,
2883 HAMMER2_MODIFY_INPLACE);
2887 * If parent is not NULL the duplicated chain will be entered under
2888 * the parent and the MOVED bit set.
2890 * Having both chains locked is extremely important for atomicity.
2892 if (parentp && (parent = *parentp) != NULL) {
2893 above = parent->core;
2894 KKASSERT(ccms_thread_lock_owned(&above->cst));
2895 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2896 KKASSERT(parent->refs > 0);
2898 hammer2_chain_create(trans, parentp, &nchain,
2899 nchain->bref.key, nchain->bref.keybits,
2900 nchain->bref.type, nchain->bytes);
2903 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2904 hammer2_chain_ref(nchain);
2905 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2907 hammer2_chain_setsubmod(trans, nchain);
2912 * Unconditionally set MOVED to force the parent blockrefs to
2913 * update, and adjust update_hi below nchain so nchain's
2914 * blockrefs are updated with the new attachment.
2916 if (nchain->core->update_hi < trans->sync_tid) {
2917 spin_lock(&nchain->core->cst.spin);
2918 if (nchain->core->update_hi < trans->sync_tid)
2919 nchain->core->update_hi = trans->sync_tid;
2920 spin_unlock(&nchain->core->cst.spin);
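/*
 * Editor's illustrative sketch (not part of the original source): the
 * delete + duplicate sequence used elsewhere in this file (e.g. by the
 * indirect-block move loop) to re-home a locked chain under a different
 * locked parent.  This is a fragment; trans, chain and new_parent stand
 * in for the caller's state.
 */
#if 0
	hammer2_chain_delete(trans, chain, 0);
	hammer2_chain_duplicate(trans, &new_parent, &chain, NULL, 0, 0);
	/* chain is now the duplicate, inserted under new_parent */
#endif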
2928 * Special in-place delete-duplicate sequence which does not require a
2929 * locked parent. (*chainp) is marked DELETED and atomically replaced
2930 * with a duplicate. Atomicity is at the very-fine spin-lock level in
2931 * order to ensure that lookups do not race us.
2933 * If the old chain is already marked deleted the new chain will also be
2934 * marked deleted. This case can occur when an inode is removed from the
2935 * filesystem but programs still have an open descriptor to it, and during
2936 * flushes when the flush needs to operate on a chain that is deleted in
2937 * the live view but still alive in the flush view.
2939 * The new chain will be marked modified for the current transaction.
2942 hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp,
2945 hammer2_mount_t *hmp;
2946 hammer2_chain_t *ochain;
2947 hammer2_chain_t *nchain;
2948 hammer2_chain_core_t *above;
2951 if (hammer2_debug & 0x20000)
2955 * Note that we do not have to call setsubmod on ochain, calling it
2956 * on nchain is sufficient.
2961 if (ochain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2962 KKASSERT(ochain->data);
2966 * First create a duplicate of the chain structure.
2967 * (nchain is allocated with one ref).
2969 * In the case where nchain inherits ochain's core, nchain is
2970 * effectively locked due to ochain being locked (and sharing the
2971 * core), until we can give nchain its own official lock.
2973 nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, &ochain->bref);
2974 if (flags & HAMMER2_DELDUP_RECORE)
2975 hammer2_chain_core_alloc(trans, nchain, NULL);
2977 hammer2_chain_core_alloc(trans, nchain, ochain);
2978 above = ochain->above;
2980 bytes = (hammer2_off_t)1 <<
2981 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2982 nchain->bytes = bytes;
2985 * Duplicate inherits ochain's live state including its modification
2986 * state. This function disposes of the original. Because we are
2987 * doing this in-place under the same parent the block array
2988 * inserted/deleted state does not change.
2990 * The caller isn't expected to make further modifications of ochain
2991 * but set the FORCECOW bit anyway, just in case it does. If ochain
2992 * was previously marked FORCECOW we also flag nchain FORCECOW
2993 * (used during hardlink splits). FORCECOW forces a reallocation
2994 * of the block when we modify the chain a little later, it does
2995 * not force another delete-duplicate.
2997 * NOTE: bref.mirror_tid duplicated by virtue of bref copy in
2998 * hammer2_chain_alloc()
3000 nchain->data_count += ochain->data_count;
3001 nchain->inode_count += ochain->inode_count;
3002 atomic_set_int(&nchain->flags,
3003 ochain->flags & (HAMMER2_CHAIN_INITIAL |
3004 HAMMER2_CHAIN_FORCECOW |
3005 HAMMER2_CHAIN_UNLINKED));
3006 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_FORCECOW);
3007 nchain->inode_reason = ochain->inode_reason + 0x1000;
3010 * Lock nchain so both chains are now locked (extremely important
3011 * for atomicity). Mark ochain deleted and reinsert into the topology
3012 * and insert nchain all in one go.
3014 * If the ochain is already deleted it is left alone and nchain
3015 * is inserted into the topology as a deleted chain. This is
3016 * important because it allows ongoing operations to be executed
3017 * on a deleted inode which still has open descriptors.
3019 * The deleted case can also occur when a flush delete-duplicates
3020 * a node which is being concurrently modified by ongoing operations
3021 * in a later transaction. This creates a problem because the flush
3022 * is intended to update blockrefs which then propagate, allowing
3023 * the original covering in-memory chains to be freed up. In this
3024 * situation the flush code does NOT free the original covering
3025 * chains and will re-apply them to successive copies.
3027 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
3028 /* extra ref still present from original allocation */
3030 KKASSERT(ochain->flags & HAMMER2_CHAIN_ONRBTREE);
3031 spin_lock(&above->cst.spin);
3032 KKASSERT(ochain->flags & HAMMER2_CHAIN_ONRBTREE);
3035 * Ultimately nchain->modify_tid will be set to trans->sync_tid,
3036 * but we can't do that here because we want to call
3037 * hammer2_chain_modify() to reallocate the block (if necessary).
3039 nchain->modify_tid = ochain->modify_tid;
3041 if (ochain->flags & HAMMER2_CHAIN_DELETED) {
3043 * ochain was deleted
3045 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DELETED);
3046 if (ochain->delete_tid > trans->sync_tid) {
3048 * delete-duplicate a chain deleted in a later
3049 * transaction. Only allowed on chains created
3050 * before or during the current transaction (flush
3051 * code should filter out chains created after the
3052 * current transaction).
3054 * To make this work is a bit of a hack. We convert
3055 * ochain's delete_tid to the current sync_tid and
3056 * create an nchain which sets up ochain's original
3059 * This effectively forces ochain to flush as a
3060 * deletion and nchain as a creation. Thus MOVED
3061 * must be set in ochain (it should already be
3062 * set since its original delete_tid could not
3063 * have been flushed yet). Since ochain's delete_tid
3064 * has been moved down to sync_tid, a re-flush at
3065 * sync_tid won't try to delete-duplicate ochain
3068 KKASSERT(ochain->modify_tid <= trans->sync_tid);
3069 nchain->delete_tid = ochain->delete_tid;
3070 ochain->delete_tid = trans->sync_tid;
3071 KKASSERT(ochain->flags & HAMMER2_CHAIN_MOVED);
3072 } else if (ochain->delete_tid == trans->sync_tid) {
3074 * ochain was deleted in the current transaction
3076 nchain->delete_tid = trans->sync_tid;
3079 * ochain was deleted in a prior transaction.
3080 * create and delete nchain in the current
3083 * (delete_tid might represent a deleted inode
3084 * which still has an open descriptor).
3086 nchain->delete_tid = trans->sync_tid;
3088 hammer2_chain_insert(above, ochain->inlayer, nchain, 0, 0);
3091 * ochain was not deleted, delete it in the current
3094 KKASSERT(trans->sync_tid >= ochain->modify_tid);
3095 ochain->delete_tid = trans->sync_tid;
3096 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
3097 atomic_add_int(&above->live_count, -1);
3098 hammer2_chain_insert(above, NULL, nchain,
3099 HAMMER2_CHAIN_INSERT_LIVE, 0);
3102 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3103 hammer2_chain_ref(ochain);
3104 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
3106 spin_unlock(&above->cst.spin);
3109 * ochain must be unlocked because ochain and nchain might share
3110 * a buffer cache buffer, so we need to release it so nchain can
3111 * potentially obtain it.
3113 hammer2_chain_unlock(ochain);
3116 * Finish fixing up nchain. A new block will be allocated if
3117 * crossing a synchronization point (meta-data only).
3119 * Calling hammer2_chain_modify() will update modify_tid to
3120 * (typically) trans->sync_tid.
3122 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
3123 hammer2_chain_modify(trans, &nchain,
3124 HAMMER2_MODIFY_OPTDATA |
3125 HAMMER2_MODIFY_NOREALLOC |
3126 HAMMER2_MODIFY_INPLACE);
3127 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
3128 hammer2_chain_modify(trans, &nchain,
3129 HAMMER2_MODIFY_OPTDATA |
3130 HAMMER2_MODIFY_INPLACE);
3132 hammer2_chain_modify(trans, &nchain,
3133 HAMMER2_MODIFY_INPLACE);
3135 hammer2_chain_drop(nchain);
3138 * Unconditionally set MOVED to force the parent blockrefs to
3139 * update as the chain_modify() above won't necessarily do it.
3141 * Adjust update_hi below nchain so nchain's blockrefs are updated
3142 * with the new attachment.
3144 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3145 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
3146 hammer2_chain_ref(nchain);
3149 if (nchain->core->update_hi < trans->sync_tid) {
3150 spin_lock(&nchain->core->cst.spin);
3151 if (nchain->core->update_hi < trans->sync_tid)
3152 nchain->core->update_hi = trans->sync_tid;
3153 spin_unlock(&nchain->core->cst.spin);
3156 hammer2_chain_setsubmod(trans, nchain);
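/*
 * Editor's illustrative sketch (not part of the original source): how a
 * caller replaces a locked chain in place.  After the call *chainp points
 * at the new live copy, already marked modified in the current
 * transaction; the old copy remains in the topology marked DELETED for
 * the flush code.  This is a fragment; trans and chain stand in for the
 * caller's state.
 */
#if 0
	/* chain must be exclusively locked by the caller */
	hammer2_chain_delete_duplicate(trans, &chain, 0);
	/* chain now refers to the duplicate; keep using it as before */
#endif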
3161 * Create a snapshot of the specified {parent, ochain} with the specified
3162 * label. The originating hammer2_inode must be exclusively locked for
3165 * The ioctl code has already synced the filesystem.
3168 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **ochainp,
3169 hammer2_ioc_pfs_t *pfs)
3171 hammer2_mount_t *hmp;
3172 hammer2_chain_t *ochain = *ochainp;
3173 hammer2_chain_t *nchain;
3174 hammer2_inode_data_t *ipdata;
3175 hammer2_inode_t *nip;
3182 kprintf("snapshot %s ochain->refs %d ochain->flags %08x\n",
3183 pfs->name, ochain->refs, ochain->flags);
3185 name_len = strlen(pfs->name);
3186 lhc = hammer2_dirhash(pfs->name, name_len);
3189 opfs_clid = ochain->data->ipdata.pfs_clid;
3194 * Create the snapshot directory under the super-root
3196 * Set PFS type, generate a unique filesystem id, and generate
3197 * a cluster id. Use the same clid when snapshotting a PFS root,
3198 * which theoretically allows the snapshot to be used as part of
3199 * the same cluster (perhaps as a cache).
3201 * Copy the (flushed) ochain's blockref array. Theoretically we
3202 * could use chain_duplicate() but it becomes difficult to disentangle
3203 * the shared core so for now just brute-force it.
3209 nip = hammer2_inode_create(trans, hmp->sroot, &vat, proc0.p_ucred,
3210 pfs->name, name_len, &nchain, &error);
3213 ipdata = hammer2_chain_modify_ip(trans, nip, &nchain, 0);
3214 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
3215 kern_uuidgen(&ipdata->pfs_fsid, 1);
3216 if (ochain->flags & HAMMER2_CHAIN_PFSROOT)
3217 ipdata->pfs_clid = opfs_clid;
3219 kern_uuidgen(&ipdata->pfs_clid, 1);
3220 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_PFSROOT);
3221 ipdata->u.blockset = ochain->data->ipdata.u.blockset;
3223 hammer2_inode_unlock_ex(nip, nchain);
3229 * Create an indirect block that covers one or more of the elements in the
3230 * current parent. Either returns the existing parent with no locking or
3231 * ref changes or returns the new indirect block locked and referenced
3232 * leaving the original parent's lock/ref intact as well.
3234 * If an error occurs, NULL is returned and *errorp is set to the error.
3236 * The returned chain depends on where the specified key falls.
3238 * The key/keybits for the indirect mode only need to follow four rules:
3240 * (1) That all elements underneath it fit within its key space and
3242 * (2) That all elements outside it are outside its key space.
3244 * (3) When creating the new indirect block any elements in the current
3245 * parent that fit within the new indirect block's keyspace must be
3246 * moved into the new indirect block.
3248 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
3249 * keyspace than the current parent, but lookup/iteration rules will
3250 * ensure (and must ensure) that rule (2) for all parents leading up
3251 * to the nearest inode or the root volume header is adhered to. This
3252 * is accomplished by always recursing through matching keyspaces in
3253 * the hammer2_chain_lookup() and hammer2_chain_next() API.
3255 * The current implementation calculates the current worst-case keyspace by
3256 * iterating the current parent and then divides it into two halves, choosing
3257 * whichever half has the most elements (not necessarily the half containing
3258 * the requested key).
3260 * We can also opt to use the half with the least number of elements. This
3261 * causes lower-numbered keys (aka logical file offsets) to recurse through
3262 * fewer indirect blocks and higher-numbered keys to recurse through more.
3263 * This also has the risk of not moving enough elements to the new indirect
3264 * block and being forced to create several indirect blocks before the element can be inserted.
3267 * Must be called with an exclusively locked parent.
3269 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
3270 hammer2_key_t *keyp, int keybits,
3271 hammer2_blockref_t *base, int count);
3272 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
3273 hammer2_key_t *keyp, int keybits,
3274 hammer2_blockref_t *base, int count);
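/*
 * Editor's illustrative sketch (not part of the original source): rules
 * (1) and (2) above reduce to a mask test.  A key falls inside an
 * indirect block's keyspace when every bit above 'keybits' matches the
 * block's key.  This mirrors the test the move loop below uses when
 * deciding which brefs migrate into the new indirect block (assumes
 * keybits < 64).
 */
#if 0
static __inline int
example_key_in_keyspace(hammer2_key_t ikey, int keybits, hammer2_key_t key)
{
	hammer2_key_t mask = ~(((hammer2_key_t)1 << keybits) - 1);

	return ((mask & (ikey ^ key)) == 0);
}
#endif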
3277 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
3278 hammer2_key_t create_key, int create_bits,
3279 int for_type, int *errorp)
3281 hammer2_mount_t *hmp;
3282 hammer2_chain_core_t *above;
3283 hammer2_chain_core_t *icore;
3284 hammer2_blockref_t *base;
3285 hammer2_blockref_t *bref;
3286 hammer2_blockref_t bcopy;
3287 hammer2_chain_t *chain;
3288 hammer2_chain_t *ichain;
3289 hammer2_chain_t dummy;
3290 hammer2_key_t key = create_key;
3291 hammer2_key_t key_beg;
3292 hammer2_key_t key_end;
3293 hammer2_key_t key_next;
3294 int keybits = create_bits;
3301 int maxloops = 300000;
3306 * Calculate the base blockref pointer or NULL if the chain
3307 * is known to be empty. We need to calculate the array count
3308 * for RB lookups either way.
3312 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
3313 above = parent->core;
3315 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
3316 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
3319 switch(parent->bref.type) {
3320 case HAMMER2_BREF_TYPE_INODE:
3321 count = HAMMER2_SET_COUNT;
3323 case HAMMER2_BREF_TYPE_INDIRECT:
3324 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3325 count = parent->bytes / sizeof(hammer2_blockref_t);
3327 case HAMMER2_BREF_TYPE_VOLUME:
3328 count = HAMMER2_SET_COUNT;
3330 case HAMMER2_BREF_TYPE_FREEMAP:
3331 count = HAMMER2_SET_COUNT;
3334 panic("hammer2_chain_create_indirect: "
3335 "unrecognized blockref type: %d",
3341 switch(parent->bref.type) {
3342 case HAMMER2_BREF_TYPE_INODE:
3343 base = &parent->data->ipdata.u.blockset.blockref[0];
3344 count = HAMMER2_SET_COUNT;
3346 case HAMMER2_BREF_TYPE_INDIRECT:
3347 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3348 base = &parent->data->npdata[0];
3349 count = parent->bytes / sizeof(hammer2_blockref_t);
3351 case HAMMER2_BREF_TYPE_VOLUME:
3352 base = &hmp->voldata.sroot_blockset.blockref[0];
3353 count = HAMMER2_SET_COUNT;
3355 case HAMMER2_BREF_TYPE_FREEMAP:
3356 base = &hmp->voldata.freemap_blockset.blockref[0];
3357 count = HAMMER2_SET_COUNT;
3360 panic("hammer2_chain_create_indirect: "
3361 "unrecognized blockref type: %d",
3369 * dummy used in later chain allocation (no longer used for lookups).
3371 bzero(&dummy, sizeof(dummy));
3372 dummy.delete_tid = HAMMER2_MAX_TID;
3375 * When creating an indirect block for a freemap node or leaf
3376 * the key/keybits must be fitted to static radix levels because
3377 * particular radix levels use particular reserved blocks in the freemap.
3380 * This routine calculates the key/radix of the indirect block
3381 * we need to create, and whether it is on the high-side or the low-side.
3384 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3385 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3386 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
3389 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
3394 * Normalize the key for the radix being represented, keeping the
3395 * high bits and throwing away the low bits.
3397 key &= ~(((hammer2_key_t)1 << keybits) - 1);
3400 * How big should our new indirect block be? It has to be at least
3401 * as large as its parent.
3403 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
3404 nbytes = HAMMER2_IND_BYTES_MIN;
3406 nbytes = HAMMER2_IND_BYTES_MAX;
3407 if (nbytes < count * sizeof(hammer2_blockref_t))
3408 nbytes = count * sizeof(hammer2_blockref_t);
3411 * Ok, create our new indirect block
3413 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3414 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3415 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
3417 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
3419 dummy.bref.key = key;
3420 dummy.bref.keybits = keybits;
3421 dummy.bref.data_off = hammer2_getradix(nbytes);
3422 dummy.bref.methods = parent->bref.methods;
3424 ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref);
3425 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
3426 hammer2_chain_core_alloc(trans, ichain, NULL);
3427 icore = ichain->core;
3428 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
3429 hammer2_chain_drop(ichain); /* excess ref from alloc */
3432 * We have to mark it modified to allocate its block, but use
3433 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
3434 * it won't be acted upon by the flush code.
3436 * XXX leave the node unmodified, depend on the update_hi
3437 * flush to assign and modify parent blocks.
3439 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
3442 * Iterate the original parent and move the matching brefs into
3443 * the new indirect block.
3445 * XXX handle flushes.
3448 key_end = HAMMER2_MAX_KEY;
3450 spin_lock(&above->cst.spin);
3456 if (++loops > 100000) {
3457 spin_unlock(&above->cst.spin);
3458 panic("excessive loops r=%d p=%p base/count %p:%d %016jx\n",
3459 reason, parent, base, count, key_next);
3463 * NOTE: spinlock stays intact, returned chain (if not NULL)
3464 * is not referenced or locked which means that we
3465 * cannot safely check its flagged / deletion status
3468 chain = hammer2_combined_find(parent, base, count,
3469 &cache_index, &key_next,
3472 generation = above->generation;
3475 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3478 * Skip keys that are not within the key/radix of the new
3479 * indirect block. They stay in the parent.
3481 if ((~(((hammer2_key_t)1 << keybits) - 1) &
3482 (key ^ bref->key)) != 0) {
3483 goto next_key_spinlocked;
3487 * Load the new indirect block by acquiring the related
3488 * chains (potentially from media as it might not be
3489 * in-memory). Then move it to the new parent (ichain)
3490 * via DELETE-DUPLICATE.
3492 * chain is referenced but not locked. We must lock the
3493 * chain to obtain definitive DUPLICATED/DELETED state
3497 * Use chain already present in the RBTREE
3499 hammer2_chain_ref(chain);
3500 wasdup = ((chain->flags &
3501 HAMMER2_CHAIN_DUPLICATED) != 0);
3502 spin_unlock(&above->cst.spin);
3503 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
3504 HAMMER2_RESOLVE_NOREF);
3507 * Get chain for blockref element. _get returns NULL
3508 * on insertion race.
3511 spin_unlock(&above->cst.spin);
3512 chain = hammer2_chain_get(parent, &bcopy, generation);
3513 if (chain == NULL) {
3515 spin_lock(&above->cst.spin);
3518 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
3520 hammer2_chain_drop(chain);
3521 spin_lock(&above->cst.spin);
3524 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
3525 HAMMER2_RESOLVE_NOREF);
3530 * This is always live so if the chain has been delete-
3531 * duplicated we raced someone and we have to retry.
3533 * NOTE: Lookups can race delete-duplicate because
3534 * delete-duplicate does not lock the parent's core
3535 * (they just use the spinlock on the core). We must
3536 * check for races by comparing the DUPLICATED flag before
3537 * releasing the spinlock with the flag after locking the
3540 * (note reversed logic for this one)
3542 if (chain->flags & HAMMER2_CHAIN_DELETED) {
3543 hammer2_chain_unlock(chain);
3544 if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) &&
3552 * Shift the chain to the indirect block.
3554 * WARNING! Can cause held-over chains to require a refactor.
3555 * Fortunately we have none (our locked chains are
3556 * passed into and modified by the call).
3558 hammer2_chain_delete(trans, chain, 0);
3559 hammer2_chain_duplicate(trans, &ichain, &chain, NULL, 0, 1);
3560 hammer2_chain_unlock(chain);
3561 KKASSERT(parent->refs > 0);
3564 spin_lock(&above->cst.spin);
3565 next_key_spinlocked:
3566 if (--maxloops == 0)
3567 panic("hammer2_chain_create_indirect: maxloops");
3569 if (retry_same == 0) {
3570 if (key_next == 0 || key_next > key_end)
3576 spin_unlock(&above->cst.spin);
3579 * Insert the new indirect block into the parent now that we've
3580 * cleared out some entries in the parent. We calculated a good
3581 * insertion index in the loop above (ichain->index).
3583 * We don't have to set MOVED here because we mark ichain modified
3584 * down below (so the normal modified -> flush -> set-moved sequence
3587 * The insertion shouldn't race as this is a completely new block
3588 * and the parent is locked.
3590 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3591 hammer2_chain_insert(above, NULL, ichain,
3592 HAMMER2_CHAIN_INSERT_SPIN |
3593 HAMMER2_CHAIN_INSERT_LIVE,
3597 * Mark the new indirect block modified after insertion, which
3598 * will propagate up through parent all the way to the root and
3599 * also allocate the physical block in ichain for our caller,
3600 * and assign ichain->data to a pre-zero'd space (because there
3601 * is no prior data to copy into it).
3603 * We have to set update_hi in ichain's flags manually so the
3604 * flusher knows it has to recurse through it to get to all of
3605 * our moved blocks, then call setsubmod() to set the bit
3608 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3609 if (ichain->core->update_hi < trans->sync_tid) {
3610 spin_lock(&ichain->core->cst.spin);
3611 if (ichain->core->update_hi < trans->sync_tid)
3612 ichain->core->update_hi = trans->sync_tid;
3613 spin_unlock(&ichain->core->cst.spin);
3615 hammer2_chain_setsubmod(trans, ichain);
3618 * Figure out what to return.
3620 if (~(((hammer2_key_t)1 << keybits) - 1) &
3621 (create_key ^ key)) {
3623 * Key being created is outside the key range,
3624 * return the original parent.
3626 hammer2_chain_unlock(ichain);
3629 * Otherwise its in the range, return the new parent.
3630 * (leave both the new and old parent locked).
3639 * Calculate the keybits and highside/lowside of the freemap node the
3640 * caller is creating.
3642 * This routine will specify the next higher-level freemap key/radix
3643 * representing the lowest-ordered set. By doing so, eventually all
3644 * low-ordered sets will be moved one level down.
3646 * We have to be careful here because the freemap reserves a limited
3647 * number of blocks for a limited number of levels. So we can't just
3648 * push indiscriminately.
3651 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
3652 int keybits, hammer2_blockref_t *base, int count)
3654 hammer2_chain_core_t *above;
3655 hammer2_chain_t *chain;
3656 hammer2_blockref_t *bref;
3658 hammer2_key_t key_beg;
3659 hammer2_key_t key_end;
3660 hammer2_key_t key_next;
3664 int maxloops = 300000;
3667 above = parent->core;
3673 * Calculate the range of keys in the array being careful to skip
3674 * slots which are overridden with a deletion.
3677 key_end = HAMMER2_MAX_KEY;
3679 spin_lock(&above->cst.spin);
3682 if (--maxloops == 0) {
3683 panic("indkey_freemap shit %p %p:%d\n",
3684 parent, base, count);
3686 chain = hammer2_combined_find(parent, base, count,
3687 &cache_index, &key_next,
3688 key_beg, key_end, &bref);
3697 * NOTE: No need to check DUPLICATED here because we do
3698 * not release the spinlock.
3700 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3701 if (key_next == 0 || key_next > key_end)
3708 * Use the full live (not deleted) element for the scan
3709 * iteration. HAMMER2 does not allow partial replacements.
3711 * XXX should be built into hammer2_combined_find().
3713 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3715 if (keybits > bref->keybits) {
3717 keybits = bref->keybits;
3718 } else if (keybits == bref->keybits && bref->key < key) {
3725 spin_unlock(&above->cst.spin);
3728 * Return the keybits for a higher-level FREEMAP_NODE covering
3732 case HAMMER2_FREEMAP_LEVEL0_RADIX:
3733 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3735 case HAMMER2_FREEMAP_LEVEL1_RADIX:
3736 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3738 case HAMMER2_FREEMAP_LEVEL2_RADIX:
3739 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3741 case HAMMER2_FREEMAP_LEVEL3_RADIX:
3742 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3744 case HAMMER2_FREEMAP_LEVEL4_RADIX:
3745 panic("hammer2_chain_indkey_freemap: level too high");
3748 panic("hammer2_chain_indkey_freemap: bad radix");
3757 * Calculate the keybits and highside/lowside of the indirect block the
3758 * caller is creating.
3761 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3762 int keybits, hammer2_blockref_t *base, int count)
3764 hammer2_chain_core_t *above;
3765 hammer2_blockref_t *bref;
3766 hammer2_chain_t *chain;
3767 hammer2_key_t key_beg;
3768 hammer2_key_t key_end;
3769 hammer2_key_t key_next;
3775 int maxloops = 300000;
3778 above = parent->core;
3783 * Calculate the range of keys in the array being careful to skip
3784 * slots which are overridden with a deletion. Once the scan
3785 * completes we will cut the key range in half and shift half the
3786 * range into the new indirect block.
3789 key_end = HAMMER2_MAX_KEY;
3791 spin_lock(&above->cst.spin);
3794 if (--maxloops == 0) {
3795 panic("indkey_normal shit %p %p:%d\n",
3796 parent, base, count);
3798 chain = hammer2_combined_find(parent, base, count,
3799 &cache_index, &key_next,
3800 key_beg, key_end, &bref);
3809 * NOTE: No need to check DUPLICATED here because we do
3810 * not release the spinlock.
3812 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3813 if (key_next == 0 || key_next > key_end)
3820 * Use the full live (not deleted) element for the scan
3821 * iteration. HAMMER2 does not allow partial replacements.
3823 * XXX should be built into hammer2_combined_find().
3825 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3828 * Expand our calculated key range (key, keybits) to fit
3829 * the scanned key. nkeybits represents the full range
3830 * that we will later cut in half (two halves @ nkeybits - 1).
3833 if (nkeybits < bref->keybits) {
3834 if (bref->keybits > 64) {
3835 kprintf("bad bref chain %p bref %p\n",
3839 nkeybits = bref->keybits;
3841 while (nkeybits < 64 &&
3842 (~(((hammer2_key_t)1 << nkeybits) - 1) &
3843 (key ^ bref->key)) != 0) {
3848 * If the new key range is larger we have to determine
3849 * which side of the new key range the existing keys fall
3850 * under by checking the high bit, then collapsing the
3851 * locount into the hicount or vice-versa.
3853 if (keybits != nkeybits) {
3854 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3865 * The newly scanned key will be in the lower half or the
3866 * upper half of the (new) key range.
3868 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3877 spin_unlock(&above->cst.spin);
3878 bref = NULL; /* now invalid (safety) */
3881 * Adjust keybits to represent half of the full range calculated
3882 * above (radix 63 max)
3887 * Select whichever half contains the most elements. Theoretically
3888 * we can select either side as long as it contains at least one
3889 * element (in order to ensure that a free slot is present to hold
3890 * the indirect block).
3892 if (hammer2_indirect_optimize) {
3894 * Insert node for least number of keys, this will arrange
3895 * the first few blocks of a large file or the first few
3896 * inodes in a directory with fewer indirect blocks when created linearly.
3899 if (hicount < locount && hicount != 0)
3900 key |= (hammer2_key_t)1 << keybits;
3902 key &= ~(hammer2_key_t)1 << keybits;
3905 * Insert node for most number of keys, best for heavily
3908 if (hicount > locount)
3909 key |= (hammer2_key_t)1 << keybits;
3911 key &= ~(hammer2_key_t)1 << keybits;
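/*
 * Editor's illustrative sketch (not part of the original source): the
 * keybits expansion step above in isolation.  For example, with key =
 * 0x0000 and a scanned bref key of 0x1800 (bits 11 and 12 differ),
 * nkeybits grows until it reaches 13, after which the range is cut in
 * half and the scanned key counts toward the upper half.
 */
#if 0
static __inline int
example_expand_keybits(hammer2_key_t key, hammer2_key_t skey, int nkeybits)
{
	while (nkeybits < 64 &&
	       (~(((hammer2_key_t)1 << nkeybits) - 1) & (key ^ skey)) != 0) {
		++nkeybits;
	}
	return (nkeybits);
}
#endif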
3919 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3920 * sets chain->delete_tid. The chain is not actually marked possibly-free
3921 * in the freemap until the deletion is completely flushed out (because
3922 * a flush which doesn't cover the entire deletion is flushing the deleted
3923 * chain as if it were live).
3925 * This function does NOT generate a modification to the parent. It
3926 * would be nearly impossible to figure out which parent to modify anyway.
3927 * Such modifications are handled top-down by the flush code and are
3928 * properly merged using the flush synchronization point.
3930 * The find/get code will properly overload the RBTREE check on top of
3931 * the bref check to detect deleted entries.
3933 * This function is NOT recursive. Any entity already pushed into the
3934 * chain (such as an inode) may still need visibility into its contents,
3935 * as well as the ability to read and modify the contents. For example,
3936 * for an unlinked file which is still open.
3938 * NOTE: This function does NOT set chain->modify_tid, allowing future
3939 * code to distinguish between live and deleted chains by testing
3940 * trans->sync_tid vs chain->modify_tid and chain->delete_tid.
3942 * NOTE: Deletions normally do not occur in the middle of a duplication
3943 * chain but we use a trick for hardlink migration that refactors
3944 * the originating inode without deleting it, so we make no assumptions
3948 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
3950 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3953 * Nothing to do if already marked.
3955 if (chain->flags & HAMMER2_CHAIN_DELETED)
3959 * The setting of DELETED causes finds, lookups, and _next iterations
3960 * to no longer recognize the chain. RB_SCAN()s will still have
3961 * visibility (needed for flush serialization points).
3963 * We need the spinlock on the core whose RBTREE contains chain
3964 * to protect against races.
3966 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
3967 spin_lock(&chain->above->cst.spin);
3969 KKASSERT(trans->sync_tid >= chain->modify_tid);
3970 chain->delete_tid = trans->sync_tid;
3971 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
3972 atomic_add_int(&chain->above->live_count, -1);
3973 ++chain->above->generation;
3976 * We must set MOVED along with DELETED for the flush code to
3977 * recognize the operation and properly disconnect the chain
3980 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3981 hammer2_chain_ref(chain);
3982 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
3984 spin_unlock(&chain->above->cst.spin);
3986 hammer2_chain_setsubmod(trans, chain);
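/*
 * Editor's illustrative sketch (not part of the original source): a
 * typical removal, locating the chain covering a single key and marking
 * it deleted in the current transaction.  The actual blockref removal in
 * the parent is deferred to the flush.
 */
#if 0
static void
example_delete_single_key(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			  hammer2_key_t key)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	chain = hammer2_chain_lookup(parentp, &key_next, key, key,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	if (chain) {
		hammer2_chain_delete(trans, chain, 0);
		hammer2_chain_unlock(chain);
	}
}
#endif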
3990 * Called with the core spinlock held to check for freeable layers.
3991 * Used by the flush code. Layers can wind up not being freed due
3992 * to the temporary layer->refs count. This function frees up any
3993 * layers that were missed.
3996 hammer2_chain_layer_check_locked(hammer2_mount_t *hmp,
3997 hammer2_chain_core_t *core)
3999 hammer2_chain_layer_t *layer;
4000 hammer2_chain_layer_t *tmp;
4002 tmp = TAILQ_FIRST(&core->layerq);
4003 while ((layer = tmp) != NULL) {
4004 tmp = TAILQ_NEXT(tmp, entry);
4005 if (layer->refs == 0 && RB_EMPTY(&layer->rbtree)) {
4006 TAILQ_REMOVE(&core->layerq, layer, entry);
4009 spin_unlock(&core->cst.spin);
4010 kfree(layer, hmp->mchain);
4011 spin_lock(&core->cst.spin);
4019 * Returns the index of the nearest element in the blockref array >= elm.
4020 * Returns (count) if no element could be found.
4022 * Sets *key_nextp to the next key for loop purposes but does not modify
4023 * it if the next key would be higher than the current value of *key_nextp.
4024 * Note that *key_nextp can overflow to 0, which should be tested by the caller.
4027 * (*cache_indexp) is a heuristic and can be any value without affecting the result.
4030 * The spin lock on the related chain must be held.
4033 hammer2_base_find(hammer2_chain_t *chain,
4034 hammer2_blockref_t *base, int count,
4035 int *cache_indexp, hammer2_key_t *key_nextp,
4036 hammer2_key_t key_beg, hammer2_key_t key_end)
4038 hammer2_chain_core_t *core = chain->core;
4039 hammer2_blockref_t *scan;
4040 hammer2_key_t scan_end;
4045 * Require that live chains have already had their brefs counted
4046 * (HAMMER2_CORE_COUNTEDBREFS) so we can optimize operations.
4048 KKASSERT((chain->flags & HAMMER2_CHAIN_DUPLICATED) ||
4049 core->flags & HAMMER2_CORE_COUNTEDBREFS);
4054 if (count == 0 || base == NULL)
4058 * Sequential optimization using *cache_indexp. This is the most common case.
4061 * We can avoid trailing empty entries on live chains; otherwise
4062 * we might have to check the whole block array.
4066 if (chain->flags & HAMMER2_CHAIN_DUPLICATED)
4069 limit = core->live_zero;
4074 KKASSERT(i < count);
4080 while (i > 0 && (scan->type == 0 || scan->key > key_beg)) {
4087 * Search forwards, stop when we find a scan element which
4088 * encloses the key or until we know that there are no further elements.
4092 if (scan->type != 0) {
4093 if (scan->key > key_beg)
4095 scan_end = scan->key +
4096 ((hammer2_key_t)1 << scan->keybits) - 1;
4097 if (scan_end >= key_beg)
4110 scan_end = scan->key +
4111 ((hammer2_key_t)1 << scan->keybits);
4112 if (scan_end && (*key_nextp > scan_end ||
4114 *key_nextp = scan_end;
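/*
 * Editorial sketch (not part of the original source): a self-contained
 * rendition of the scan above -- back up from the cached index while the
 * element at hand starts beyond the search key, then scan forward until an
 * element encloses or passes the key.  struct example_ref and example_find
 * are made-up stand-ins for hammer2_blockref_t and hammer2_base_find();
 * only the fields the scan needs are modeled and empty slots have type 0.
 */
#if 0
#include <stdint.h>

struct example_ref {
	int		type;		/* 0 == empty slot */
	uint64_t	key;
	int		keybits;	/* element covers 2^keybits keys */
};

static int
example_find(const struct example_ref *base, int count,
	     int *cache_indexp, uint64_t key_beg)
{
	uint64_t scan_end;
	int i;

	/* Start from the cached hint, clamped to the array. */
	i = *cache_indexp;
	if (i >= count)
		i = count - 1;
	if (i < 0)
		i = 0;

	/* Back up while the element at i begins beyond the search key. */
	while (i > 0 && (base[i].type == 0 || base[i].key > key_beg))
		--i;

	/* Scan forward until an element encloses or passes key_beg. */
	while (i < count) {
		if (base[i].type != 0) {
			if (base[i].key > key_beg)
				break;
			scan_end = base[i].key +
				   ((uint64_t)1 << base[i].keybits) - 1;
			if (scan_end >= key_beg)
				break;
		}
		++i;
	}
	*cache_indexp = i;
	return (i);			/* i == count means "not found" */
}
#endif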
4122 * Do a combined search and return the next match either from the blockref
4123 * array or from the in-memory chain. Sets *bresp to the returned bref in
4124 * both cases, or sets it to NULL if the search was exhausted. Only returns
4125 * a non-NULL chain if the search matched from the in-memory chain.
4127 * Must be called with above's spinlock held. Spinlock remains held
4128 * through the operation.
4130 * The returned chain is not locked or referenced. Use the returned bref
4131 * to determine whether the search was exhausted.
4133 static hammer2_chain_t *
4134 hammer2_combined_find(hammer2_chain_t *parent,
4135 hammer2_blockref_t *base, int count,
4136 int *cache_indexp, hammer2_key_t *key_nextp,
4137 hammer2_key_t key_beg, hammer2_key_t key_end,
4138 hammer2_blockref_t **bresp)
4140 hammer2_blockref_t *bref;
4141 hammer2_chain_t *chain;
4144 *key_nextp = key_end + 1;
4145 i = hammer2_base_find(parent, base, count, cache_indexp,
4146 key_nextp, key_beg, key_end);
4147 chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);
4152 if (i == count && chain == NULL) {
4154 return(chain); /* NULL */
4158 * Only chain matched
4161 bref = &chain->bref;
4166 * Only blockref matched.
4168 if (chain == NULL) {
4174 * Both in-memory and blockref match. Select the nearer element.
4175 * If both are flush with the left-hand side they are considered
4176 * to be the same distance.
4178 * When both are the same distance away select the chain if it is
4179 * live or if it's delete_tid is greater than the parent's
4180 * synchronized bref.mirror_tid (a single test suffices for both
4181 * conditions), otherwise select the element.
4183 * (It is possible for an old deletion to linger after a rename-over
4184 * and flush, which would make the media copy the correct choice).
4188 * Either both are flush with the left-hand side or they are the
4189 * same distance away. Select the chain if it is not deleted
4190 * or it has a higher delete_tid, else select the media.
4192 if ((chain->bref.key <= key_beg && base[i].key <= key_beg) ||
4193 chain->bref.key == base[i].key) {
4194 if (chain->delete_tid > base[i].mirror_tid) {
4195 bref = &chain->bref;
4197 KKASSERT(chain->flags & HAMMER2_CHAIN_DELETED);
4205 * Select the nearer key.
4207 if (chain->bref.key < base[i].key) {
4208 bref = &chain->bref;
4215 * If the bref is out of bounds we've exhausted our search.
4218 if (bref->key > key_end) {
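/*
 * Editorial sketch (not part of the original source): the selection logic
 * above reduced to its core decision.  The names are illustrative and the
 * tid tie-break merely stands in for the delete_tid vs synchronized
 * bref.mirror_tid test; it is not the exact rule used by the real code.
 */
#if 0
#include <stdint.h>

enum example_pick { PICK_MEDIA, PICK_CHAIN };

static enum example_pick
example_combined_pick(uint64_t key_beg,
		      uint64_t media_key, uint64_t media_tid,
		      uint64_t chain_key, uint64_t chain_tid)
{
	/*
	 * Same distance away (both flush left or identical keys): the
	 * in-memory chain wins only if it is newer than the synchronized
	 * media copy, otherwise the media copy is authoritative.
	 */
	if ((chain_key <= key_beg && media_key <= key_beg) ||
	    chain_key == media_key)
		return (chain_tid > media_tid ? PICK_CHAIN : PICK_MEDIA);

	/* Otherwise simply take whichever candidate is nearer. */
	return (chain_key < media_key ? PICK_CHAIN : PICK_MEDIA);
}
#endif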
4228 * Locate the specified block array element and delete it. The element must exist.
4231 * The spin lock on the related chain must be held.
4233 * NOTE: live_count was adjusted when the chain was deleted, so it does not
4234 * need to be adjusted when we commit the media change.
4237 hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
4238 hammer2_blockref_t *base, int count,
4239 int *cache_indexp, hammer2_chain_t *child)
4241 hammer2_blockref_t *elm = &child->bref;
4242 hammer2_chain_core_t *core = parent->core;
4243 hammer2_key_t key_next;
4247 * Delete element. Expect the element to exist.
4249 * XXX see caller, flush code not yet sophisticated enough to prevent
4250 * elements from being re-flushed in some cases.
4252 key_next = 0; /* max range */
4253 i = hammer2_base_find(parent, base, count, cache_indexp,
4254 &key_next, elm->key, elm->key);
4255 if (i == count || base[i].type == 0 ||
4256 base[i].key != elm->key || base[i].keybits != elm->keybits) {
4257 panic("delete base %p element not found at %d/%d elm %p\n",
4258 base, i, count, elm);
4261 bzero(&base[i], sizeof(*base));
4262 base[i].mirror_tid = (intptr_t)parent; /* debug */
4263 base[i].modify_tid = (intptr_t)child; /* debug */
4264 base[i].check.debug.sync_tid = trans->sync_tid; /* debug */
4267 * We can only optimize core->live_zero for live chains.
4269 if ((parent->flags & HAMMER2_CHAIN_DUPLICATED) == 0) {
4270 if (core->live_zero == i + 1) {
4271 while (--i >= 0 && base[i].type == 0)
4273 core->live_zero = i + 1;
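/*
 * Editorial sketch (not part of the original source): deleting from a
 * blockref-style array amounts to zeroing the slot and, when the deleted
 * slot was the last live one, pulling the live_zero high-water mark back
 * over any trailing empty slots so later scans can stop early.  The
 * example_* names are made up and the types are simplified.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct example_ref {
	int		type;		/* 0 == empty slot */
	uint64_t	key;
};

static void
example_base_delete(struct example_ref *base, int *live_zerop, int i)
{
	memset(&base[i], 0, sizeof(base[i]));
	if (*live_zerop == i + 1) {
		while (--i >= 0 && base[i].type == 0)
			;
		*live_zerop = i + 1;
	}
}
#endif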
4279 * Insert the specified element. The block array must not already have the
4280 * element and must have space available for the insertion.
4282 * The spin lock on the related chain must be held.
4284 * NOTE: live_count was adjusted when the chain was deleted, so it does not
4285 * need to be adjusted when we commit the media change.
4288 hammer2_base_insert(hammer2_trans_t *trans __unused, hammer2_chain_t *parent,
4289 hammer2_blockref_t *base, int count,
4290 int *cache_indexp, hammer2_chain_t *child)
4292 hammer2_blockref_t *elm = &child->bref;
4293 hammer2_chain_core_t *core = parent->core;
4294 hammer2_key_t key_next;
4303 * Insert new element. Expect the element to not already exist
4304 * unless we are replacing it.
4306 * XXX see caller, flush code not yet sophisticated enough to prevent
4307 * elements from being re-flushed in some cases.
4309 key_next = 0; /* max range */
4310 i = hammer2_base_find(parent, base, count, cache_indexp,
4311 &key_next, elm->key, elm->key);
4314 * Shortcut fill optimization, typical ordered insertion(s) may not
4317 KKASSERT(i >= 0 && i <= count);
4320 * We can only optimize core->live_zero for live chains.
4322 if (i == count && core->live_zero < count) {
4323 if ((parent->flags & HAMMER2_CHAIN_DUPLICATED) == 0) {
4324 i = core->live_zero++;
4330 xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
4331 if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
4332 panic("insert base %p overlapping elements at %d elm %p\n",
4337 * Try to find an empty slot before or after.
4341 while (j > 0 || k < count) {
4343 if (j >= 0 && base[j].type == 0) {
4347 bcopy(&base[j+1], &base[j],
4348 (i - j - 1) * sizeof(*base));
4354 if (k < count && base[k].type == 0) {
4355 bcopy(&base[i], &base[i+1],
4356 (k - i) * sizeof(hammer2_blockref_t));
4360 * We can only update core->live_zero for live chains.
4363 if ((parent->flags & HAMMER2_CHAIN_DUPLICATED) == 0) {
4364 if (core->live_zero <= k)
4365 core->live_zero = k + 1;
4371 panic("hammer2_base_insert: no room!");
4378 for (l = 0; l < count; ++l) {
4380 key_next = base[l].key +
4381 ((hammer2_key_t)1 << base[l].keybits) - 1;
4385 while (++l < count) {
4387 if (base[l].key <= key_next)
4388 panic("base_insert %d %d,%d,%d fail %p:%d", u, i, j, k, base, l);
4389 key_next = base[l].key +
4390 ((hammer2_key_t)1 << base[l].keybits) - 1;
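/*
 * Editorial sketch (not part of the original source): the nearest-empty-slot
 * insertion used above, on a simplified array.  Starting from the insertion
 * index, search outward for an empty slot and shift the intervening elements
 * toward it, which keeps the copying small in a sparsely populated array.
 * The example_* names are made up; i is the index the element should occupy
 * (as a find would return it).
 */
#if 0
#include <stdint.h>
#include <string.h>

struct example_ref {
	int		type;		/* 0 == empty slot */
	uint64_t	key;
};

static int
example_base_insert(struct example_ref *base, int count, int i,
		    const struct example_ref *elm)
{
	int j = i - 1;
	int k = i;

	while (j >= 0 || k < count) {
		if (j >= 0 && base[j].type == 0) {
			/* Empty slot on the left: shift left, insert at i-1. */
			if (j < i - 1)
				memmove(&base[j], &base[j + 1],
					(i - 1 - j) * sizeof(*base));
			base[i - 1] = *elm;
			return (0);
		}
		if (k < count && base[k].type == 0) {
			/* Empty slot on the right: shift right, insert at i. */
			if (k > i)
				memmove(&base[i + 1], &base[i],
					(k - i) * sizeof(*base));
			base[i] = *elm;
			return (0);
		}
		--j;
		++k;
	}
	return (-1);			/* no room */
}
#endif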
4400 * Sort the chain's blockref[] array: live elements are ordered by key
4401 * and empty elements are moved to the end. Used by the flush code.
4403 * The chain must be exclusively locked AND spin-locked.
4405 typedef hammer2_blockref_t *hammer2_blockref_p;
4409 hammer2_base_sort_callback(const void *v1, const void *v2)
4411 const hammer2_blockref_t *bref1 = v1;
4412 const hammer2_blockref_t *bref2 = v2;
4415 * Make sure empty elements are placed at the end of the array
4417 if (bref1->type == 0) {
4418 if (bref2->type == 0)
4421 } else if (bref2->type == 0) {
4428 if (bref1->key < bref2->key)
4430 if (bref1->key > bref2->key)
4436 hammer2_base_sort(hammer2_chain_t *chain)
4438 hammer2_blockref_t *base;
4441 switch(chain->bref.type) {
4442 case HAMMER2_BREF_TYPE_INODE:
4444 * Special shortcut for embedded data: an inode using DIRECTDATA
4445 * carries its file data directly in the inode and has no blockref
4446 * array, so there is nothing to sort.
4448 * This is only applicable to regular files and softlinks.
4450 if (chain->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
4452 base = &chain->data->ipdata.u.blockset.blockref[0];
4453 count = HAMMER2_SET_COUNT;
4455 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
4456 case HAMMER2_BREF_TYPE_INDIRECT:
4458 * Indirect blocks in the INITIAL state have no media block array to sort.
4461 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
4462 base = &chain->data->npdata[0];
4463 count = chain->bytes / sizeof(hammer2_blockref_t);
4465 case HAMMER2_BREF_TYPE_VOLUME:
4466 base = &chain->hmp->voldata.sroot_blockset.blockref[0];
4467 count = HAMMER2_SET_COUNT;
4469 case HAMMER2_BREF_TYPE_FREEMAP:
4470 base = &chain->hmp->voldata.freemap_blockset.blockref[0];
4471 count = HAMMER2_SET_COUNT;
4474 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
4476 base = NULL; /* safety */
4477 count = 0; /* safety */
4479 kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
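/*
 * Editorial sketch (not part of the original source): the same ordering
 * rule as the sort above, shown with the standard userland qsort() --
 * empty elements sort to the end, live elements ascend by key.  The
 * example_* names and simplified struct are made up for the illustration.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct example_ref {
	int		type;		/* 0 == empty slot */
	uint64_t	key;
};

static int
example_sort_cmp(const void *v1, const void *v2)
{
	const struct example_ref *b1 = v1;
	const struct example_ref *b2 = v2;

	if (b1->type == 0)
		return (b2->type == 0 ? 0 : 1);	/* empties sort last */
	if (b2->type == 0)
		return (-1);
	if (b1->key < b2->key)
		return (-1);
	if (b1->key > b2->key)
		return (1);
	return (0);
}

static void
example_sort(struct example_ref *base, int count)
{
	qsort(base, count, sizeof(*base), example_sort_cmp);
}
#endif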
4485 * Chain memory management
4488 hammer2_chain_wait(hammer2_chain_t *chain)
4490 tsleep(chain, 0, "chnflw", 1);
4494 * Manage excessive memory resource use for chain and related structures.
4498 hammer2_chain_memory_wait(hammer2_pfsmount_t *pmp)
4508 * Atomically check the condition and wait. Also do an early speedup of
4509 * the syncer to try to avoid hitting the wait.
4512 waiting = pmp->inmem_dirty_chains;
4514 count = waiting & HAMMER2_DIRTYCHAIN_MASK;
4516 limit = pmp->mp->mnt_nvnodelistsize / 10;
4517 if (limit < hammer2_limit_dirty_chains)
4518 limit = hammer2_limit_dirty_chains;
4523 if ((int)(ticks - zzticks) > hz) {
4525 kprintf("count %ld %ld\n", count, limit);
4530 * Block if there are too many dirty chains present, wait
4531 * for the flush to clean some out.
4533 if (count > limit) {
4534 tsleep_interlock(&pmp->inmem_dirty_chains, 0);
4535 if (atomic_cmpset_long(&pmp->inmem_dirty_chains,
4537 waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
4538 speedup_syncer(pmp->mp);
4539 tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
4542 continue; /* loop on success or fail */
4546 * Try to start an early flush before we are forced to block.
4548 if (count > limit * 7 / 10)
4549 speedup_syncer(pmp->mp);
4555 hammer2_chain_memory_inc(hammer2_pfsmount_t *pmp)
4558 atomic_add_long(&pmp->inmem_dirty_chains, 1);
4562 hammer2_chain_memory_wakeup(hammer2_pfsmount_t *pmp)
4570 waiting = pmp->inmem_dirty_chains;
4572 if (atomic_cmpset_long(&pmp->inmem_dirty_chains,
4575 ~HAMMER2_DIRTYCHAIN_WAITING)) {
4579 if (waiting & HAMMER2_DIRTYCHAIN_WAITING)
4580 wakeup(&pmp->inmem_dirty_chains);
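/*
 * Editorial sketch (not part of the original source): a userland rendition
 * of the dirty-chain throttle above.  The count and a WAITING flag share a
 * single atomic word, so a waiter can test "over the limit" and advertise
 * that it is sleeping in one compare-and-swap.  The example_* names are
 * made up, and tsleep_interlock()/tsleep()/wakeup() are replaced by a spin
 * purely for illustration.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define EXAMPLE_WAITING	((uint64_t)1 << 63)	/* waiter-present flag */
#define EXAMPLE_MASK	(EXAMPLE_WAITING - 1)	/* dirty-chain count */

static _Atomic uint64_t example_dirty;

static void
example_dirty_inc(void)
{
	atomic_fetch_add(&example_dirty, 1);
}

static void
example_dirty_wait(uint64_t limit)
{
	uint64_t waiting;

	for (;;) {
		waiting = atomic_load(&example_dirty);
		if ((waiting & EXAMPLE_MASK) <= limit)
			break;
		/*
		 * Set WAITING with a CAS so the limit test and the waiter
		 * advertisement are one atomic step.  If the CAS fails the
		 * word changed underneath us and we simply re-evaluate.
		 */
		if (atomic_compare_exchange_weak(&example_dirty, &waiting,
		    waiting | EXAMPLE_WAITING)) {
			/* kernel: tsleep() here until the flusher wakes us */
			while (atomic_load(&example_dirty) & EXAMPLE_WAITING)
				;
		}
	}
}

static void
example_dirty_dec(void)
{
	uint64_t waiting;

	do {
		waiting = atomic_load(&example_dirty);
		/* Drop one dirty chain and clear WAITING to release waiters. */
	} while (!atomic_compare_exchange_weak(&example_dirty, &waiting,
		 (waiting - 1) & ~EXAMPLE_WAITING));
	/* kernel: if (waiting & WAITING) wakeup(&pmp->inmem_dirty_chains) */
}
#endif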
4585 adjreadcounter(hammer2_blockref_t *bref, size_t bytes)
4589 switch(bref->type) {
4590 case HAMMER2_BREF_TYPE_DATA:
4591 counterp = &hammer2_iod_file_read;
4593 case HAMMER2_BREF_TYPE_INODE:
4594 counterp = &hammer2_iod_meta_read;
4596 case HAMMER2_BREF_TYPE_INDIRECT:
4597 counterp = &hammer2_iod_indr_read;
4599 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
4600 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
4601 counterp = &hammer2_iod_fmap_read;
4604 counterp = &hammer2_iod_volu_read;