/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This subsystem implements most of the core support functions for
 * the hammer2_chain structure.
 *
 * Chains are the in-memory version of media objects (volume header, inodes,
 * indirect blocks, data blocks, etc).  Chains represent a portion of the
 * HAMMER2 topology.
 *
 * A chain is topologically stable once it has been inserted into the
 * in-memory topology.  Modifications which copy, move, or resize the chain
 * are handled via the DELETE-DUPLICATE mechanic where the original chain
 * stays intact but is marked deleted and a new chain is allocated which
 * shares the old chain's children.
 *
 * This sharing is handled via the hammer2_chain_core structure.
 *
 * The DELETE-DUPLICATE mechanism allows the same topological level to contain
 * many overloadings.  However, our RBTREE mechanics require that there be
 * no overlaps, so we accomplish the overloading by moving conflicting chains
 * with smaller or equal radii into a sub-RBTREE under the chain being
 * overloaded.
 *
 * DELETE-DUPLICATE is also used when a modification to a chain crosses a
 * flush synchronization boundary, allowing the flush code to continue
 * flushing the older version of the topology and not be disrupted by new
 * frontend operations.
 *
 * All lookup and iterate operations and most modifications are done on the
 * live view.  During flushes lookups are not normally done and modifications
 * may be run on the flush view.  However, flushes often need to allocate
 * blocks and the freemap_alloc/free code issues lookups.  This code is
 * special cased to use the live view when called from a flush.
 *
 * General chain lookup/iteration functions are NOT aware of the flush view;
 * they only know about live views.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kern_syscall.h>

#include "hammer2.h"
static int hammer2_indirect_optimize;	/* XXX SYSCTL */

static hammer2_chain_t *hammer2_chain_create_indirect(
		hammer2_trans_t *trans, hammer2_chain_t *parent,
		hammer2_key_t key, int keybits, int for_type, int *errorp);
static void hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop);
static void adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
static hammer2_chain_t *hammer2_combined_find(
		hammer2_chain_t *parent,
		hammer2_blockref_t *base, int count,
		int *cache_indexp, hammer2_key_t *key_nextp,
		hammer2_key_t key_beg, hammer2_key_t key_end,
		hammer2_blockref_t **bresp);
/*
 * Basic RBTree for chains.  Chains cannot overlap within any given
 * core->rbtree without recursing through chain->rbtree.  We effectively
 * guarantee this by checking the full range rather than just the first
 * key element.  By matching on the full range callers can detect when
 * recursion through chain->rbtree is needed.
 *
 * NOTE: This also means that a delete-duplicate on the same key will
 *	 overload by placing the deleted element in the new element's
 *	 chain->rbtree (when doing a direct replacement).
 */
RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
int
hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
{
	hammer2_key_t c1_beg;
	hammer2_key_t c1_end;
	hammer2_key_t c2_beg;
	hammer2_key_t c2_end;

	c1_beg = chain1->bref.key;
	c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1;
	c2_beg = chain2->bref.key;
	c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1;

	if (c1_end < c2_beg)	/* fully to the left */
		return(-1);
	if (c1_beg > c2_end)	/* fully to the right */
		return(1);
	return(0);		/* overlap (must not cross edge boundary) */
}
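/*
 * Illustrative note, not part of the original source: a chain with
 * bref.key = 0x10000 and bref.keybits = 16 covers the inclusive key
 * range [0x10000, 0x1ffff].  The sketch below (hypothetical values)
 * shows how the comparator's range math classifies two chains.
 */
#if 0
static int
example_chain_cmp(void)
{
	hammer2_key_t a_beg = 0x10000;		/* keybits = 16 */
	hammer2_key_t a_end = a_beg + ((hammer2_key_t)1 << 16) - 1;
	hammer2_key_t b_beg = 0x14000;		/* keybits = 14 */
	hammer2_key_t b_end = b_beg + ((hammer2_key_t)1 << 14) - 1;

	/* b nests entirely inside a, so the result is 0 (overlap) */
	return (a_end < b_beg) ? -1 : (a_beg > b_end) ? 1 : 0;
}
#endif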
int
hammer2_isclusterable(hammer2_chain_t *chain)
{
	if (hammer2_cluster_enable) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
		    chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
		    chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			return(1);
		}
	}
	return(0);
}
/*
 * Recursively set the update_tid flag up to the root starting at chain's
 * parent->core.  update_tid is not set in chain's core.
 *
 * This controls top-down visibility for flushes.  The child has just one
 * 'above' core, but the core itself can be multi-homed with parents iterated
 * via core->ownerq.
 *
 * This function is not used during a flush (except when the flush is
 * allocating which requires the live tree).  The flush keeps track of its
 * own recursion.
 *
 * XXX needs to be optimized to use roll-up TIDs.  update_tid is only really
 * compared against bref.mirror_tid which itself is only updated by a flush.
 */
void
hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_core_t *above;

	if ((trans->flags &
	     (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_ISALLOCATING)) ==
	    HAMMER2_TRANS_ISFLUSH) {
		return;
	}

	while ((above = chain->above) != NULL) {
		spin_lock(&above->cst.spin);
		if (above->update_tid < trans->sync_tid)
			above->update_tid = trans->sync_tid;
		chain = TAILQ_LAST(&above->ownerq, h2_core_list);
#if 0
		TAILQ_FOREACH_REVERSE(chain, &above->ownerq,
				      h2_core_list, core_entry) {
			if (trans->sync_tid >= chain->modify_tid &&
			    trans->sync_tid <= chain->delete_tid) {
				break;
			}
		}
#endif
		spin_unlock(&above->cst.spin);
	}
}
/*
 * Allocate a new disconnected chain element representing the specified
 * bref.  chain->refs is set to 1 and the passed bref is copied to
 * chain->bref.  chain->bytes is derived from the bref.
 *
 * chain->core is NOT allocated and the media data and bp pointers are left
 * NULL.  The caller must call chain_core_alloc() to allocate or associate
 * a core with the chain.
 *
 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
 */
hammer2_chain_t *
hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_pfsmount_t *pmp,
		    hammer2_trans_t *trans, hammer2_blockref_t *bref)
{
	hammer2_chain_t *chain;
	u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);

	/*
	 * Construct the appropriate system structure.
	 */
	switch(bref->type) {
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * Chains are really only associated with the hmp but we
		 * maintain a pmp association for per-mount memory tracking
		 * purposes.  The pmp can be NULL.
		 */
		chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
		if (pmp)
			atomic_add_long(&pmp->inmem_chains, 1);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		chain = NULL;
		panic("hammer2_chain_alloc volume type illegal for op");
	default:
		chain = NULL;
		panic("hammer2_chain_alloc: unrecognized blockref type: %d",
		      bref->type);
	}

	chain->hmp = hmp;
	chain->pmp = pmp;
	chain->bref = *bref;
	chain->bytes = bytes;
	chain->refs = 1;
	chain->flags = HAMMER2_CHAIN_ALLOCATED;
	chain->delete_tid = HAMMER2_MAX_TID;

	/*
	 * Set modify_tid if a transaction is creating the chain.  When
	 * loading a chain from backing store trans is passed as NULL and
	 * modify_tid is left set to 0.
	 */
	if (trans)
		chain->modify_tid = trans->sync_tid;

	return (chain);
}
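/*
 * Usage sketch, not part of the original source: callers pair this
 * function with hammer2_chain_core_alloc(), as hammer2_chain_get()
 * does further below when instantiating a chain from a media blockref.
 */
#if 0
	chain = hammer2_chain_alloc(hmp, pmp, trans, bref);
	hammer2_chain_core_alloc(trans, chain, NULL);	/* fresh core */
	/* chain is now referenced (refs == 1) but not locked */
#endif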
/*
 * Associate an existing core with the chain or allocate a new core.
 *
 * The core is not locked.  No additional refs on the chain are made.
 * (trans) must not be NULL if (core) is not NULL.
 *
 * When chains are delete-duplicated during flushes we insert nchain on
 * the ownerq after ochain instead of at the end in order to give the
 * drop code visibility in the correct order, otherwise drops can be missed.
 */
void
hammer2_chain_core_alloc(hammer2_trans_t *trans,
			 hammer2_chain_t *nchain, hammer2_chain_t *ochain)
{
	hammer2_chain_core_t *core;

	KKASSERT(nchain->core == NULL);

	if (ochain == NULL) {
		/*
		 * Fresh core under nchain (no multi-homing of ochain's
		 * sub-tree).
		 */
		core = kmalloc(sizeof(*core), nchain->hmp->mchain,
			       M_WAITOK | M_ZERO);
		TAILQ_INIT(&core->layerq);
		TAILQ_INIT(&core->ownerq);
		core->sharecnt = 1;
		core->good = 0x1234;
		if (trans)
			core->update_tid = trans->sync_tid;
		else
			core->update_tid = nchain->bref.mirror_tid;
		nchain->core = core;
		ccms_cst_init(&core->cst, nchain);
		TAILQ_INSERT_TAIL(&core->ownerq, nchain, core_entry);
	} else {
		/*
		 * Propagate the PFSROOT flag which we set on all subdirs
		 * under the super-root.
		 */
		atomic_set_int(&nchain->flags,
			       ochain->flags & HAMMER2_CHAIN_PFSROOT);

		/*
		 * Duplicating ochain -> nchain.  Set the DUPLICATED flag on
		 * ochain if nchain is not a snapshot.
		 *
		 * It is possible for the DUPLICATED flag to already be
		 * set when called via a flush operation because flush
		 * operations may have to work on elements with delete_tid's
		 * beyond the flush sync_tid.  In this situation we must
		 * ensure that nchain is placed just after ochain in the
		 * ownerq and that the DUPLICATED flag is set on nchain so
		 * 'live' operations skip past it to the correct chain.
		 *
		 * The flusher understands the blockref synchronization state
		 * for any stale chains by observing bref.mirror_tid, which
		 * delete-duplicate replicates.
		 *
		 * WARNING! However, the case is disallowed when the flusher
		 *	    is allocating freemap space because this entails
		 *	    more than just adjusting a block table.
		 */
		if (ochain->flags & HAMMER2_CHAIN_DUPLICATED) {
			KKASSERT((trans->flags &
				  (HAMMER2_TRANS_ISFLUSH |
				   HAMMER2_TRANS_ISALLOCATING)) ==
				 HAMMER2_TRANS_ISFLUSH);
			atomic_set_int(&nchain->flags,
				       HAMMER2_CHAIN_DUPLICATED);
		}
		if ((nchain->flags & HAMMER2_CHAIN_SNAPSHOT) == 0) {
			atomic_set_int(&ochain->flags,
				       HAMMER2_CHAIN_DUPLICATED);
		}
		core = ochain->core;
		atomic_add_int(&core->sharecnt, 1);

		spin_lock(&core->cst.spin);
		nchain->core = core;
		if (core->update_tid < trans->sync_tid)
			core->update_tid = trans->sync_tid;

		/*
		 * Maintain ordering for refactor test so we don't skip over
		 * a snapshot.  Also, during flushes, delete-duplications
		 * for block-table updates can occur on blocks already
		 * deleted (delete-duplicated by a later transaction).  We
		 * must insert nchain after ochain but before the later
		 * transaction's copy.
		 */
		TAILQ_INSERT_AFTER(&core->ownerq, ochain, nchain, core_entry);

		spin_unlock(&core->cst.spin);
	}
}
/*
 * Add a reference to a chain element, preventing its destruction.
 */
void
hammer2_chain_ref(hammer2_chain_t *chain)
{
	atomic_add_int(&chain->refs, 1);
}
/*
 * Insert the chain in the core rbtree at the first layer
 * which accepts it (for now we don't sort layers by the transaction tid).
 */
#define HAMMER2_CHAIN_INSERT_SPIN	0x0001
#define HAMMER2_CHAIN_INSERT_LIVE	0x0002
#define HAMMER2_CHAIN_INSERT_RACE	0x0004

static
void
hammer2_chain_insert(hammer2_chain_core_t *above, hammer2_chain_t *chain,
		     int flags)
{
	hammer2_chain_layer_t *layer;
	hammer2_chain_t *xchain;

	if (flags & HAMMER2_CHAIN_INSERT_SPIN)
		spin_lock(&above->cst.spin);
	chain->above = above;
	layer = TAILQ_FIRST(&above->layerq);

	/*
	 * Try to insert
	 */
	if (layer == NULL ||
	    (xchain = RB_INSERT(hammer2_chain_tree,
				&layer->rbtree, chain)) != NULL) {
		/*
		 * Either no layers have been allocated or the insertion
		 * failed.  This is fatal if the conflicted xchain is not
		 * flagged as deleted.  Caller may or may not allow the
		 * failure.
		 */
		if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
		    xchain && (xchain->flags & HAMMER2_CHAIN_DELETED) == 0) {
			chain->above = NULL;
			chain->inlayer = NULL;
			kprintf("insertion race against %p\n", xchain);
			goto failed;
		}

		/*
		 * Allocate a new layer to resolve the issue.
		 */
		spin_unlock(&above->cst.spin);
		layer = kmalloc(sizeof(*layer), chain->hmp->mchain,
				M_WAITOK | M_ZERO);
		RB_INIT(&layer->rbtree);
		layer->good = 0xABCD;
		spin_lock(&above->cst.spin);
		TAILQ_INSERT_HEAD(&above->layerq, layer, entry);
		RB_INSERT(hammer2_chain_tree, &layer->rbtree, chain);
	}
	chain->inlayer = layer;
	++above->chain_count;

	if ((flags & HAMMER2_CHAIN_INSERT_LIVE) &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
		atomic_add_int(&above->live_count, 1);
	}
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
failed:
	if (flags & HAMMER2_CHAIN_INSERT_SPIN)
		spin_unlock(&above->cst.spin);
}
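/*
 * Usage sketch, not part of the original source: hammer2_chain_get()
 * below invokes this with INSERT_SPIN (acquire above->cst.spin here)
 * plus INSERT_RACE (tolerate losing a race, leaving ONRBTREE clear).
 */
#if 0
	hammer2_chain_insert(above, chain, HAMMER2_CHAIN_INSERT_SPIN |
					   HAMMER2_CHAIN_INSERT_RACE);
	if ((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0) {
		/* lost the insertion race, caller must clean up */
	}
#endif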
/*
 * Drop the caller's reference to the chain.  When the ref count drops to
 * zero this function will disassociate the chain from its parent and
 * deallocate it, then recursively drop the parent using the implied ref
 * from the chain's chain->parent.
 *
 * WARNING! Just because we are able to deallocate a chain doesn't mean
 *	    that chain->core->rbtree is empty.  There can still be a sharecnt
 *	    on chain->core and RBTREE entries that refer to different parents.
 */
static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain,
					       struct h2_core_list *delayq);
void
hammer2_chain_drop(hammer2_chain_t *chain)
{
	struct h2_core_list delayq;
	hammer2_chain_t *scan;
	u_int refs;
	u_int need = 0;

	if (chain->flags & HAMMER2_CHAIN_MOVED)
		++need;
	if (chain->flags & HAMMER2_CHAIN_MODIFIED)
		++need;
	KKASSERT(chain->refs > need);

	TAILQ_INIT(&delayq);

	while (chain) {
		refs = chain->refs;
		cpu_ccfence();
		KKASSERT(refs > 0);
		if (refs == 1) {
			chain = hammer2_chain_lastdrop(chain, &delayq);
		} else {
			if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
				break;
			/* retry the same chain */
		}

		/*
		 * When we've exhausted lastdrop chaining pull off of delayq.
		 * chains on delayq are dead but are used to placehold other
		 * chains which we added a ref to for the purpose of dropping.
		 */
		if (chain == NULL) {
			hammer2_mount_t *hmp;

			if ((scan = TAILQ_FIRST(&delayq)) != NULL) {
				chain = (void *)scan->data;
				TAILQ_REMOVE(&delayq, scan, core_entry);
				scan->flags &= ~HAMMER2_CHAIN_ALLOCATED;
				hmp = scan->hmp;
				scan->hmp = NULL;
				kfree(scan, hmp->mchain);
			}
		}
	}
}
/*
 * Safe handling of the 1->0 transition on chain.  Returns a chain for
 * recursive drop or NULL, possibly returning the same chain if the atomic
 * op fails.
 *
 * When two chains need to be recursively dropped we use the chain
 * we would otherwise free to placehold the additional chain.  It's a bit
 * convoluted but we can't just recurse without potentially blowing out
 * the kernel stack.
 *
 * The cst spinlock is allowed to nest child-to-parent (not parent-to-child).
 */
static
hammer2_chain_t *
hammer2_chain_lastdrop(hammer2_chain_t *chain, struct h2_core_list *delayq)
{
	hammer2_pfsmount_t *pmp;
	hammer2_mount_t *hmp;
	hammer2_chain_core_t *above;
	hammer2_chain_core_t *core;
	hammer2_chain_layer_t *layer;
	hammer2_chain_t *rdrop1;
	hammer2_chain_t *rdrop2;
	/*
	 * Spinlock the core and check to see if it is empty.  If it is
	 * not empty we leave chain intact with refs == 0.  The elements
	 * in core->rbtree are associated with other chains contemporary
	 * with ours but not with our chain directly.
	 */
	if ((core = chain->core) != NULL) {
		spin_lock(&core->cst.spin);

		/*
		 * We can't free chains with children because there might
		 * be a flush dependency.
		 *
		 * NOTE: We return (chain) on failure to retry.
		 */
		if (core->chain_count) {
			if (atomic_cmpset_int(&chain->refs, 1, 0))
				chain = NULL;	/* success */
			spin_unlock(&core->cst.spin);
			return(chain);
		}
		/* no chains left under us */

		/*
		 * Because various parts of the code, including the inode
		 * structure, might be holding a stale chain and need to
		 * iterate to a non-stale sibling, we cannot remove siblings
		 * unless they are at the head of chain.
		 *
		 * We can't free a live chain unless it is at the head
		 * of its ownerq.  If we were to then the go-to chain
		 * would revert to the prior deleted chain.
		 */
		if (TAILQ_FIRST(&core->ownerq) != chain) {
			if (atomic_cmpset_int(&chain->refs, 1, 0))
				chain = NULL;	/* success */
			spin_unlock(&core->cst.spin);
			return(chain);
		}
	}
	/*
	 * chain->core has no children left so no accessors can get to our
	 * chain from there.  Now we have to lock the above core to interlock
	 * remaining possible accessors that might bump chain's refs before
	 * we can safely drop chain's refs with intent to free the chain.
	 */
	hmp = chain->hmp;
	pmp = chain->pmp;	/* can be NULL */
	rdrop1 = NULL;
	rdrop2 = NULL;
	layer = NULL;
	/*
	 * Spinlock the parent and try to drop the last ref on chain.
	 * On success remove chain from its parent, otherwise return NULL.
	 *
	 * (normal core locks are top-down recursive but we define core
	 * spinlocks as bottom-up recursive, so this is safe).
	 */
	if ((above = chain->above) != NULL) {
		spin_lock(&above->cst.spin);
		if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
			/* 1->0 transition failed */
			spin_unlock(&above->cst.spin);
			if (core)
				spin_unlock(&core->cst.spin);
			return(chain);	/* retry */
		}

		/*
		 * 1->0 transition successful, remove chain from its
		 * above core.  Track layer for removal/freeing.
		 */
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
		layer = chain->inlayer;
		RB_REMOVE(hammer2_chain_tree, &layer->rbtree, chain);
		--above->chain_count;
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
		chain->above = NULL;
		chain->inlayer = NULL;

		if (RB_EMPTY(&layer->rbtree) && layer->refs == 0) {
			TAILQ_REMOVE(&above->layerq, layer, entry);
		} else {
			layer = NULL;
		}

		/*
		 * If our chain was the last chain in the parent's core the
		 * core is now empty and its parents might now be droppable.
		 * Try to drop the first multi-homed parent by gaining a
		 * ref on it here and then dropping it below.
		 */
		if (above->chain_count == 0) {
			rdrop1 = TAILQ_FIRST(&above->ownerq);
			if (rdrop1 &&
			    atomic_cmpset_int(&rdrop1->refs, 0, 1) == 0) {
				rdrop1 = NULL;
			}
		}
		spin_unlock(&above->cst.spin);
		above = NULL;	/* safety */
	}
	/*
	 * Successful 1->0 transition and the chain can be destroyed now.
	 *
	 * We still have the core spinlock (if core is non-NULL), and core's
	 * chain_count is 0.  The above spinlock is gone.
	 *
	 * Remove chain from ownerq.  Once core has no more owners (and no
	 * children which is already the case) we can destroy core.
	 *
	 * If core has more owners we may be able to continue a bottom-up
	 * drop with our next sibling.
	 */
	if (core) {
		KKASSERT(core->chain_count == 0);

		TAILQ_REMOVE(&core->ownerq, chain, core_entry);
		rdrop2 = TAILQ_FIRST(&core->ownerq);
		if (rdrop2 && atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0)
			rdrop2 = NULL;
		spin_unlock(&core->cst.spin);

		/*
		 * We can do the final 1->0 transition with an atomic op
		 * after releasing core's spinlock.
		 */
		if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
			/*
			 * On the 1->0 transition of core we can destroy
			 * it.  Any remaining layers should no longer be
			 * referenced or visible to other threads.
			 */
			KKASSERT(TAILQ_EMPTY(&core->ownerq));
			if (layer) {
				layer->good = 0xEF00;
				kfree(layer, hmp->mchain);
				layer = NULL;
			}
			while ((layer = TAILQ_FIRST(&core->layerq)) != NULL) {
				KKASSERT(layer->refs == 0 &&
					 RB_EMPTY(&layer->rbtree));
				TAILQ_REMOVE(&core->layerq, layer, entry);
				layer->good = 0xEF01;
				kfree(layer, hmp->mchain);
			}
			KKASSERT(core->cst.count == 0);
			KKASSERT(core->cst.upgrade == 0);
			kfree(core, hmp->mchain);
		}
		core = NULL;	/* safety */
	}
	/*
	 * All spin locks are gone, finish freeing stuff.
	 */
	KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
				  HAMMER2_CHAIN_MODIFIED)) == 0);
	hammer2_chain_drop_data(chain, 1);

	KKASSERT(chain->bp == NULL);

	/*
	 * Free saved empty layer and return chained drop.
	 */
	if (layer) {
		layer->good = 0xEF02;
		kfree(layer, hmp->mchain);
	}

	/*
	 * Once chain resources are gone we can use the now dead chain
	 * structure to placehold what might otherwise require a recursive
	 * drop, because we have potentially two things to drop and can only
	 * return one directly.
	 */
	if (rdrop1 && rdrop2) {
		KKASSERT(chain->flags & HAMMER2_CHAIN_ALLOCATED);
		chain->data = (void *)rdrop1;
		TAILQ_INSERT_TAIL(delayq, chain, core_entry);
		rdrop1 = rdrop2;
		rdrop2 = NULL;
	} else if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
		chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
		chain->hmp = NULL;
		kfree(chain, hmp->mchain);
	}
	if (pmp) {
		atomic_add_long(&pmp->inmem_chains, -1);
		hammer2_chain_memory_wakeup(pmp);
	}

	/*
	 * Return the chain to recursively drop, if any (when both were
	 * present the chain queued on delayq placeholds the other).
	 */
	if (rdrop1)
		return(rdrop1);
	return(rdrop2);
}
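/*
 * Illustrative pairing, not part of the original source: every
 * hammer2_chain_ref() is eventually balanced by hammer2_chain_drop().
 * The delayq machinery above exists only so the final drop can unwind
 * parents and siblings iteratively instead of recursing on the kernel
 * stack.
 */
#if 0
	hammer2_chain_ref(chain);	/* +1 ref, chain cannot be freed */
	/* ... use chain ... */
	hammer2_chain_drop(chain);	/* may cascade via lastdrop/delayq */
#endif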
/*
 * On either last lock release or last drop
 */
static void
hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop)
{
	hammer2_mount_t *hmp = chain->hmp;

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		if (lastdrop)
			chain->data = NULL;
		break;
	case HAMMER2_BREF_TYPE_INODE:
		if (chain->data) {
			kfree(chain->data, hmp->mchain);
			chain->data = NULL;
		}
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		if (chain->data) {
			kfree(chain->data, hmp->mchain);
			chain->data = NULL;
		}
		break;
	default:
		KKASSERT(chain->data == NULL);
		break;
	}
}
/*
 * Ref and lock a chain element, acquiring its data with I/O if necessary,
 * and specify how you would like the data to be resolved.
 *
 * Returns 0 on success or an error code if the data could not be acquired.
 * The chain element is locked on return regardless of whether an error
 * occurred or not.
 *
 * The lock is allowed to recurse, multiple locking ops will aggregate
 * the requested resolve types.  Once data is assigned it will not be
 * removed until the last unlock.
 *
 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
 *			   (typically used to avoid device/logical buffer
 *			    aliasing for data)
 *
 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
 *			   the INITIAL-create state (indirect blocks only).
 *
 *			   Do not resolve data elements for DATA chains.
 *			   (typically used to avoid device/logical buffer
 *			    aliasing for data)
 *
 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
 *
 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
 *			   it will be locked exclusive.
 *
 * NOTE: Embedded elements (volume header, inodes) are always resolved
 *	 regardless.
 *
 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
 *	 element will instantiate and zero its buffer, and flush it on
 *	 release.
 *
 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
 *	 so as not to instantiate a device buffer, which could alias against
 *	 a logical file buffer.  However, if ALWAYS is specified the
 *	 device buffer will be instantiated anyway.
 *
 * WARNING! If data must be fetched a shared lock will temporarily be
 *	    upgraded to exclusive.  However, a deadlock can occur if
 *	    the caller owns more than one shared lock.
 */
int
hammer2_chain_lock(hammer2_chain_t *chain, int how)
{
	hammer2_mount_t *hmp;
	hammer2_chain_core_t *core;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_off_t peof;
	ccms_state_t ostate;
	size_t boff;
	size_t psize;
	int error;
	char *bdata;

	/*
	 * Ref and lock the element.  Recursive locks are allowed.
	 */
	if ((how & HAMMER2_RESOLVE_NOREF) == 0)
		hammer2_chain_ref(chain);
	atomic_add_int(&chain->lockcnt, 1);

	hmp = chain->hmp;
	KKASSERT(hmp != NULL);

	/*
	 * Get the appropriate lock.
	 */
	core = chain->core;
	if (how & HAMMER2_RESOLVE_SHARED)
		ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
	else
		ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
	/*
	 * If we already have a valid data pointer no further action is
	 * necessary.
	 */
	if (chain->data)
		return (0);

	/*
	 * Do we have to resolve the data?
	 */
	switch(how & HAMMER2_RESOLVE_MASK) {
	case HAMMER2_RESOLVE_NEVER:
		return(0);
	case HAMMER2_RESOLVE_MAYBE:
		if (chain->flags & HAMMER2_CHAIN_INITIAL)
			return(0);
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
			return(0);
#if 0
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
			return(0);
#endif
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
			return(0);
		/* fall through */
	case HAMMER2_RESOLVE_ALWAYS:
		break;
	}

	/*
	 * Upgrade to an exclusive lock so we can safely manipulate the
	 * buffer cache.  If another thread got to it before us we
	 * can just return.
	 */
	ostate = ccms_thread_lock_upgrade(&core->cst);
	if (chain->data) {
		ccms_thread_lock_downgrade(&core->cst, ostate);
		return (0);
	}
	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 *
	 * The device buffer is variable-sized in powers of 2 down
	 * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
	 * chunk always contains buffers of the same size. (XXX)
	 *
	 * The minimum physical IO size may be larger than the variable
	 * block size.
	 */
	bref = &chain->bref;

	psize = hammer2_devblksize(chain->bytes);
	pmask = (hammer2_off_t)psize - 1;
	pbase = bref->data_off & ~pmask;
	boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
	KKASSERT(pbase != 0);
	peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request.
	 */
	if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
	    chain->bytes == psize) {
		chain->bp = getblk(hmp->devvp, pbase, psize, 0, 0);
		error = 0;
	} else if (hammer2_isclusterable(chain)) {
		error = cluster_read(hmp->devvp, peof, pbase, psize,
				     psize, HAMMER2_PBUFSIZE*4,
				     &chain->bp);
		adjreadcounter(&chain->bref, chain->bytes);
	} else {
		error = bread(hmp->devvp, pbase, psize, &chain->bp);
		adjreadcounter(&chain->bref, chain->bytes);
	}

	if (error) {
		kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
			(intmax_t)pbase, error);
		bqrelse(chain->bp);
		chain->bp = NULL;
		ccms_thread_lock_downgrade(&core->cst, ostate);
		return (error);
	}
	/*
	 * Zero the data area if the chain is in the INITIAL-create state.
	 * Mark the buffer for bdwrite().  This clears the INITIAL state
	 * but does not mark the chain modified.
	 */
	bdata = (char *)chain->bp->b_data + boff;
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		bzero(bdata, chain->bytes);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
	}

	/*
	 * Setup the data pointer, either pointing it to an embedded data
	 * structure and copying the data from the buffer, or pointing it
	 * into the buffer.
	 *
	 * The buffer is not retained when copying to an embedded data
	 * structure in order to avoid potential deadlocks or recursions
	 * on the same physical buffer.
	 */
	switch (bref->type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * Copy data from bp to embedded buffer
		 */
		panic("hammer2_chain_lock: called on unresolved volume header");
#if 0
		/* NOT YET */
		KKASSERT(pbase == 0);
		KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
		bcopy(bdata, &hmp->voldata, chain->bytes);
		chain->data = (void *)&hmp->voldata;
		bqrelse(chain->bp);
		chain->bp = NULL;
#endif
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Copy data from bp to embedded buffer, do not retain the
		 * device buffer.
		 */
		KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
		chain->data = kmalloc(sizeof(chain->data->ipdata),
				      hmp->mchain, M_WAITOK | M_ZERO);
		bcopy(bdata, &chain->data->ipdata, chain->bytes);
		bqrelse(chain->bp);
		chain->bp = NULL;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		KKASSERT(chain->bytes == sizeof(chain->data->bmdata));
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
		chain->data = kmalloc(sizeof(chain->data->bmdata),
				      hmp->mchain, M_WAITOK | M_ZERO);
		bcopy(bdata, &chain->data->bmdata, chain->bytes);
		bqrelse(chain->bp);
		chain->bp = NULL;
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	default:
		/*
		 * Point data at the device buffer and leave bp intact.
		 */
		chain->data = (void *)bdata;
		break;
	}

	/*
	 * Make sure the bp is not specifically owned by this thread before
	 * restoring to a possibly shared lock, so another hammer2 thread
	 * can release it.
	 */
	if (chain->bp)
		BUF_KERNPROC(chain->bp);
	ccms_thread_lock_downgrade(&core->cst, ostate);
	return (0);
}
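/*
 * Usage sketch, not part of the original source: a plain lock/unlock
 * pair.  Without HAMMER2_RESOLVE_NOREF the lock gains its own chain
 * ref, which the matching unlock releases.
 */
#if 0
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	/* chain->data is now valid (embedded or mapped via chain->bp) */
	hammer2_chain_unlock(chain);
#endif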
/*
 * Asynchronously read the device buffer (dbp) and execute the specified
 * callback.  The caller should pass-in a locked chain (shared lock is ok).
 * The function is responsible for unlocking the chain and for disposing
 * of dbp.
 *
 * NOTE!  A NULL dbp (but non-NULL data) will be passed to the function
 *	  if the dbp is integrated into the chain, because we do not want
 *	  the caller to dispose of dbp in that situation.
 */
static void hammer2_chain_load_async_callback(struct bio *bio);

void
hammer2_chain_load_async(hammer2_chain_t *chain,
			 void (*func)(hammer2_chain_t *, struct buf *,
				      char *, void *),
			 void *arg)
{
	hammer2_cbinfo_t *cbinfo;
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_off_t peof;
	struct buf *dbp;
	size_t boff;
	size_t psize;
	char *bdata;

	if (chain->data) {
		func(chain, NULL, (char *)chain->data, arg);
		return;
	}

	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 *
	 * The device buffer is variable-sized in powers of 2 down
	 * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
	 * chunk always contains buffers of the same size. (XXX)
	 *
	 * The minimum physical IO size may be larger than the variable
	 * block size.
	 */
	bref = &chain->bref;
	hmp = chain->hmp;

	psize = hammer2_devblksize(chain->bytes);
	pmask = (hammer2_off_t)psize - 1;
	pbase = bref->data_off & ~pmask;
	boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
	KKASSERT(pbase != 0);
	peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request.
	 */
	if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
	    chain->bytes == psize) {
		dbp = getblk(hmp->devvp, pbase, psize, 0, 0);
		/*atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);*/
		bdata = (char *)dbp->b_data + boff;
		bzero(bdata, chain->bytes);
		/*atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);*/
		func(chain, dbp, bdata, arg);
		return;
	}

	adjreadcounter(&chain->bref, chain->bytes);
	cbinfo = kmalloc(sizeof(*cbinfo), hmp->mchain, M_INTWAIT | M_ZERO);
	cbinfo->chain = chain;
	cbinfo->func = func;
	cbinfo->arg = arg;
	cbinfo->boff = boff;

	cluster_readcb(hmp->devvp, peof, pbase, psize,
		       HAMMER2_PBUFSIZE*4, HAMMER2_PBUFSIZE*4,
		       hammer2_chain_load_async_callback, cbinfo);
}
static void
hammer2_chain_load_async_callback(struct bio *bio)
{
	hammer2_cbinfo_t *cbinfo;
	hammer2_mount_t *hmp;
	struct buf *dbp;
	char *data;

	/*
	 * Nobody is waiting for bio/dbp to complete, we are
	 * responsible for handling the biowait() equivalent
	 * on dbp which means clearing BIO_DONE and BIO_SYNC
	 * and calling bpdone() if it hasn't already been called
	 * to restore any covered holes in the buffer's backing
	 * store.
	 */
	dbp = bio->bio_buf;
	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(dbp, 0);
	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

	/*
	 * Extract the auxiliary info and issue the callback.
	 * Finish up with the dbp after it returns.
	 */
	cbinfo = bio->bio_caller_info1.ptr;
	/*ccms_thread_lock_setown(cbinfo->chain->core);*/
	data = dbp->b_data + cbinfo->boff;
	hmp = cbinfo->chain->hmp;

	if (cbinfo->chain->flags & HAMMER2_CHAIN_INITIAL)
		bzero(data, cbinfo->chain->bytes);
	cbinfo->func(cbinfo->chain, dbp, data, cbinfo->arg);
	/* cbinfo->chain is stale now */
	kfree(cbinfo, hmp->mchain);
}
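/*
 * Usage sketch, not part of the original source (the callback name and
 * its cleanup calls are hypothetical): per the contract above, the
 * callback owns the chain lock and must dispose of dbp when dbp is
 * non-NULL.
 */
#if 0
static void
my_read_done(hammer2_chain_t *chain, struct buf *dbp, char *data, void *arg)
{
	/* ... consume data ... */
	if (dbp)
		bqrelse(dbp);		/* assumed disposal */
	hammer2_chain_unlock(chain);
}

	hammer2_chain_load_async(chain, my_read_done, NULL);
#endif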
/*
 * Unlock and deref a chain element.
 *
 * On the last lock release any non-embedded data (chain->bp) will be
 * released.
 */
void
hammer2_chain_unlock(hammer2_chain_t *chain)
{
	hammer2_chain_core_t *core = chain->core;
	ccms_state_t ostate;
	long *counterp;
	u_int lockcnt;

	/*
	 * The core->cst lock can be shared across several chains so we
	 * need to track the per-chain lockcnt separately.
	 *
	 * If multiple locks are present (or being attempted) on this
	 * particular chain we can just unlock, drop refs, and return.
	 *
	 * Otherwise fall-through on the 1->0 transition.
	 */
	for (;;) {
		lockcnt = chain->lockcnt;
		KKASSERT(lockcnt > 0);
		cpu_ccfence();
		if (lockcnt > 1) {
			if (atomic_cmpset_int(&chain->lockcnt,
					      lockcnt, lockcnt - 1)) {
				ccms_thread_unlock(&core->cst);
				hammer2_chain_drop(chain);
				return;
			}
		} else {
			if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
				break;
		}
		/* retry */
	}
	/*
	 * On the 1->0 transition we upgrade the core lock (if necessary)
	 * to exclusive for terminal processing.  If after upgrading we find
	 * that lockcnt is non-zero, another thread is racing us and will
	 * handle the unload for us later on, so just cleanup and return
	 * leaving the data/bp intact.
	 *
	 * Otherwise if lockcnt is still 0 it is possible for it to become
	 * non-zero and race, but since we hold the core->cst lock
	 * exclusively all that will happen is that the chain will be
	 * reloaded after we unload it.
	 */
	ostate = ccms_thread_lock_upgrade(&core->cst);
	if (chain->lockcnt) {
		ccms_thread_unlock_upgraded(&core->cst, ostate);
		hammer2_chain_drop(chain);
		return;
	}

	/*
	 * Shortcut the case if the data is embedded or not resolved.
	 *
	 * Do NOT NULL out chain->data (e.g. inode data), it might be
	 * needed later.
	 *
	 * The DIRTYBP flag is non-applicable in this situation and can
	 * be cleared to keep the flags state clean.
	 */
	if (chain->bp == NULL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
		if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
			hammer2_chain_drop_data(chain, 0);
		ccms_thread_unlock_upgraded(&core->cst, ostate);
		hammer2_chain_drop(chain);
		return;
	}
	if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
		;
	} else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_DATA:
			counterp = &hammer2_ioa_file_write;
			break;
		case HAMMER2_BREF_TYPE_INODE:
			counterp = &hammer2_ioa_meta_write;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
			counterp = &hammer2_ioa_indr_write;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			counterp = &hammer2_ioa_fmap_write;
			break;
		default:
			counterp = &hammer2_ioa_volu_write;
			break;
		}
		*counterp += chain->bytes;
	} else {
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_DATA:
			counterp = &hammer2_iod_file_write;
			break;
		case HAMMER2_BREF_TYPE_INODE:
			counterp = &hammer2_iod_meta_write;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
			counterp = &hammer2_iod_indr_write;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			counterp = &hammer2_iod_fmap_write;
			break;
		default:
			counterp = &hammer2_iod_volu_write;
			break;
		}
		*counterp += chain->bytes;
	}
	/*
	 * If a device buffer was used for data be sure to destroy the
	 * buffer when we are done to avoid aliases (XXX what about the
	 * underlying VM pages?).
	 *
	 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
	 *	 is possible.
	 *
	 * XXX our primary cache is now the block device, not
	 *     the logical file.  don't release the buffer.
	 */
#if 0
	if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
		chain->bp->b_flags |= B_RELBUF;
#endif

	/*
	 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
	 * or not.  The flag will get re-set when chain_modify() is called,
	 * even if MODIFIED is already set, allowing the OS to retire the
	 * buffer independent of a hammer2 flush.
	 */
	chain->data = NULL;
	if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_IOFLUSH);
			chain->bp->b_flags |= B_RELBUF;
			cluster_awrite(chain->bp);
		} else {
			chain->bp->b_flags |= B_CLUSTEROK;
			bdwrite(chain->bp);
		}
	} else {
		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_IOFLUSH);
			chain->bp->b_flags |= B_RELBUF;
			brelse(chain->bp);
		} else {
			/* bp might still be dirty */
			bqrelse(chain->bp);
		}
	}
	chain->bp = NULL;
	ccms_thread_unlock_upgraded(&core->cst, ostate);
	hammer2_chain_drop(chain);
}
/*
 * This counts the number of live blockrefs in a block array and
 * also calculates the point at which all remaining blockrefs are empty.
 *
 * NOTE: Flag is not set until after the count is complete, allowing
 *	 callers to test the flag without holding the spinlock.
 *
 * NOTE: If base is NULL the related chain is still in the INITIAL
 *	 state and there are no blockrefs to count.
 *
 * NOTE: live_count may already have some counts accumulated due to
 *	 creation and deletion and could even be initially negative.
 */
void
hammer2_chain_countbrefs(hammer2_chain_t *chain,
			 hammer2_blockref_t *base, int count)
{
	hammer2_chain_core_t *core = chain->core;

	spin_lock(&core->cst.spin);
	if ((core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0) {
		if (base) {
			while (--count >= 0) {
				if (base[count].type)
					break;
			}
			core->live_zero = count + 1;
			while (count >= 0) {
				if (base[count].type)
					atomic_add_int(&core->live_count, 1);
				--count;
			}
		} else {
			core->live_zero = 0;
		}
		/* else do not modify live_count */
		atomic_set_int(&core->flags, HAMMER2_CORE_COUNTEDBREFS);
	}
	spin_unlock(&core->cst.spin);
}
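/*
 * Worked example, not part of the original source: for a hypothetical
 * count == 4 array base[] = { live, empty, live, empty }, the backward
 * scan stops at index 2, so live_zero becomes 3 (everything at index
 * >= 3 is known empty) and the forward pass adds 2 to live_count.
 */
#if 0
	/* base[] = { live, empty, live, empty }, count = 4	*/
	/*   => core->live_zero == 3, core->live_count += 2	*/
#endif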
/*
 * Resize the chain's physical storage allocation in-place.  This may
 * replace the passed-in chain with a new chain.
 *
 * Chains can be resized smaller without reallocating the storage.
 * Resizing larger will reallocate the storage.
 *
 * Must be passed an exclusively locked parent and chain, returns a new
 * exclusively locked chain at the same index and unlocks the old chain.
 * Flushes the buffer if necessary.
 *
 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
 * to avoid instantiating a device buffer that conflicts with the vnode
 * data buffer.  That is, the passed-in bp is a logical buffer, whereas
 * any chain-oriented bp would be a device buffer.
 *
 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
 * XXX return error if cannot resize.
 */
void
hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
		     hammer2_chain_t *parent, hammer2_chain_t **chainp,
		     int nradix, int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	size_t obytes;
	size_t nbytes;
	size_t bbytes;
	int boff;

	chain = *chainp;
	hmp = chain->hmp;

	/*
	 * Only data and indirect blocks can be resized for now.
	 * (The volu root, inodes, and freemap elements use a fixed size).
	 */
	KKASSERT(chain != &hmp->vchain);
	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
		 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);

	/*
	 * Nothing to do if the element is already the proper size
	 */
	obytes = chain->bytes;
	nbytes = 1U << nradix;
	if (obytes == nbytes)
		return;

	/*
	 * Delete the old chain and duplicate it at the same (parent, index),
	 * returning a new chain.  This allows the old chain to still be
	 * used by the flush code.  The new chain will be returned in a
	 * modified state.
	 *
	 * The parent does not have to be locked for the delete/duplicate call,
	 * but is in this particular code path.
	 *
	 * NOTE: If we are not crossing a synchronization point the
	 *	 duplication code will simply reuse the existing chain
	 *	 structure.
	 */
	hammer2_chain_delete_duplicate(trans, &chain, 0);

	/*
	 * Relocate the block, even if making it smaller (because different
	 * block sizes may be in different regions).
	 */
	hammer2_freemap_alloc(trans, chain->hmp, &chain->bref, nbytes);
	chain->bytes = nbytes;
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
	/*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */

	/*
	 * The device buffer may be larger than the allocation size.
	 */
	bbytes = hammer2_devblksize(chain->bytes);
	pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
	boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);

	/*
	 * For now just support it on DATA chains (and not on indirect
	 * blocks).
	 */
	KKASSERT(chain->bp == NULL);

	/*
	 * Make sure the chain is marked MOVED and propagate the update
	 * to the root for flush.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
	hammer2_chain_setsubmod(trans, chain);
	*chainp = chain;
}
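/*
 * Illustrative note, not part of the original source: nradix is the
 * log2 of the requested size, so nbytes = 1U << nradix; e.g. nradix 10
 * yields 1KB (HAMMER2_MIN_ALLOC per the comments above) and nradix 16
 * yields 64KB.
 */
#if 0
	/* grow a data chain to 16KB; *chainp may be replaced */
	hammer2_chain_resize(trans, ip, parent, &chain, 14, 0);
#endif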
/*
 * Set a chain modified, making it read-write and duplicating it if necessary.
 * This function will assign a new physical block to the chain if necessary.
 *
 * Duplication of already-modified chains is possible when the modification
 * crosses a flush synchronization boundary.
 *
 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
 *		     level or the COW operation will not work.
 *
 * Data blocks	   - The chain is usually locked RESOLVE_NEVER so as not to
 *		     run the data through the device buffers.
 *
 * This function may return a different chain than was passed, in which case
 * the old chain will be unlocked and the new chain will be locked.
 *
 * ip->chain may be adjusted by hammer2_chain_modify_ip().
 */
hammer2_inode_data_t *
hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
			hammer2_chain_t **chainp, int flags)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	hammer2_chain_modify(trans, chainp, flags);
	if (ip->chain != *chainp)
		hammer2_inode_repoint(ip, NULL, *chainp);
	if (ip->vp)
		vsetisdirty(ip->vp);
	return(&ip->chain->data->ipdata);
}
void
hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
		     int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_off_t peof;
	struct buf *nbp;
	int error;
	int wasinitial;
	size_t psize;
	size_t boff;
	void *bdata;

	chain = *chainp;
	hmp = chain->hmp;

	kprintf("MODIFY %p.%d flags %08x mod=%016jx del=%016jx\n",
		chain, chain->bref.type, chain->flags,
		chain->modify_tid, chain->delete_tid);
	/*
	 * Data must be resolved if already assigned unless explicitly
	 * flagged otherwise.
	 */
	if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
	    (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(chain);
	}

	/*
	 * data is not optional for freemap chains (we must always be sure
	 * to copy the data on COW storage allocations).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
			 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
	}

	/*
	 * Determine if a delete-duplicate is needed.
	 *
	 * (a) Modify_tid is part of a prior flush
	 * (b) Transaction is concurrent with a flush (has higher tid)
	 * (c) and chain is not in the initial state (freshly created)
	 * (d) and caller didn't request an in-place modification.
	 *
	 * The freemap and volume header special chains are never D-Dd.
	 */
	if (chain->modify_tid != trans->sync_tid &&	/* cross boundary */
	    (flags & HAMMER2_MODIFY_INPLACE) == 0) {	/* from d-d */
		if (chain != &hmp->fchain && chain != &hmp->vchain) {
			KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
			hammer2_chain_delete_duplicate(trans, chainp, 0);
			kprintf("RET1A %p.%d flags %08x mod=%016jx del=%016jx\n",
				chain, chain->bref.type, chain->flags,
				chain->modify_tid, chain->delete_tid);
			return;
		}
		kprintf("RET1B %p.%d flags %08x mod=%016jx del=%016jx\n",
			chain, chain->bref.type, chain->flags,
			chain->modify_tid, chain->delete_tid);
		/* fall through if fchain or vchain */
	}
	/*
	 * Otherwise do initial-chain handling
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_chain_ref(chain);
	}

	/*
	 * The modification or re-modification requires an allocation and
	 * possible COW.
	 *
	 * We normally always allocate new storage here.  If storage exists
	 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
	 */
	if (chain != &hmp->vchain && chain != &hmp->fchain) {
		if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
		    ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 &&
		     chain->modify_tid != trans->sync_tid)
		) {
			hammer2_freemap_alloc(trans, chain->hmp,
					      &chain->bref, chain->bytes);
			/* XXX failed allocation */
		} else if (chain->flags & HAMMER2_CHAIN_FORCECOW) {
			hammer2_freemap_alloc(trans, chain->hmp,
					      &chain->bref, chain->bytes);
			/* XXX failed allocation */
		}
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
	}

	chain->modify_tid = trans->sync_tid;
	if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
		chain->bref.modify_tid = trans->sync_tid;
	/*
	 * Do not COW if OPTDATA is set.  INITIAL flag remains unchanged.
	 * (OPTDATA does not prevent [re]allocation of storage, only the
	 * related copy-on-write op).
	 */
	if (flags & HAMMER2_MODIFY_OPTDATA)
		goto skip2;

	/*
	 * Clearing the INITIAL flag (for indirect blocks) indicates that
	 * we've processed the uninitialized storage allocation.
	 *
	 * If this flag is already clear we are likely in a copy-on-write
	 * situation but we have to be sure NOT to bzero the storage if
	 * no data is present.
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		wasinitial = 1;
	} else {
		wasinitial = 0;
	}
	/*
	 * Instantiate data buffer and possibly execute COW operation
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * The data is embedded, no copy-on-write operation is
		 * needed.
		 */
		KKASSERT(chain->bp == NULL);
		break;
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Perform the copy-on-write operation
		 */
		KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);

		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = chain->bref.data_off & ~pmask;
		boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
		KKASSERT(pbase != 0);
		peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

		/*
		 * The getblk() optimization can only be used if the
		 * chain element size matches the physical block size.
		 */
		if (chain->bp && chain->bp->b_loffset == pbase) {
			nbp = chain->bp;
			error = 0;
		} else if (chain->bytes == psize) {
			nbp = getblk(hmp->devvp, pbase, psize, 0, 0);
			error = 0;
		} else if (hammer2_isclusterable(chain)) {
			error = cluster_read(hmp->devvp, peof, pbase, psize,
					     psize, HAMMER2_PBUFSIZE*4,
					     &nbp);
			adjreadcounter(&chain->bref, chain->bytes);
		} else {
			error = bread(hmp->devvp, pbase, psize, &nbp);
			adjreadcounter(&chain->bref, chain->bytes);
		}
		KKASSERT(error == 0);
		bdata = (char *)nbp->b_data + boff;

		/*
		 * Copy or zero-fill on write depending on whether
		 * chain->data exists or not.  Retire the existing bp
		 * based on the DIRTYBP flag.  Set the DIRTYBP flag to
		 * indicate that retirement of nbp should use bdwrite().
		 */
		if (chain->data) {
			KKASSERT(chain->bp != NULL);
			if (chain->data != bdata) {
				bcopy(chain->data, bdata, chain->bytes);
			}
		} else if (wasinitial) {
			bzero(bdata, chain->bytes);
		} else {
			/*
			 * We have a problem.  We were asked to COW but
			 * we don't have any data to COW with!
			 */
			panic("hammer2_chain_modify: having a COW %p\n",
			      chain);
		}
		if (chain->bp != nbp) {
			if (chain->bp) {
				if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
					chain->bp->b_flags |= B_CLUSTEROK;
					bdwrite(chain->bp);
				} else {
					chain->bp->b_flags |= B_RELBUF;
					brelse(chain->bp);
				}
			}
			chain->bp = nbp;
			BUF_KERNPROC(chain->bp);
		}
		chain->data = bdata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
		break;
	default:
		panic("hammer2_chain_modify: illegal non-embedded type %d",
		      chain->bref.type);
		break;
	}
skip2:
	kprintf("RET2 %p.%d flags %08x mod=%016jx del=%016jx\n",
		chain, chain->bref.type, chain->flags,
		chain->modify_tid, chain->delete_tid);
	hammer2_chain_setsubmod(trans, chain);
}
/*
 * Mark the volume as having been modified.  This short-cut version
 * does not have to lock the volume's chain, which allows the ioctl
 * code to make adjustments to connections without deadlocking.  XXX
 *
 * No ref is made on vchain when flagging it MODIFIED.
 */
void
hammer2_modify_volume(hammer2_mount_t *hmp)
{
	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp, 1);
}
/*
 * This function returns the chain at the nearest key within the specified
 * range with the highest delete_tid.  The core spinlock must be held on
 * call and the returned chain will be referenced but not locked.
 *
 * The returned chain may or may not be in a deleted state.  Note that
 * live chains have a delete_tid = MAX_TID.
 *
 * This function will recurse through chain->rbtree as necessary and will
 * return a *key_nextp suitable for iteration.  *key_nextp is only set if
 * the iteration value is less than the current value of *key_nextp.
 *
 * The caller should use (*key_nextp) to calculate the actual range of
 * the returned element, which will be (key_beg to *key_nextp - 1), because
 * there might be another element which is superior to the returned element
 * and overlaps it.
 *
 * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL
 * chains continue to be returned.  On EOF (*key_nextp) may overflow since
 * it will wind up being (key_end + 1).
 */
struct hammer2_chain_find_info {
	hammer2_chain_t		*best;
	hammer2_key_t		key_beg;
	hammer2_key_t		key_end;
	hammer2_key_t		key_next;
};

static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);
static
hammer2_chain_t *
hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
		   hammer2_key_t key_beg, hammer2_key_t key_end)
{
	struct hammer2_chain_find_info info;
	hammer2_chain_layer_t *layer;

	info.best = NULL;
	info.key_beg = key_beg;
	info.key_end = key_end;
	info.key_next = *key_nextp;

	KKASSERT(parent->core->good == 0x1234);
	TAILQ_FOREACH(layer, &parent->core->layerq, entry) {
		KKASSERT(layer->good == 0xABCD);
		RB_SCAN(hammer2_chain_tree, &layer->rbtree,
			hammer2_chain_find_cmp, hammer2_chain_find_callback,
			&info);
	}
	*key_nextp = info.key_next;
#if 0
	kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
		parent, key_beg, key_end, *key_nextp);
#endif

	return (info.best);
}
static
int
hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
{
	struct hammer2_chain_find_info *info = data;
	hammer2_key_t child_beg;
	hammer2_key_t child_end;

	child_beg = child->bref.key;
	child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;

	if (child_end < info->key_beg)
		return(-1);
	if (child_beg > info->key_end)
		return(1);
	return(0);
}
static
int
hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
{
	struct hammer2_chain_find_info *info = data;
	hammer2_chain_t *best;
	hammer2_key_t child_end;

	/*
	 * Skip deleted chains which have been flushed (MOVED no longer set),
	 * causes caller to check blockref array.
	 */
	if ((child->flags & (HAMMER2_CHAIN_DELETED | HAMMER2_CHAIN_MOVED)) ==
	    HAMMER2_CHAIN_DELETED) {
		/* continue scan */
		return(0);
	}

	if ((best = info->best) == NULL) {
		/*
		 * No previous best.  Assign best
		 */
		info->best = child;
	} else if (best->bref.key <= info->key_beg &&
		   child->bref.key <= info->key_beg) {
		/*
		 * If our current best is flush with key_beg and child is
		 * also flush with key_beg choose based on delete_tid.
		 *
		 * key_next will automatically be limited to the smaller of
		 * the two end-points.
		 */
		if (child->delete_tid > best->delete_tid)
			info->best = child;
	} else if (child->bref.key < best->bref.key) {
		/*
		 * Child has a nearer key and best is not flush with key_beg.
		 * Truncate key_next to the old best key iff it had a better
		 * delete_tid.
		 */
		info->best = child;
		if (best->delete_tid >= child->delete_tid &&
		    (info->key_next > best->bref.key || info->key_next == 0))
			info->key_next = best->bref.key;
	} else if (child->bref.key == best->bref.key) {
		/*
		 * If our current best is flush with the child then choose
		 * based on delete_tid.
		 *
		 * key_next will automatically be limited to the smaller of
		 * the two end-points.
		 */
		if (child->delete_tid > best->delete_tid)
			info->best = child;
	} else {
		/*
		 * Keep the current best but truncate key_next to the child's
		 * base iff the child has a higher delete_tid.
		 *
		 * key_next will also automatically be limited to the smaller
		 * of the two end-points (probably not necessary for this case
		 * but we do it anyway).
		 */
		if (child->delete_tid >= best->delete_tid &&
		    (info->key_next > child->bref.key || info->key_next == 0))
			info->key_next = child->bref.key;
	}

	/*
	 * Always truncate key_next based on child's end-of-range.
	 */
	child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
	if (child_end && (info->key_next > child_end || info->key_next == 0))
		info->key_next = child_end;

	return(0);
}
/*
 * Retrieve the specified chain from a media blockref, creating the
 * in-memory chain structure which reflects it.  modify_tid will be
 * left 0 which forces any modifications to issue a delete-duplicate.
 *
 * NULL is returned if the insertion races.
 *
 * Caller must hold the parent locked shared or exclusive since we may
 * need the parent's bref array to find our block.
 */
hammer2_chain_t *
hammer2_chain_get(hammer2_chain_t *parent, hammer2_blockref_t *bref)
{
	hammer2_mount_t *hmp = parent->hmp;
	hammer2_chain_core_t *above = parent->core;
	hammer2_chain_t *chain;

	/*
	 * Allocate a chain structure representing the existing media
	 * entry.  Resulting chain has one ref and is not locked.
	 */
	chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
	hammer2_chain_core_alloc(NULL, chain, NULL);
	/* ref'd chain returned */
	chain->modify_tid = chain->bref.mirror_tid;

	/*
	 * Link the chain into its parent.  A spinlock is required to safely
	 * access the RBTREE, and it is possible to collide with another
	 * hammer2_chain_get() operation because the caller might only hold
	 * a shared lock on the parent.
	 */
	KKASSERT(parent->refs > 0);
	hammer2_chain_insert(above, chain, HAMMER2_CHAIN_INSERT_SPIN |
					   HAMMER2_CHAIN_INSERT_RACE);
	if ((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0) {
		kprintf("chain %p not on RBTREE\n", chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	}

	/*
	 * Return our new chain referenced but not locked.
	 */
	return (chain);
}
/*
 * Lookup initialization/completion API
 */
hammer2_chain_t *
hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
{
	if (flags & HAMMER2_LOOKUP_SHARED) {
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
					   HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	}
	return (parent);
}

void
hammer2_chain_lookup_done(hammer2_chain_t *parent)
{
	if (parent)
		hammer2_chain_unlock(parent);
}
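/*
 * Usage sketch, not part of the original source: a typical ranged scan.
 * hammer2_chain_next() is the companion iterator from this subsystem
 * (not shown in this excerpt); *key_nextp feeds back in as the next
 * key_beg while chains continue to be returned.
 */
#if 0
	parent = hammer2_chain_lookup_init(ip->chain, 0);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     key_beg, key_end, &cache_index, 0);
	while (chain) {
		/* ... process chain ... */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, key_end,
					   &cache_index, 0);
	}
	hammer2_chain_lookup_done(parent);
#endif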
static
hammer2_chain_t *
hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *oparent;
	hammer2_chain_t *bparent;
	hammer2_chain_t *nparent;
	hammer2_chain_core_t *above;

	oparent = *parentp;
	above = oparent->above;

	spin_lock(&above->cst.spin);
	bparent = TAILQ_FIRST(&above->ownerq);
	hammer2_chain_ref(bparent);

	for (;;) {
		nparent = bparent;
		while (nparent->flags & HAMMER2_CHAIN_DUPLICATED)
			nparent = TAILQ_NEXT(nparent, core_entry);
		hammer2_chain_ref(nparent);
		spin_unlock(&above->cst.spin);

		/*
		 * Be careful of order
		 */
		hammer2_chain_unlock(oparent);
		hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
		hammer2_chain_drop(bparent);

		/*
		 * We might have raced a delete-duplicate.
		 */
		if (nparent->flags & HAMMER2_CHAIN_DUPLICATED) {
			spin_lock(&above->cst.spin);
			if (nparent->flags & HAMMER2_CHAIN_DUPLICATED) {
				spin_unlock(&above->cst.spin);
				hammer2_chain_ref(nparent);
				hammer2_chain_unlock(nparent);
				bparent = nparent;
				spin_lock(&above->cst.spin);
				continue;	/* retry */
			}
			spin_unlock(&above->cst.spin);
		}
		break;
	}
	*parentp = nparent;

	return (nparent);
}
/*
 * Locate the first chain whose key range overlaps (key_beg, key_end) inclusive.
 * (*parentp) typically points to an inode but can also point to a related
 * indirect block and this function will recurse upwards and find the inode
 * again.
 *
 * (*parentp) must be exclusively locked and referenced and can be an inode
 * or an existing indirect block within the inode.
 *
 * On return (*parentp) will be modified to point at the deepest parent chain
 * element encountered during the search, as a helper for an insertion or
 * deletion.  The new (*parentp) will be locked and referenced and the old
 * will be unlocked and dereferenced (no change if they are both the same).
 *
 * The matching chain will be returned exclusively locked.  If NOLOCK is
 * requested the chain will be returned only referenced.
 *
 * NULL is returned if no match was found, but (*parentp) will still
 * potentially be adjusted.
 *
 * On return (*key_nextp) will point to an iterative value for key_beg.
 * (If NULL is returned (*key_nextp) is set to key_end).
 *
 * This function will also recurse up the chain if the key is not within the
 * current parent's range.  (*parentp) can never be set to NULL.  An iteration
 * can simply allow (*parentp) to float inside the loop.
 *
 * NOTE!  chain->data is not always resolved.  By default it will not be
 *	  resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
 *	  HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
 *	  BREF_TYPE_DATA as the device buffer can alias the logical file
 *	  buffer).
 */
hammer2_chain_t *
hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
		     hammer2_key_t key_beg, hammer2_key_t key_end,
		     int *cache_indexp, int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_blockref_t bcopy;
	hammer2_key_t scan_beg;
	hammer2_key_t scan_end;
	hammer2_chain_core_t *above;
	int count = 0;
	int how_always = HAMMER2_RESOLVE_ALWAYS;
	int how_maybe = HAMMER2_RESOLVE_MAYBE;
	int how;

	if (flags & HAMMER2_LOOKUP_ALWAYS) {
		how_maybe = how_always;
		how = HAMMER2_RESOLVE_ALWAYS;
	} else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
		how = HAMMER2_RESOLVE_NEVER;
	} else {
		how = HAMMER2_RESOLVE_MAYBE;
	}
	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
		how_maybe |= HAMMER2_RESOLVE_SHARED;
		how_always |= HAMMER2_RESOLVE_SHARED;
		how |= HAMMER2_RESOLVE_SHARED;
	}
2065 * Recurse (*parentp) upward if necessary until the parent completely
2066 * encloses the key range or we hit the inode.
2068 * This function handles races against the flusher doing a delete-
2069 * duplicate above us and re-homes the parent to the duplicate in
2070 * that case, otherwise we'd wind up recursing down a stale chain.
2075 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
2076 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2077 scan_beg = parent->bref.key;
2078 scan_end = scan_beg +
2079 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
2080 if (key_beg >= scan_beg && key_end <= scan_end)
2082 parent = hammer2_chain_getparent(parentp, how_maybe);
2087 * Locate the blockref array. Currently we do a fully associative
2088 * search through the array.
2090 switch(parent->bref.type) {
2091 case HAMMER2_BREF_TYPE_INODE:
2093 * Special shortcut for embedded data returns the inode
2094 * itself. Callers must detect this condition and access
2095 * the embedded data (the strategy code does this for us).
2097 * This is only applicable to regular files and softlinks.
2099 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
2100 if (flags & HAMMER2_LOOKUP_NOLOCK)
2101 hammer2_chain_ref(parent);
2103 hammer2_chain_lock(parent, how_always);
2104 *key_nextp = key_end + 1;
2107 base = &parent->data->ipdata.u.blockset.blockref[0];
2108 count = HAMMER2_SET_COUNT;
2110 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2111 case HAMMER2_BREF_TYPE_INDIRECT:
2113 * Handle MATCHIND on the parent
2115 if (flags & HAMMER2_LOOKUP_MATCHIND) {
2116 scan_beg = parent->bref.key;
2117 scan_end = scan_beg +
2118 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
2119 if (key_beg == scan_beg && key_end == scan_end) {
2121 hammer2_chain_lock(chain, how_maybe);
2122 *key_nextp = scan_end + 1;
2127 * Optimize indirect blocks in the INITIAL state to avoid I/O.
2130 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2133 if (parent->data == NULL)
2134 panic("parent->data is NULL");
2135 base = &parent->data->npdata[0];
2137 count = parent->bytes / sizeof(hammer2_blockref_t);
2139 case HAMMER2_BREF_TYPE_VOLUME:
2140 base = &hmp->voldata.sroot_blockset.blockref[0];
2141 count = HAMMER2_SET_COUNT;
2143 case HAMMER2_BREF_TYPE_FREEMAP:
2144 base = &hmp->voldata.freemap_blockset.blockref[0];
2145 count = HAMMER2_SET_COUNT;
2148 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
2150 base = NULL; /* safety */
2151 count = 0; /* safety */
2155 * Merged scan to find next candidate.
2157 * hammer2_base_*() functions require the above->live_* fields
2158 * to be synchronized.
2160 * We need to hold the spinlock to access the block array and RB tree
2161 * and to interlock chain creation.
2163 above = parent->core;
2164 if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2165 hammer2_chain_countbrefs(parent, base, count);
2170 spin_lock(&above->cst.spin);
2171 chain = hammer2_combined_find(parent, base, count,
2172 cache_indexp, key_nextp,
2173 key_beg, key_end, &bref);
2176 * Exhausted parent chain, iterate.
2179 spin_unlock(&above->cst.spin);
2180 if (key_beg == key_end) /* short cut single-key case */
2182 return (hammer2_chain_next(parentp, NULL, key_nextp,
2184 cache_indexp, flags));
2188 * Selected from blockref or in-memory chain.
2190 if (chain == NULL) {
2192 spin_unlock(&above->cst.spin);
2193 chain = hammer2_chain_get(parent, &bcopy);
2194 if (chain == NULL) {
2195 kprintf("retry lookup parent %p keys %016jx:%016jx\n",
2196 parent, key_beg, key_end);
2199 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2200 hammer2_chain_drop(chain);
2204 hammer2_chain_ref(chain);
2205 spin_unlock(&above->cst.spin);
2207 /* chain is referenced but not locked */
2210 * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2212 * NOTE: chain's key range is not relevant as there might be
2213 * one-offs within the range that are not deleted.
2215 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2216 hammer2_chain_drop(chain);
2217 key_beg = *key_nextp;
2218 if (key_beg == 0 || key_beg > key_end)
2224 * If the chain element is an indirect block it becomes the new
2225 * parent and we loop on it. We must maintain our top-down locks
2226 * to prevent the flusher from interfering (i.e. doing a
2227 * delete-duplicate and leaving us recursing down a deleted chain).
2229 * The parent always has to be locked with at least RESOLVE_MAYBE
2230 * so we can access its data. It might need a fixup if the caller
2231 * passed incompatible flags. Be careful not to cause a deadlock
2232 * as a data-load requires an exclusive lock.
2234 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
2235 * range is within the requested key range we return the indirect
2236 * block and do NOT loop. This is usually only used to acquire freemap nodes.
2239 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
2240 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2241 hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF);
2242 hammer2_chain_unlock(parent);
2243 *parentp = parent = chain;
2247 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
2250 * All done, return the chain
2256 * After having issued a lookup we can iterate all matching keys.
2258 * If chain is non-NULL we continue the iteration from just after its index.
2260 * If chain is NULL we assume the parent was exhausted and continue the
2261 * iteration at the next parent.
2263 * parent must be locked on entry and remains locked throughout. chain's
2264 * lock status must match flags. Chain is always at least referenced.
2266 * WARNING! The MATCHIND flag does not apply to this function.
2269 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
2270 hammer2_key_t *key_nextp,
2271 hammer2_key_t key_beg, hammer2_key_t key_end,
2272 int *cache_indexp, int flags)
2274 hammer2_chain_t *parent;
2278 * Calculate locking flags for upward recursion.
2280 how_maybe = HAMMER2_RESOLVE_MAYBE;
2281 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
2282 how_maybe |= HAMMER2_RESOLVE_SHARED;
2287 * Calculate the next index and recalculate the parent if necessary.
2290 key_beg = chain->bref.key +
2291 ((hammer2_key_t)1 << chain->bref.keybits);
2292 if (flags & HAMMER2_LOOKUP_NOLOCK)
2293 hammer2_chain_drop(chain);
2295 hammer2_chain_unlock(chain);
2298 * Any scan where the lookup returned degenerate data embedded
2299 * in the inode has an invalid index and must terminate.
2301 if (chain == parent)
2303 if (key_beg == 0 || key_beg > key_end)
2306 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
2307 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2309 * We reached the end of the iteration.
2314 * Continue iteration with next parent unless the current
2315 * parent covers the range.
2317 key_beg = parent->bref.key +
2318 ((hammer2_key_t)1 << parent->bref.keybits);
2319 if (key_beg == 0 || key_beg > key_end)
2321 parent = hammer2_chain_getparent(parentp, how_maybe);
2327 return (hammer2_chain_lookup(parentp, key_nextp,
2329 cache_indexp, flags));
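/*
 * Illustrative sketch (hypothetical caller): a full-range scan chaining
 * hammer2_chain_lookup() and hammer2_chain_next(). (*parentp) floats as
 * the iteration recurses up and down the topology; *key_nextp feeds the
 * next key_beg.
 *
 *	chain = hammer2_chain_lookup(&parent, &key_next,
 *				     0, HAMMER2_MAX_KEY,
 *				     &cache_index, flags);
 *	while (chain) {
 *		...process chain...
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, HAMMER2_MAX_KEY,
 *					   &cache_index, flags);
 *	}
 */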
2333 * Create and return a new hammer2 system memory structure of the specified
2334 * key, type and size and insert it under (*parentp). This is a full
2335 * insertion, based on the supplied key/keybits, and may involve creating
2336 * indirect blocks and moving other chains around via delete/duplicate.
2338 * (*parentp) must be exclusively locked and may be replaced on return
2339 * depending on how much work the function had to do.
2341 * (*chainp) usually starts out NULL and returns the newly created chain,
2342 * but if the caller desires the caller may allocate a disconnected chain
2343 * and pass it in instead. (It is also possible for the caller to use
2344 * chain_duplicate() to create a disconnected chain, manipulate it, then
2345 * pass it into this function to insert it).
2347 * This function should NOT be used to insert INDIRECT blocks. It is
2348 * typically used to create/insert inodes and data blocks.
2350 * Caller must pass-in an exclusively locked parent the new chain is to
2351 * be inserted under, and optionally pass-in a disconnected, exclusively
2352 * locked chain to insert (else we create a new chain). The function will
2353 * adjust (*parentp) as necessary, create or connect the chain, and
2354 * return an exclusively locked chain in *chainp.
2357 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2358 hammer2_chain_t **chainp,
2359 hammer2_key_t key, int keybits, int type, size_t bytes)
2361 hammer2_mount_t *hmp;
2362 hammer2_chain_t *chain;
2363 hammer2_chain_t *parent = *parentp;
2364 hammer2_chain_core_t *above;
2365 hammer2_blockref_t *base;
2366 hammer2_blockref_t dummy;
2371 above = parent->core;
2372 KKASSERT(ccms_thread_lock_owned(&above->cst));
2376 if (chain == NULL) {
2378 * First allocate media space and construct the dummy bref,
2379 * then allocate the in-memory chain structure. Set the
2380 * INITIAL flag for fresh chains.
2382 bzero(&dummy, sizeof(dummy));
2385 dummy.keybits = keybits;
2386 dummy.data_off = hammer2_getradix(bytes);
2387 dummy.methods = parent->bref.methods;
2388 chain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy);
2389 hammer2_chain_core_alloc(trans, chain, NULL);
2391 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2394 * Lock the chain manually; chain_lock would load the chain's data,
2395 * which we do NOT want to do here. (note: chain->refs is set
2396 * to 1 by chain_alloc() for us, but lockcnt is not).
2399 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
2403 * We do NOT set INITIAL here (yet). INITIAL is only
2404 * used for indirect blocks.
2406 * Recalculate bytes to reflect the actual media block
2409 bytes = (hammer2_off_t)1 <<
2410 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2411 chain->bytes = bytes;
2414 case HAMMER2_BREF_TYPE_VOLUME:
2415 case HAMMER2_BREF_TYPE_FREEMAP:
2416 panic("hammer2_chain_create: called with volume type");
2418 case HAMMER2_BREF_TYPE_INODE:
2419 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2420 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2421 chain->data = kmalloc(sizeof(chain->data->ipdata),
2422 hmp->mchain, M_WAITOK | M_ZERO);
2424 case HAMMER2_BREF_TYPE_INDIRECT:
2425 panic("hammer2_chain_create: cannot be used to "
2426 "create indirect block");
2428 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2429 panic("hammer2_chain_create: cannot be used to "
2430 "create freemap root or node");
2432 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2433 KKASSERT(bytes == sizeof(chain->data->bmdata));
2434 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2435 chain->data = kmalloc(sizeof(chain->data->bmdata),
2436 hmp->mchain, M_WAITOK | M_ZERO);
2438 case HAMMER2_BREF_TYPE_DATA:
2440 /* leave chain->data NULL */
2441 KKASSERT(chain->data == NULL);
2446 * Potentially update the existing chain's key/keybits.
2448 * Do NOT mess with the current state of the INITIAL flag.
2450 chain->bref.key = key;
2451 chain->bref.keybits = keybits;
2452 KKASSERT(chain->above == NULL);
2456 * Calculate how many entries we have in the blockref array and
2457 * determine if an indirect block is required.
2460 above = parent->core;
2462 switch(parent->bref.type) {
2463 case HAMMER2_BREF_TYPE_INODE:
2464 KKASSERT((parent->data->ipdata.op_flags &
2465 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2466 KKASSERT(parent->data != NULL);
2467 base = &parent->data->ipdata.u.blockset.blockref[0];
2468 count = HAMMER2_SET_COUNT;
2470 case HAMMER2_BREF_TYPE_INDIRECT:
2471 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2472 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2475 base = &parent->data->npdata[0];
2476 count = parent->bytes / sizeof(hammer2_blockref_t);
2478 case HAMMER2_BREF_TYPE_VOLUME:
2479 KKASSERT(parent->data != NULL);
2480 base = &hmp->voldata.sroot_blockset.blockref[0];
2481 count = HAMMER2_SET_COUNT;
2483 case HAMMER2_BREF_TYPE_FREEMAP:
2484 KKASSERT(parent->data != NULL);
2485 base = &hmp->voldata.freemap_blockset.blockref[0];
2486 count = HAMMER2_SET_COUNT;
2489 panic("hammer2_chain_create: unrecognized blockref type: %d",
2497 * Make sure we've counted the brefs
2499 if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2500 hammer2_chain_countbrefs(parent, base, count);
2502 KKASSERT(above->live_count >= 0 && above->live_count <= count);
2505 * If no free blockref could be found we must create an indirect
2506 * block and move a number of blockrefs into it. With the parent
2507 * locked we can safely lock each child in order to delete+duplicate
2508 * it without causing a deadlock.
2510 * This may return the new indirect block or the old parent depending
2511 * on where the key falls. NULL is returned on error.
2513 if (above->live_count == count) {
2514 hammer2_chain_t *nparent;
2516 nparent = hammer2_chain_create_indirect(trans, parent,
2519 if (nparent == NULL) {
2521 hammer2_chain_drop(chain);
2525 if (parent != nparent) {
2526 hammer2_chain_unlock(parent);
2527 parent = *parentp = nparent;
2533 * Link the chain into its parent. Later on we will have to set
2534 * the MOVED bit in situations where we don't mark the new chain
2535 * as being modified.
2537 if (chain->above != NULL)
2538 panic("hammer2: hammer2_chain_create: chain already connected");
2539 KKASSERT(chain->above == NULL);
2540 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2541 hammer2_chain_insert(above, chain, HAMMER2_CHAIN_INSERT_SPIN |
2542 HAMMER2_CHAIN_INSERT_LIVE);
2546 * Mark the newly created chain modified.
2548 * Device buffers are not instantiated for DATA elements
2549 * as these are handled by logical buffers.
2551 * Indirect and freemap node indirect blocks are handled
2552 * by hammer2_chain_create_indirect() and not by this function.
2555 * Data for all other bref types is expected to be
2556 * instantiated (INODE, LEAF).
2558 switch(chain->bref.type) {
2559 case HAMMER2_BREF_TYPE_DATA:
2560 hammer2_chain_modify(trans, &chain,
2561 HAMMER2_MODIFY_OPTDATA |
2562 HAMMER2_MODIFY_ASSERTNOCOPY);
2564 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2565 case HAMMER2_BREF_TYPE_INODE:
2566 hammer2_chain_modify(trans, &chain,
2567 HAMMER2_MODIFY_ASSERTNOCOPY);
2571 * Remaining types are not supported by this function.
2572 * In particular, INDIRECT and LEAF_NODE types are
2573 * handled by create_indirect().
2575 panic("hammer2_chain_create: bad type: %d",
2582 * When reconnecting a chain we must set MOVED and setsubmod
2583 * so the flush recognizes that it must update the bref in the parent.
2586 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2587 hammer2_chain_ref(chain);
2588 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2591 hammer2_chain_setsubmod(trans, chain);
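/*
 * Illustrative sketch (hypothetical caller): creating and inserting a
 * fresh DATA chain under an exclusively locked parent. 'lbase' and
 * 'lblksize' are caller-side assumptions for the example.
 *
 *	chain = NULL;
 *	hammer2_chain_create(trans, &parent, &chain,
 *			     lbase, HAMMER2_PBUFRADIX,
 *			     HAMMER2_BREF_TYPE_DATA, lblksize);
 *
 * On return *chainp is exclusively locked, and (*parentp) may have been
 * replaced by a newly created indirect block if the original was full.
 */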
2600 * Replace (*chainp) with a duplicate in-memory chain structure which shares
2601 * the same core and media state as the original. The original *chainp is
2602 * unlocked and the replacement will be returned locked.
2604 * The old chain may or may not be in a DELETED state. This new chain will
2605 * be live (not deleted).
2607 * The new chain will be marked modified for the current transaction.
2609 * If (parent) is non-NULL then the new duplicated chain is inserted under the parent.
2612 * If (parent) is NULL then the new duplicated chain is not inserted anywhere,
2613 * similar to if it had just been chain_alloc()'d (suitable for passing into
2614 * hammer2_chain_create() after this function returns).
2616 * WARNING! This is not a snapshot. Changes made underneath either the old
2617 * or new chain will affect both.
2619 static void hammer2_chain_dup_fixup(hammer2_chain_t *ochain,
2620 hammer2_chain_t *nchain);
2623 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2624 hammer2_chain_t **chainp, hammer2_blockref_t *bref,
2627 hammer2_mount_t *hmp;
2628 hammer2_chain_t *parent;
2629 hammer2_chain_t *ochain;
2630 hammer2_chain_t *nchain;
2631 hammer2_chain_core_t *above;
2635 * We want nchain to be our go-to live chain, but ochain may be in
2636 * a MODIFIED state within the current flush synchronization segment.
2637 * Force any further modifications of ochain to do another COW
2638 * operation even if modify_tid indicates that one is not needed.
2640 * WARNING! We should never resolve DATA to device buffers
2641 * (XXX allow it if the caller did?), and since
2642 * we currently do not have the logical buffer cache
2643 * buffer in-hand to fix its cached physical offset
2644 * we also force the modify code to not COW it. XXX
2649 ochain->debug_reason += 0x10000;
2651 ochain->debug_reason += 0x100000;
2654 if (ochain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2655 hammer2_chain_modify(trans, &ochain,
2656 HAMMER2_MODIFY_OPTDATA |
2657 HAMMER2_MODIFY_NOREALLOC);
2658 } else if (ochain->flags & HAMMER2_CHAIN_INITIAL) {
2659 hammer2_chain_modify(trans, &ochain,
2660 HAMMER2_MODIFY_OPTDATA);
2662 hammer2_chain_modify(trans, &ochain, 0);
2665 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_FORCECOW);
2668 * Now create a duplicate of the chain structure, associating
2669 * it with the same core, making it the same size, pointing it
2670 * to the same bref (the same media block).
2672 * Give the duplicate the same modify_tid that we previously
2673 * ensured was sufficiently advanced to trigger a block table
2674 * insertion on flush.
2676 * NOTE: bref.mirror_tid duplicated by virtue of bref copy in
2677 * hammer2_chain_alloc()
2680 bref = &ochain->bref;
2682 nchain = hammer2_chain_alloc(hmp, NULL, trans, bref);
2683 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SNAPSHOT);
2685 nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, bref);
2687 hammer2_chain_core_alloc(trans, nchain, ochain);
2688 bytes = (hammer2_off_t)1 <<
2689 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2690 nchain->bytes = bytes;
2691 nchain->modify_tid = ochain->modify_tid;
2692 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2693 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2696 * Fixup (copy) any embedded data. Non-embedded data relies on the
2697 * media block. We must unlock ochain before we can access nchain's
2698 * media block because they might share the same bp and deadlock if we don't.
2701 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER |
2702 HAMMER2_RESOLVE_NOREF);
2703 hammer2_chain_dup_fixup(ochain, nchain);
2704 /* nchain has 1 ref */
2705 hammer2_chain_unlock(ochain);
2706 KKASSERT((ochain->flags & HAMMER2_CHAIN_EMBEDDED) ||
2707 ochain->data == NULL);
2710 * Place nchain in the modified state, instantiate media data
2711 * if necessary. Because modify_tid is already completely
2712 * synchronized this should not result in a delete-duplicate.
2714 * We want nchain at the target to look like a new insertion.
2715 * Forcing the modification to be INPLACE accomplishes this
2716 * because we get the same nchain with an updated modify_tid.
2718 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2719 hammer2_chain_modify(trans, &nchain,
2720 HAMMER2_MODIFY_OPTDATA |
2721 HAMMER2_MODIFY_NOREALLOC |
2722 HAMMER2_MODIFY_INPLACE);
2723 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2724 hammer2_chain_modify(trans, &nchain,
2725 HAMMER2_MODIFY_OPTDATA |
2726 HAMMER2_MODIFY_INPLACE);
2728 hammer2_chain_modify(trans, &nchain,
2729 HAMMER2_MODIFY_INPLACE);
2733 * If parent is not NULL the duplicated chain will be entered under
2734 * the parent and the MOVED bit set.
2736 * Having both chains locked is extremely important for atomicity.
2738 if (parentp && (parent = *parentp) != NULL) {
2739 above = parent->core;
2740 KKASSERT(ccms_thread_lock_owned(&above->cst));
2741 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2742 KKASSERT(parent->refs > 0);
2744 hammer2_chain_create(trans, parentp, &nchain,
2745 nchain->bref.key, nchain->bref.keybits,
2746 nchain->bref.type, nchain->bytes);
2749 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2750 hammer2_chain_ref(nchain);
2751 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2753 hammer2_chain_setsubmod(trans, nchain);
2757 * Unconditionally set MOVED to force the parent blockrefs to
2758 * update, and adjust update_tid below nchain so nchain's
2759 * blockrefs are updated with the new attachment.
2761 if (nchain->core->update_tid < trans->sync_tid) {
2762 spin_lock(&nchain->core->cst.spin);
2763 if (nchain->core->update_tid < trans->sync_tid)
2764 nchain->core->update_tid = trans->sync_tid;
2765 spin_unlock(&nchain->core->cst.spin);
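/*
 * Illustrative note (hypothetical caller): re-homing a chain to a
 * different parent combines delete and duplicate, as create_indirect
 * does further below:
 *
 *	hammer2_chain_delete(trans, chain, HAMMER2_DELETE_WILLDUP);
 *	hammer2_chain_duplicate(trans, &nparent, &chain, NULL, 0);
 *
 * WILLDUP (see hammer2_chain_delete below) sets FORCECOW on the deleted
 * chain so any further modification forces another copy-on-write.
 */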
2772 * Special in-place delete-duplicate sequence which does not require a
2773 * locked parent. (*chainp) is marked DELETED and atomically replaced
2774 * with a duplicate. Atomicity is at the very-fine spin-lock level in
2775 * order to ensure that lookups do not race us.
2777 * If the old chain is already marked deleted the new chain will also be
2778 * marked deleted. This case can occur when an inode is removed from the
2779 * filesystem but programs still have an open descriptor to it, and during
2780 * flushes when the flush needs to operate on a chain that is deleted in
2781 * the live view but still alive in the flush view.
2783 * The new chain will be marked modified for the current transaction.
2786 hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp,
2789 hammer2_mount_t *hmp;
2790 hammer2_chain_t *ochain;
2791 hammer2_chain_t *nchain;
2792 hammer2_chain_core_t *above;
2796 * Note that we do not have to call setsubmod on ochain, calling it
2797 * on nchain is sufficient.
2802 ochain->debug_reason += 0x1000;
2803 if ((ochain->debug_reason & 0xF000) > 0x1000) {
2804 kprintf("ochain %p\n", ochain);
2809 * First create a duplicate of the chain structure.
2810 * (nchain is allocated with one ref).
2812 nchain = hammer2_chain_alloc(hmp, ochain->pmp, trans, &ochain->bref);
2813 if (flags & HAMMER2_DELDUP_RECORE)
2814 hammer2_chain_core_alloc(trans, nchain, NULL);
2816 hammer2_chain_core_alloc(trans, nchain, ochain);
2817 above = ochain->above;
2819 bytes = (hammer2_off_t)1 <<
2820 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2821 nchain->bytes = bytes;
2824 * Duplicate inherits ochain's live state including its modification
2825 * state. This function disposes of the original. Because we are
2826 * doing this in-place under the same parent the block array
2827 * inserted/deleted state does not change.
2829 * The caller isn't expected to make further modifications of ochain
2830 * but set the FORCECOW bit anyway, just in case it does. If ochain
2831 * was previously marked FORCECOW we also flag nchain FORCECOW
2832 * (used during hardlink splits).
2834 * NOTE: bref.mirror_tid duplicated by virtue of bref copy in
2835 * hammer2_chain_alloc()
2837 nchain->data_count += ochain->data_count;
2838 nchain->inode_count += ochain->inode_count;
2839 nchain->modify_tid = ochain->modify_tid;
2840 atomic_set_int(&nchain->flags,
2841 ochain->flags & (HAMMER2_CHAIN_INITIAL |
2842 HAMMER2_CHAIN_FORCECOW));
2843 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_FORCECOW);
2846 * Lock nchain so both chains are now locked (extremely important
2847 * for atomicity). Mark ochain deleted and reinsert into the topology
2848 * and insert nchain all in one go.
2850 * If the ochain is already deleted it is left alone and nchain
2851 * is inserted into the topology as a deleted chain. This is
2852 * important because it allows ongoing operations to be executed
2853 * on a deleted inode which still has open descriptors.
2855 * The deleted case can also occur when a flush delete-duplicates
2856 * a node which is being concurrently modified by ongoing operations
2857 * in a later transaction. This creates a problem because the flush
2858 * is intended to update blockrefs which then propagate, allowing
2859 * the original covering in-memory chains to be freed up. In this
2860 * situation the flush code does NOT free the original covering
2861 * chains and will re-apply them to successive copies.
2863 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2864 hammer2_chain_dup_fixup(ochain, nchain);
2865 /* extra ref still present from original allocation */
2867 KKASSERT(ochain->flags & HAMMER2_CHAIN_ONRBTREE);
2868 spin_lock(&above->cst.spin);
2869 KKASSERT(ochain->flags & HAMMER2_CHAIN_ONRBTREE);
2871 if (ochain->flags & HAMMER2_CHAIN_DELETED) {
2872 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DELETED);
2873 /* very important to inherit ochain's delete_tid */
2874 KKASSERT(ochain->delete_tid >= nchain->modify_tid);
2875 nchain->delete_tid = ochain->delete_tid;
2876 hammer2_chain_insert(above, nchain, 0);
2878 KKASSERT(trans->sync_tid >= ochain->modify_tid);
2879 ochain->delete_tid = trans->sync_tid;
2880 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
2881 atomic_add_int(&above->live_count, -1);
2882 hammer2_chain_insert(above, nchain, HAMMER2_CHAIN_INSERT_LIVE);
2885 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2886 hammer2_chain_ref(ochain);
2887 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
2889 spin_unlock(&above->cst.spin);
2892 * ochain must be unlocked because ochain and nchain might share
2893 * a buffer cache buffer. Assert that there's no buffer.
2895 hammer2_chain_unlock(ochain);
2896 KKASSERT(ochain->bp == NULL);
2899 * Finish fixing up nchain. A new block will be allocated if
2900 * crossing a synchronization point (meta-data only).
2902 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2903 hammer2_chain_modify(trans, &nchain,
2904 HAMMER2_MODIFY_OPTDATA |
2905 HAMMER2_MODIFY_NOREALLOC |
2906 HAMMER2_MODIFY_INPLACE);
2907 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2908 hammer2_chain_modify(trans, &nchain,
2909 HAMMER2_MODIFY_OPTDATA |
2910 HAMMER2_MODIFY_INPLACE);
2912 hammer2_chain_modify(trans, &nchain,
2913 HAMMER2_MODIFY_INPLACE);
2915 hammer2_chain_drop(nchain);
2918 * Unconditionally set MOVED to force the parent blockrefs to
2919 * update, and adjust update_tid below nchain so nchain's
2920 * blockrefs are updated with the new attachment.
2922 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2923 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2924 hammer2_chain_ref(nchain);
2926 if (nchain->core->update_tid < trans->sync_tid) {
2927 spin_lock(&nchain->core->cst.spin);
2928 if (nchain->core->update_tid < trans->sync_tid)
2929 nchain->core->update_tid = trans->sync_tid;
2930 spin_unlock(&nchain->core->cst.spin);
2932 hammer2_chain_setsubmod(trans, nchain);
2937 * Helper function to fix up inodes. The caller procedure stack may hold
2938 * multiple locks on ochain if it represents an inode, preventing our
2939 * unlock from retiring its state to the buffer cache.
2941 * In this situation any attempt to access the buffer cache could result
2942 * either in stale data or a deadlock. Work around the problem by copying
2943 * the embedded data directly.
2947 hammer2_chain_dup_fixup(hammer2_chain_t *ochain, hammer2_chain_t *nchain)
2949 if (ochain->data == NULL)
2951 switch(ochain->bref.type) {
2952 case HAMMER2_BREF_TYPE_INODE:
2953 KKASSERT(nchain->data == NULL);
2954 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2955 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2956 ochain->hmp->mchain, M_WAITOK | M_ZERO);
2957 nchain->data->ipdata = ochain->data->ipdata;
2959 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2960 KKASSERT(nchain->data == NULL);
2961 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2962 nchain->data = kmalloc(sizeof(nchain->data->bmdata),
2963 ochain->hmp->mchain, M_WAITOK | M_ZERO);
2964 bcopy(ochain->data->bmdata,
2965 nchain->data->bmdata,
2966 sizeof(nchain->data->bmdata));
2974 * Create a snapshot of the specified {parent, ochain} with the specified
2975 * label. The originating hammer2_inode must be exclusively locked for the duration of the call.
2978 * The ioctl code has already synced the filesystem.
2981 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **ochainp,
2982 hammer2_ioc_pfs_t *pfs)
2984 hammer2_mount_t *hmp;
2985 hammer2_chain_t *ochain = *ochainp;
2986 hammer2_chain_t *nchain;
2987 hammer2_inode_data_t *ipdata;
2988 hammer2_inode_t *nip;
2995 kprintf("snapshot %s ochain->refs %d ochain->flags %08x\n",
2996 pfs->name, ochain->refs, ochain->flags);
2998 name_len = strlen(pfs->name);
2999 lhc = hammer2_dirhash(pfs->name, name_len);
3002 opfs_clid = ochain->data->ipdata.pfs_clid;
3003 KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
3008 * Create the snapshot directory under the super-root
3010 * Set PFS type, generate a unique filesystem id, and generate
3011 * a cluster id. Use the same clid when snapshotting a PFS root,
3012 * which theoretically allows the snapshot to be used as part of
3013 * the same cluster (perhaps as a cache).
3015 * Copy the (flushed) ochain's blockref array. Theoretically we
3016 * could use chain_duplicate() but it becomes difficult to disentangle
3017 * the shared core so for now just brute-force it.
3023 nip = hammer2_inode_create(trans, hmp->sroot, &vat, proc0.p_ucred,
3024 pfs->name, name_len, &nchain, &error);
3027 ipdata = hammer2_chain_modify_ip(trans, nip, &nchain, 0);
3028 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
3029 kern_uuidgen(&ipdata->pfs_fsid, 1);
3030 if (ochain->flags & HAMMER2_CHAIN_PFSROOT)
3031 ipdata->pfs_clid = opfs_clid;
3033 kern_uuidgen(&ipdata->pfs_clid, 1);
3034 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_PFSROOT);
3035 ipdata->u.blockset = ochain->data->ipdata.u.blockset;
3037 hammer2_inode_unlock_ex(nip, nchain);
3043 * Create an indirect block that covers one or more of the elements in the
3044 * current parent. Either returns the existing parent with no locking or
3045 * ref changes, or returns the new indirect block locked and referenced,
3046 * leaving the original parent's lock/ref intact as well.
3048 * If an error occurs, NULL is returned and *errorp is set to the error.
3050 * The returned chain depends on where the specified key falls.
3052 * The key/keybits for the indirect mode only needs to follow four rules:
3054 * (1) That all elements underneath it fit within its key space and
3056 * (2) That all elements outside it are outside its key space.
3058 * (3) When creating the new indirect block any elements in the current
3059 * parent that fit within the new indirect block's keyspace must be
3060 * moved into the new indirect block.
3062 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
3063 * keyspace than the current parent, but lookup/iteration rules will
3064 * ensure (and must ensure) that rule (2) for all parents leading up
3065 * to the nearest inode or the root volume header is adhered to. This
3066 * is accomplished by always recursing through matching keyspaces in
3067 * the hammer2_chain_lookup() and hammer2_chain_next() API.
3069 * The current implementation calculates the current worst-case keyspace by
3070 * iterating the current parent and then divides it into two halves, choosing
3071 * whichever half has the most elements (not necessarily the half containing
3072 * the requested key).
3074 * We can also opt to use the half with the least number of elements. This
3075 * causes lower-numbered keys (aka logical file offsets) to recurse through
3076 * fewer indirect blocks and higher-numbered keys to recurse through more.
3077 * This also has the risk of not moving enough elements to the new indirect
3078 * block and being forced to create several indirect blocks before the element can be inserted.
3081 * Must be called with an exclusively locked parent.
3083 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
3084 hammer2_key_t *keyp, int keybits,
3085 hammer2_blockref_t *base, int count);
3086 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
3087 hammer2_key_t *keyp, int keybits,
3088 hammer2_blockref_t *base, int count);
3091 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
3092 hammer2_key_t create_key, int create_bits,
3093 int for_type, int *errorp)
3095 hammer2_mount_t *hmp;
3096 hammer2_chain_core_t *above;
3097 hammer2_chain_core_t *icore;
3098 hammer2_blockref_t *base;
3099 hammer2_blockref_t *bref;
3100 hammer2_blockref_t bcopy;
3101 hammer2_chain_t *chain;
3102 hammer2_chain_t *ichain;
3103 hammer2_chain_t dummy;
3104 hammer2_key_t key = create_key;
3105 hammer2_key_t key_beg;
3106 hammer2_key_t key_end;
3107 hammer2_key_t key_next;
3108 int keybits = create_bits;
3115 * Calculate the base blockref pointer or NULL if the chain
3116 * is known to be empty. We need to calculate the array count
3117 * for RB lookups either way.
3121 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
3122 above = parent->core;
3124 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
3125 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
3128 switch(parent->bref.type) {
3129 case HAMMER2_BREF_TYPE_INODE:
3130 count = HAMMER2_SET_COUNT;
3132 case HAMMER2_BREF_TYPE_INDIRECT:
3133 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3134 count = parent->bytes / sizeof(hammer2_blockref_t);
3136 case HAMMER2_BREF_TYPE_VOLUME:
3137 count = HAMMER2_SET_COUNT;
3139 case HAMMER2_BREF_TYPE_FREEMAP:
3140 count = HAMMER2_SET_COUNT;
3143 panic("hammer2_chain_create_indirect: "
3144 "unrecognized blockref type: %d",
3150 switch(parent->bref.type) {
3151 case HAMMER2_BREF_TYPE_INODE:
3152 base = &parent->data->ipdata.u.blockset.blockref[0];
3153 count = HAMMER2_SET_COUNT;
3155 case HAMMER2_BREF_TYPE_INDIRECT:
3156 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3157 base = &parent->data->npdata[0];
3158 count = parent->bytes / sizeof(hammer2_blockref_t);
3160 case HAMMER2_BREF_TYPE_VOLUME:
3161 base = &hmp->voldata.sroot_blockset.blockref[0];
3162 count = HAMMER2_SET_COUNT;
3164 case HAMMER2_BREF_TYPE_FREEMAP:
3165 base = &hmp->voldata.freemap_blockset.blockref[0];
3166 count = HAMMER2_SET_COUNT;
3169 panic("hammer2_chain_create_indirect: "
3170 "unrecognized blockref type: %d",
3178 * dummy used in later chain allocation (no longer used for lookups).
3180 bzero(&dummy, sizeof(dummy));
3181 dummy.delete_tid = HAMMER2_MAX_TID;
3184 * When creating an indirect block for a freemap node or leaf
3185 * the key/keybits must be fitted to static radix levels because
3186 * particular radix levels use particular reserved blocks in the
3189 * This routine calculates the key/radix of the indirect block
3190 * we need to create, and whether it is on the high-side or the
3193 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3194 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3195 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
3198 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
3203 * Normalize the key for the radix being represented, keeping the
3204 * high bits and throwing away the low bits.
3206 key &= ~(((hammer2_key_t)1 << keybits) - 1);
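	/*
	 * Worked example (hypothetical numbers): key = 0x3456 with
	 * keybits = 8 normalizes to key = 0x3400, i.e. the new indirect
	 * block covers keys 0x3400-0x34FF.
	 */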
3209 * How big should our new indirect block be? It has to be at least
3210 * as large as its parent.
3212 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
3213 nbytes = HAMMER2_IND_BYTES_MIN;
3215 nbytes = HAMMER2_IND_BYTES_MAX;
3216 if (nbytes < count * sizeof(hammer2_blockref_t))
3217 nbytes = count * sizeof(hammer2_blockref_t);
3220 * Ok, create our new indirect block
3222 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3223 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3224 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
3226 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
3228 dummy.bref.key = key;
3229 dummy.bref.keybits = keybits;
3230 dummy.bref.data_off = hammer2_getradix(nbytes);
3231 dummy.bref.methods = parent->bref.methods;
3233 ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref);
3234 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
3235 hammer2_chain_core_alloc(trans, ichain, NULL);
3236 icore = ichain->core;
3237 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
3238 hammer2_chain_drop(ichain); /* excess ref from alloc */
3241 * We have to mark it modified to allocate its block, but use
3242 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
3243 * it won't be acted upon by the flush code.
3245 * XXX leave the node unmodified, depend on the update_tid
3246 * flush to assign and modify parent blocks.
3248 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
3251 * Iterate the original parent and move the matching brefs into
3252 * the new indirect block.
3254 * XXX handle flushes.
3257 key_end = HAMMER2_MAX_KEY;
3259 spin_lock(&above->cst.spin);
3263 if (++loops > 8192) {
3264 spin_unlock(&above->cst.spin);
3265 panic("shit parent=%p base/count %p:%d\n",
3266 parent, base, count);
3270 * NOTE: spinlock stays intact, returned chain (if not NULL)
3271 * is not referenced or locked.
3273 chain = hammer2_combined_find(parent, base, count,
3274 &cache_index, &key_next,
3279 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3280 if (key_next == 0 || key_next > key_end)
3287 * Use the full live (not deleted) element for the scan
3288 * iteration. HAMMER2 does not allow partial replacements.
3290 * XXX should be built into hammer2_combined_find().
3292 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3295 * Skip keys that are not within the key/radix of the new
3296 * indirect block. They stay in the parent.
3298 if ((~(((hammer2_key_t)1 << keybits) - 1) &
3299 (key ^ bref->key)) != 0) {
3300 if (key_next == 0 || key_next > key_end)
3307 * Load the new indirect block by acquiring or allocating
3308 * the related chain, then move it to the new parent (ichain)
3309 * via DELETE-DUPLICATE.
3311 * WARNING! above->cst.spin must be held when parent is
3312 * modified, even though we own the full blown lock,
3313 * to deal with setsubmod and rename races.
3314 * (XXX remove this req).
3318 * Use chain already present in the RBTREE
3320 hammer2_chain_ref(chain);
3321 spin_unlock(&above->cst.spin);
3322 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
3323 HAMMER2_RESOLVE_NOREF);
3326 * Get chain for blockref element. _get returns NULL
3327 * on insertion race.
3330 spin_unlock(&above->cst.spin);
3331 chain = hammer2_chain_get(parent, bref);
3334 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
3335 hammer2_chain_drop(chain);
3338 hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
3339 HAMMER2_RESOLVE_NOREF);
3341 hammer2_chain_delete(trans, chain, HAMMER2_DELETE_WILLDUP);
3342 hammer2_chain_duplicate(trans, &ichain, &chain, NULL, 0);
3343 hammer2_chain_unlock(chain);
3344 KKASSERT(parent->refs > 0);
3346 spin_lock(&above->cst.spin);
3347 if (key_next == 0 || key_next > key_end)
3351 spin_unlock(&above->cst.spin);
3354 * Insert the new indirect block into the parent now that we've
3355 * cleared out some entries in the parent. We calculated a good
3356 * insertion index in the loop above (ichain->index).
3358 * We don't have to set MOVED here because we mark ichain modified
3359 * down below (so the normal modified -> flush -> set-moved sequence
3362 * The insertion shouldn't race as this is a completely new block
3363 * and the parent is locked.
3365 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3366 hammer2_chain_insert(above, ichain, HAMMER2_CHAIN_INSERT_SPIN |
3367 HAMMER2_CHAIN_INSERT_LIVE);
3370 * Mark the new indirect block modified after insertion, which
3371 * will propagate up through parent all the way to the root and
3372 * also allocate the physical block in ichain for our caller,
3373 * and assign ichain->data to a pre-zero'd space (because there
3374 * is no prior data to copy into it).
3376 * We have to set update_tid in ichain's flags manually so the
3377 * flusher knows it has to recurse through it to get to all of
3378 * our moved blocks, then call setsubmod() to set the bit recursively.
3381 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3382 if (ichain->core->update_tid < trans->sync_tid) {
3383 spin_lock(&ichain->core->cst.spin);
3384 if (ichain->core->update_tid < trans->sync_tid)
3385 ichain->core->update_tid = trans->sync_tid;
3386 spin_unlock(&ichain->core->cst.spin);
3388 hammer2_chain_setsubmod(trans, ichain);
3391 * Figure out what to return.
3393 if (~(((hammer2_key_t)1 << keybits) - 1) &
3394 (create_key ^ key)) {
3396 * Key being created is outside the key range,
3397 * return the original parent.
3399 hammer2_chain_unlock(ichain);
3402 * Otherwise it's in the range, return the new parent.
3403 * (leave both the new and old parent locked).
3412 * Calculate the keybits and highside/lowside of the freemap node the
3413 * caller is creating.
3415 * This routine will specify the next higher-level freemap key/radix
3416 * representing the lowest-ordered set. By doing so, eventually all
3417 * low-ordered sets will be moved one level down.
3419 * We have to be careful here because the freemap reserves a limited
3420 * number of blocks for a limited number of levels. So we can't just
3421 * push indiscriminately.
3424 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
3425 int keybits, hammer2_blockref_t *base, int count)
3427 hammer2_chain_core_t *above;
3428 hammer2_chain_t *chain;
3429 hammer2_blockref_t *bref;
3431 hammer2_key_t key_beg;
3432 hammer2_key_t key_end;
3433 hammer2_key_t key_next;
3440 above = parent->core;
3446 * Calculate the range of keys in the array being careful to skip
3447 * slots which are overridden with a deletion.
3450 key_end = HAMMER2_MAX_KEY;
3452 spin_lock(&above->cst.spin);
3455 if (++loops == 100000) {
3456 panic("indkey_freemap shit %p %p:%d\n",
3457 parent, base, count);
3459 chain = hammer2_combined_find(parent, base, count,
3460 &cache_index, &key_next,
3461 key_beg, key_end, &bref);
3468 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3469 if (key_next == 0 || key_next > key_end)
3476 * Use the full live (not deleted) element for the scan
3477 * iteration. HAMMER2 does not allow partial replacements.
3479 * XXX should be built into hammer2_combined_find().
3481 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3483 if (keybits > bref->keybits) {
3485 keybits = bref->keybits;
3486 } else if (keybits == bref->keybits && bref->key < key) {
3493 spin_unlock(&above->cst.spin);
3496 * Return the keybits for a higher-level FREEMAP_NODE covering this node.
3500 case HAMMER2_FREEMAP_LEVEL0_RADIX:
3501 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3503 case HAMMER2_FREEMAP_LEVEL1_RADIX:
3504 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3506 case HAMMER2_FREEMAP_LEVEL2_RADIX:
3507 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3509 case HAMMER2_FREEMAP_LEVEL3_RADIX:
3510 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3512 case HAMMER2_FREEMAP_LEVEL4_RADIX:
3513 panic("hammer2_chain_indkey_freemap: level too high");
3516 panic("hammer2_chain_indkey_freemap: bad radix");
3525 * Calculate the keybits and highside/lowside of the indirect block the
3526 * caller is creating.
3529 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3530 int keybits, hammer2_blockref_t *base, int count)
3532 hammer2_chain_core_t *above;
3533 hammer2_blockref_t *bref;
3534 hammer2_chain_t *chain;
3535 hammer2_key_t key_beg;
3536 hammer2_key_t key_end;
3537 hammer2_key_t key_next;
3546 above = parent->core;
3551 * Calculate the range of keys in the array being careful to skip
3552 * slots which are overridden with a deletion. Once the scan
3553 * completes we will cut the key range in half and shift half the
3554 * range into the new indirect block.
3557 key_end = HAMMER2_MAX_KEY;
3559 spin_lock(&above->cst.spin);
3562 if (++loops == 100000) {
3563 panic("indkey_freemap shit %p %p:%d\n",
3564 parent, base, count);
3566 chain = hammer2_combined_find(parent, base, count,
3567 &cache_index, &key_next,
3568 key_beg, key_end, &bref);
3575 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3576 if (key_next == 0 || key_next > key_end)
3583 * Use the full live (not deleted) element for the scan
3584 * iteration. HAMMER2 does not allow partial replacements.
3586 * XXX should be built into hammer2_combined_find().
3588 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3591 * Expand our calculated key range (key, keybits) to fit
3592 * the scanned key. nkeybits represents the full range
3593 * that we will later cut in half (two halves @ nkeybits - 1).
3596 if (nkeybits < bref->keybits) {
3597 if (bref->keybits > 64) {
3598 kprintf("bad bref chain %p bref %p\n",
3602 nkeybits = bref->keybits;
3604 while (nkeybits < 64 &&
3605 (~(((hammer2_key_t)1 << nkeybits) - 1) &
3606 (key ^ bref->key)) != 0) {
3611 * If the new key range is larger we have to determine
3612 * which side of the new key range the existing keys fall
3613 * under by checking the high bit, then collapsing the
3614 * locount into the hicount or vice-versa.
3616 if (keybits != nkeybits) {
3617 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3628 * The newly scanned key will be in the lower half or the
3629 * upper half of the (new) key range.
3631 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3640 spin_unlock(&above->cst.spin);
3641 bref = NULL; /* now invalid (safety) */
3644 * Adjust keybits to represent half of the full range calculated
3645 * above (radix 63 max)
3650 * Select whichever half contains the most elements. Theoretically
3651 * we can select either side as long as it contains at least one
3652 * element (in order to ensure that a free slot is present to hold
3653 * the indirect block).
3655 if (hammer2_indirect_optimize) {
3657 * Insert node for least number of keys, this will arrange
3658 * the first few blocks of a large file or the first few
3659 * inodes in a directory with fewer indirect blocks when created linearly.
3662 if (hicount < locount && hicount != 0)
3663 key |= (hammer2_key_t)1 << keybits;
3665 key &= ~((hammer2_key_t)1 << keybits);
3668 * Insert node for the most keys, best for heavily fragmented files.
3671 if (hicount > locount)
3672 key |= (hammer2_key_t)1 << keybits;
3674 key &= ~((hammer2_key_t)1 << keybits);
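	/*
	 * Worked example (hypothetical): brefs at keys 0x1000 and 0x1F00
	 * differ within the low 12 bits (0x1000 ^ 0x1F00 == 0x0F00), so
	 * nkeybits expands to 12. Cutting that range in half yields
	 * keybits = 11; 0x1000 (bit 11 clear) counts toward locount and
	 * 0x1F00 (bit 11 set) toward hicount.
	 */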
3682 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3683 * set chain->delete_tid. The chain is not actually marked possibly-free
3684 * in the freemap until the deletion is completely flushed out (because
3685 * a flush which doesn't cover the entire deletion is flushing the deleted
3686 * chain as if it were live).
3688 * This function does NOT generate a modification to the parent. It
3689 * would be nearly impossible to figure out which parent to modify anyway.
3690 * Such modifications are handled top-down by the flush code and are
3691 * properly merged using the flush synchronization point.
3693 * The find/get code will properly overload the RBTREE check on top of
3694 * the bref check to detect deleted entries.
3696 * This function is NOT recursive. Any entity already pushed into the
3697 * chain (such as an inode) may still need visibility into its contents,
3698 * as well as the ability to read and modify the contents. For example,
3699 * for an unlinked file which is still open.
3701 * NOTE: This function does NOT set chain->modify_tid, allowing future
3702 * code to distinguish between live and deleted chains by testing
3703 * trans->sync_tid vs chain->modify_tid and chain->delete_tid.
3705 * NOTE: Deletions normally do not occur in the middle of a duplication
3706 * chain but we use a trick for hardlink migration that refactors
3707 * the originating inode without deleting it, so we make no assumptions here.
3711 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
3713 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3716 * Nothing to do if already marked.
3718 if (chain->flags & HAMMER2_CHAIN_DELETED)
3722 * The setting of DELETED causes finds, lookups, and _next iterations
3723 * to no longer recognize the chain. RB_SCAN()s will still have
3724 * visibility (needed for flush serialization points).
3726 * We need the spinlock on the core whose RBTREE contains chain
3727 * to protect against races.
3729 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
3730 spin_lock(&chain->above->cst.spin);
3732 KKASSERT(trans->sync_tid >= chain->modify_tid);
3733 chain->delete_tid = trans->sync_tid;
3734 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
3735 atomic_add_int(&chain->above->live_count, -1);
3736 ++chain->above->generation;
3739 * We must set MOVED along with DELETED for the flush code to
3740 * recognize the operation and properly disconnect the chain
3743 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3744 hammer2_chain_ref(chain);
3745 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
3747 spin_unlock(&chain->above->cst.spin);
3749 if (flags & HAMMER2_DELETE_WILLDUP)
3750 atomic_set_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
3752 hammer2_chain_setsubmod(trans, chain);
3756 * Called with the core spinlock held to check for freeable layers.
3757 * Used by the flush code. Layers can wind up not being freed due
3758 * to the temporary layer->refs count. This function frees up any
3759 * layers that were missed.
3762 hammer2_chain_layer_check_locked(hammer2_mount_t *hmp,
3763 hammer2_chain_core_t *core)
3765 hammer2_chain_layer_t *layer;
3766 hammer2_chain_layer_t *tmp;
3768 tmp = TAILQ_FIRST(&core->layerq);
3769 while ((layer = tmp) != NULL) {
3770 tmp = TAILQ_NEXT(tmp, entry);
3771 if (layer->refs == 0 && RB_EMPTY(&layer->rbtree)) {
3772 TAILQ_REMOVE(&core->layerq, layer, entry);
3775 spin_unlock(&core->cst.spin);
3776 kfree(layer, hmp->mchain);
3777 spin_lock(&core->cst.spin);
3785 * Returns the index of the nearest element in the blockref array >= elm.
3786 * Returns (count) if no element could be found.
3788 * Sets *key_nextp to the next key for loop purposes but does not modify
3789 * it if the next key would be higher than the current value of *key_nextp.
3790 * Note that *key_nextp can overflow to 0, which should be tested by the caller.
3793 * (*cache_indexp) is a heuristic and can be any value without affecting correct operation.
3796 * The spin lock on the related chain must be held.
3799 hammer2_base_find(hammer2_chain_t *chain,
3800 hammer2_blockref_t *base, int count,
3801 int *cache_indexp, hammer2_key_t *key_nextp,
3802 hammer2_key_t key_beg, hammer2_key_t key_end)
3804 hammer2_chain_core_t *core = chain->core;
3805 hammer2_blockref_t *scan;
3806 hammer2_key_t scan_end;
3812 KKASSERT(core->flags & HAMMER2_CORE_COUNTEDBREFS);
3813 if (count == 0 || base == NULL)
3817 * Sequential optimization
3821 if (i >= core->live_zero)
3822 i = core->live_zero - 1;
3825 KKASSERT(i < count);
3831 while (i > 0 && (scan->type == 0 || scan->key > key_beg)) {
3838 * Search forwards, stop when we find a scan element which
3839 * encloses the key or until we know that there are no further elements.
3843 if (scan->type != 0) {
3844 if (scan->key > key_beg)
3846 scan_end = scan->key +
3847 ((hammer2_key_t)1 << scan->keybits) - 1;
3848 if (scan_end >= key_beg)
3851 if (i >= core->live_zero)
3858 if (i >= core->live_zero) {
3861 scan_end = scan->key +
3862 ((hammer2_key_t)1 << scan->keybits);
3863 if (scan_end && (*key_nextp > scan_end ||
3865 *key_nextp = scan_end;
3873 * Do a combined search and return the next match either from the blockref
3874 * array or from the in-memory chain. Sets *bresp to the returned bref in
3875 * both cases, or sets it to NULL if the search exhausted. Only returns
3876 * a non-NULL chain if the search matched from the in-memory chain.
3878 * Must be called with above's spinlock held. Spinlock remains held
3879 * through the operation.
3881 * The returned chain is not locked or referenced. Use the returned bref
3882 * to determine if the search exhausted or not.
3884 static hammer2_chain_t *
3885 hammer2_combined_find(hammer2_chain_t *parent,
3886 hammer2_blockref_t *base, int count,
3887 int *cache_indexp, hammer2_key_t *key_nextp,
3888 hammer2_key_t key_beg, hammer2_key_t key_end,
3889 hammer2_blockref_t **bresp)
3891 hammer2_blockref_t *bref;
3892 hammer2_chain_t *chain;
3895 *key_nextp = key_end + 1;
3896 i = hammer2_base_find(parent, base, count, cache_indexp,
3897 key_nextp, key_beg, key_end);
3898 chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);
3903 if (i == count && chain == NULL) {
3905 return(chain); /* NULL */
3909 * Only chain matched
3912 bref = &chain->bref;
3917 * Only blockref matched.
3919 if (chain == NULL) {
3925 * Both in-memory and blockref match.
3927 * If they are both flush with the left hand side select the chain.
3928 * If their starts match select the chain.
3929 * Otherwise the nearer element wins.
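 * Worked example (hypothetical): with key_beg = 0x10, an in-memory chain
 * at key 0x00 / keybits 6 (covering 0x00-0x3F) and a blockref at key 0x10
 * are both flush with the left edge, so the chain is selected and the
 * in-memory state overrides the media blockref.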
3931 if (chain->bref.key <= key_beg && base[i].key <= key_beg) {
3932 bref = &chain->bref;
3935 if (chain->bref.key <= base[i].key) {
3936 bref = &chain->bref;
3944 * If the bref is out of bounds we've exhausted our search.
3947 if (bref->key > key_end) {
3957 * Locate the specified block array element and delete it. The element must exist.
3960 * The spin lock on the related chain must be held.
3962 * NOTE: live_count was adjusted when the chain was deleted, so it does not
3963 * need to be adjusted when we commit the media change.
3966 hammer2_base_delete(hammer2_chain_t *chain,
3967 hammer2_blockref_t *base, int count,
3968 int *cache_indexp, hammer2_chain_t *child)
3970 hammer2_blockref_t *elm = &child->bref;
3971 hammer2_chain_core_t *core = chain->core;
3972 hammer2_key_t key_next;
3976 * Delete element. Expect the element to exist.
3978 * XXX see caller, flush code not yet sophisticated enough to prevent
3979 * re-flushes in some cases.
3981 key_next = 0; /* max range */
3982 i = hammer2_base_find(chain, base, count, cache_indexp,
3983 &key_next, elm->key, elm->key);
3984 if (i == count || base[i].type == 0 ||
3985 base[i].key != elm->key || base[i].keybits != elm->keybits) {
3986 panic("delete base %p element not found at %d/%d elm %p\n",
3987 base, i, count, elm);
3990 bzero(&base[i], sizeof(*base));
3991 if (core->live_zero == i + 1) {
3992 while (--i >= 0 && base[i].type == 0)
3994 core->live_zero = i + 1;
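/*
 * Worked example (hypothetical): count = 8, live_zero = 5, deleting the
 * element at i = 4. The slot is zeroed; since it was the last live slot
 * the loop above walks backwards over any other empty slots, so if
 * base[3] is also empty live_zero drops to 3.
 */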
3999 * Insert the specified element. The block array must not already have the
4000 * element and must have space available for the insertion.
4002 * The spin lock on the related chain must be held.
4004 * NOTE: live_count was adjusted when the chain was deleted, so it does not
4005 * need to be adjusted when we commit the media change.
4008 hammer2_base_insert(hammer2_chain_t *parent,
4009 hammer2_blockref_t *base, int count,
4010 int *cache_indexp, hammer2_chain_t *child)
4012 hammer2_blockref_t *elm = &child->bref;
4013 hammer2_chain_core_t *core = parent->core;
4014 hammer2_key_t key_next;
4023 * Insert new element. Expect the element to not already exist
4024 * unless we are replacing it.
4026 * XXX see caller, flush code not yet sophisticated enough to prevent
4027 * re-flushes in some cases.
4029 key_next = 0; /* max range */
4030 i = hammer2_base_find(parent, base, count, cache_indexp,
4031 &key_next, elm->key, elm->key);
4034 * Shortcut fill optimization, typical ordered insertion(s) may not require a search.
4037 KKASSERT(i >= 0 && i <= count);
4039 if (i == count && core->live_zero < count) {
4040 i = core->live_zero++;
4045 xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
4046 if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
4047 panic("insert base %p overlapping elements at %d elm %p\n",
4052 * Try to find an empty slot before or after.
4056 while (j > 0 || k < count) {
4058 if (j >= 0 && base[j].type == 0) {
4062 bcopy(&base[j+1], &base[j],
4063 (i - j - 1) * sizeof(*base));
4069 if (k < count && base[k].type == 0) {
4070 bcopy(&base[i], &base[i+1],
4071 (k - i) * sizeof(hammer2_blockref_t));
4073 if (core->live_zero <= k)
4074 core->live_zero = k + 1;
4079 panic("hammer2_base_insert: no room!");
4086 for (l = 0; l < count; ++l) {
4088 key_next = base[l].key +
4089 ((hammer2_key_t)1 << base[l].keybits) - 1;
4093 while (++l < count) {
4095 if (base[l].key <= key_next)
4096 panic("base_insert %d %d,%d,%d fail %p:%d", u, i, j, k, base, l);
4097 key_next = base[l].key +
4098 ((hammer2_key_t)1 << base[l].keybits) - 1;
4108 * Sort the chain's blockref[] array into key order, placing empty
4109 * elements at the end. Used by the flush code.
4111 * The chain must be exclusively locked AND spin-locked.
4113 typedef hammer2_blockref_t *hammer2_blockref_p;
4117 hammer2_base_sort_callback(const void *v1, const void *v2)
4119 hammer2_blockref_p bref1 = *(const hammer2_blockref_p *)v1;
4120 hammer2_blockref_p bref2 = *(const hammer2_blockref_p *)v2;
4123 * Make sure empty elements are placed at the end of the array
4125 if (bref1->type == 0) {
4126 if (bref2->type == 0)
4129 } else if (bref2->type == 0) {
4136 if (bref1->key < bref2->key)
4138 if (bref1->key > bref2->key)
4144 hammer2_base_sort(hammer2_chain_t *chain)
4146 hammer2_blockref_t *base;
4149 switch(chain->bref.type) {
4150 case HAMMER2_BREF_TYPE_INODE:
4152 * Special shortcut for embedded data returns the inode
4153 * itself. Callers must detect this condition and access
4154 * the embedded data (the strategy code does this for us).
4156 * This is only applicable to regular files and softlinks.
4158 if (chain->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
4160 base = &chain->data->ipdata.u.blockset.blockref[0];
4161 count = HAMMER2_SET_COUNT;
4163 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
4164 case HAMMER2_BREF_TYPE_INDIRECT:
4166 * Optimize indirect blocks in the INITIAL state to avoid
4169 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
4170 base = &chain->data->npdata[0];
4171 count = chain->bytes / sizeof(hammer2_blockref_t);
4173 case HAMMER2_BREF_TYPE_VOLUME:
4174 base = &chain->hmp->voldata.sroot_blockset.blockref[0];
4175 count = HAMMER2_SET_COUNT;
4177 case HAMMER2_BREF_TYPE_FREEMAP:
4178 base = &chain->hmp->voldata.freemap_blockset.blockref[0];
4179 count = HAMMER2_SET_COUNT;
4182 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
4184 base = NULL; /* safety */
4185 count = 0; /* safety */
4187 kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
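/*
 * Note (editorial): the comparator above sorts empty (type 0) elements
 * to the end, keeping the live brefs contiguous at the front of the
 * array after the sort.
 */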
4193 * Chain memory management
4196 hammer2_chain_wait(hammer2_chain_t *chain)
4198 tsleep(chain, 0, "chnflw", 1);
4202 * Manage excessive memory resource use for chain and related structures.
4206 hammer2_chain_memory_wait(hammer2_pfsmount_t *pmp)
4209 while (pmp->inmem_chains > desiredvnodes / 10 &&
4210 pmp->inmem_chains > pmp->mp->mnt_nvnodelistsize * 2) {
4212 speedup_syncer(pmp->mp);
4213 pmp->inmem_waiting = 1;
4214 tsleep(&pmp->inmem_waiting, 0, "chnmem", hz);
4218 if (pmp->inmem_chains > desiredvnodes / 10 &&
4219 pmp->inmem_chains > pmp->mp->mnt_nvnodelistsize * 7 / 4) {
4220 speedup_syncer(pmp->mp);
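/*
 * Worked example (hypothetical numbers): with desiredvnodes = 100000 and
 * mnt_nvnodelistsize = 30000, the blocking loop above engages while
 * inmem_chains exceeds both 10000 (desiredvnodes / 10) and 60000
 * (2 * nvnodelistsize), while the non-blocking speedup_syncer() nudge
 * triggers above 52500 (7/4 * nvnodelistsize).
 */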
4226 hammer2_chain_memory_wakeup(hammer2_pfsmount_t *pmp)
4228 if (pmp->inmem_waiting &&
4229 (pmp->inmem_chains <= desiredvnodes / 10 ||
4230 pmp->inmem_chains <= pmp->mp->mnt_nvnodelistsize * 2)) {
4232 pmp->inmem_waiting = 0;
4233 wakeup(&pmp->inmem_waiting);
4239 adjreadcounter(hammer2_blockref_t *bref, size_t bytes)
4243 switch(bref->type) {
4244 case HAMMER2_BREF_TYPE_DATA:
4245 counterp = &hammer2_iod_file_read;
4247 case HAMMER2_BREF_TYPE_INODE:
4248 counterp = &hammer2_iod_meta_read;
4250 case HAMMER2_BREF_TYPE_INDIRECT:
4251 counterp = &hammer2_iod_indr_read;
4253 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
4254 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
4255 counterp = &hammer2_iod_fmap_read;
4258 counterp = &hammer2_iod_volu_read;