2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain and hammer2_chain_core structures.
39 * Chains represent the filesystem media topology in-memory. Any given
40 * chain can represent an inode, indirect block, data, or other types
43 * This module provides APIs for direct and indirect block searches,
44 * iterations, recursions, creation, deletion, replication, and snapshot
45 * views (used by the flush and snapshot code).
47 * Generally speaking any modification made to a chain must propagate all
48 * the way back to the volume header, issuing copy-on-write updates to the
49 * blockref tables all the way up. Any chain except the volume header itself
50 * can be flushed to disk at any time, in any order. None of it matters
51 * until we get to the point where we want to synchronize the volume header
52 * (see the flush code).
54 * The chain structure supports snapshot views in time, which are primarily
55 * used until the related data and meta-data is flushed to allow the
56 * filesystem to make snapshots without requiring it to first flush,
57 * and to allow the filesystem flush and modify the filesystem concurrently
58 * with minimal or no stalls.
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
65 #include <sys/kern_syscall.h>
/*
 * NOTE(review): This region is a partial extraction of hammer2_chain.c.
 * Each line below carries a fused copy of its original source line
 * number, and the gaps between those numbers are dropped lines.  The
 * code is left byte-identical; consult the upstream DragonFly BSD
 * hammer2 source for the complete text.
 *
 * Visible here: a file-static tunable, the forward declaration of the
 * indirect-block creation helper, and the RB-tree code generation for
 * the per-core chain tree keyed by (index, delete_tid).
 */
70 static int hammer2_indirect_optimize;	/* XXX SYSCTL */
72 static hammer2_chain_t *hammer2_chain_create_indirect(
73 hammer2_trans_t *trans, hammer2_chain_t *parent,
74 hammer2_key_t key, int keybits, int *errorp);
77 * We use a red-black tree to guarantee safe lookups under shared locks.
79 * Chains can be overloaded onto the same index, creating a different
80 * view of a blockref table based on a transaction id. The RBTREE
81 * deconflicts the view by sub-sorting on delete_tid.
83 * NOTE: Any 'current' chain which is not yet deleted will have a
84 * delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
86 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
89 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
91 if (chain1->index < chain2->index)
93 if (chain1->index > chain2->index)
95 if (chain1->delete_tid < chain2->delete_tid)
97 if (chain1->delete_tid > chain2->delete_tid)
103 * Recursively set the SUBMODIFIED flag up to the root starting at chain's
104 * parent. SUBMODIFIED is not set in chain itself.
106 * This function only operates on current-time transactions and is not
107 * used during flushes.
110 hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
112 hammer2_chain_core_t *above;
114 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
116 while ((above = chain->above) != NULL) {
117 spin_lock(&above->cst.spin);
118 chain = above->first_parent;
119 while (hammer2_chain_refactor_test(chain, 1))
120 chain = chain->next_parent;
121 atomic_set_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
122 spin_unlock(&above->cst.spin);
127 * Allocate a new disconnected chain element representing the specified
128 * bref. chain->refs is set to 1 and the passed bref is copied to
129 * chain->bref. chain->bytes is derived from the bref.
131 * chain->core is NOT allocated and the media data and bp pointers are left
132 * NULL. The caller must call chain_core_alloc() to allocate or associate
133 * a core with the chain.
135 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
138 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_trans_t *trans,
139 hammer2_blockref_t *bref)
141 hammer2_chain_t *chain;
142 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
145 * Construct the appropriate system structure.
148 case HAMMER2_BREF_TYPE_INODE:
149 case HAMMER2_BREF_TYPE_INDIRECT:
150 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
151 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
152 case HAMMER2_BREF_TYPE_DATA:
153 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
154 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
156 case HAMMER2_BREF_TYPE_VOLUME:
158 panic("hammer2_chain_alloc volume type illegal for op");
161 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
167 chain->index = -1; /* not yet assigned */
168 chain->bytes = bytes;
170 chain->flags = HAMMER2_CHAIN_ALLOCATED;
171 chain->delete_tid = HAMMER2_MAX_TID;
173 chain->modify_tid = trans->sync_tid;
179 * Associate an existing core with the chain or allocate a new core.
181 * The core is not locked. No additional refs on the chain are made.
184 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
186 hammer2_chain_t **scanp;
188 KKASSERT(chain->core == NULL);
189 KKASSERT(chain->next_parent == NULL);
192 core = kmalloc(sizeof(*core), chain->hmp->mchain,
194 RB_INIT(&core->rbtree);
197 ccms_cst_init(&core->cst, chain);
198 core->first_parent = chain;
200 atomic_add_int(&core->sharecnt, 1);
202 spin_lock(&core->cst.spin);
203 scanp = &core->first_parent;
205 scanp = &(*scanp)->next_parent;
207 spin_unlock(&core->cst.spin);
212 * Add a reference to a chain element, preventing its destruction.
215 hammer2_chain_ref(hammer2_chain_t *chain)
217 atomic_add_int(&chain->refs, 1);
/*
 * NOTE(review): fragmentary extraction.  The fused leading numbers are
 * original source line numbers; the gaps between them are dropped lines
 * (the function's braces, local variables `refs`/`need`, and the
 * compare-and-set retry loop are missing from this view).  Code left
 * byte-identical -- consult the upstream DragonFly hammer2_chain.c for
 * the complete hammer2_chain_drop().
 */
221 * Drop the caller's reference to the chain. When the ref count drops to
222 * zero this function will disassociate the chain from its parent and
223 * deallocate it, then recursively drop the parent using the implied ref
224 * from the chain's chain->parent.
226 * WARNING! Just because we are able to deallocate a chain doesn't mean
227 * that chain->core->rbtree is empty. There can still be a sharecnt
228 * on chain->core and RBTREE entries that refer to different parents.
230 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
233 hammer2_chain_drop(hammer2_chain_t *chain)
239 if (chain->flags & HAMMER2_CHAIN_MOVED)
241 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
243 KKASSERT(chain->refs > need);
252 chain = hammer2_chain_lastdrop(chain);
254 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
256 /* retry the same chain */
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (returns, braces, goto labels and the retry
 * paths are missing).  Left byte-identical.  This is the 1->0 refcount
 * transition handler: it removes the chain from its parent's RB tree,
 * unlinks it from the shared core's parent list, drops the core's
 * sharecnt (freeing the core on its own 1->0 transition), frees the
 * chain memory, and selects a parent or sibling chain for the caller's
 * recursive drop.  Consult upstream DragonFly hammer2_chain.c for the
 * complete function.
 */
262 * Safe handling of the 1->0 transition on chain. Returns a chain for
263 * recursive drop or NULL, possibly returning the same chain of the atomic
266 * The cst spinlock is allowed to nest child-to-parent (not parent-to-child).
270 hammer2_chain_lastdrop(hammer2_chain_t *chain)
272 hammer2_mount_t *hmp;
273 hammer2_chain_core_t *above;
274 hammer2_chain_core_t *core;
275 hammer2_chain_t **scanp;
276 hammer2_chain_t *parent;
279 * Spinlock the core and check to see if it is empty. If it is
280 * not empty we leave chain intact with refs == 0.
282 if ((core = chain->core) != NULL) {
283 spin_lock(&core->cst.spin);
284 if (!RB_EMPTY(&core->rbtree)) {
285 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
286 /* 1->0 transition successful */
287 spin_unlock(&core->cst.spin);
290 /* 1->0 transition failed, retry */
291 spin_unlock(&core->cst.spin);
301 * Spinlock the parent and try to drop the last ref. On success
302 * remove chain from its parent.
304 if ((above = chain->above) != NULL) {
305 spin_lock(&above->cst.spin);
306 if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
307 /* 1->0 transition failed */
308 spin_unlock(&above->cst.spin);
310 spin_unlock(&core->cst.spin);
316 * 1->0 transition successful
318 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
319 RB_REMOVE(hammer2_chain_tree, &above->rbtree, chain);
320 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
324 * Calculate a chain to return for a recursive drop.
326 * If the rbtree containing chain is empty we try to
327 * recursively drop one of our parents. Otherwise
328 * we try to recursively drop a sibling.
330 if (RB_EMPTY(&above->rbtree)) {
331 scanp = &above->first_parent;
332 while ((parent = *scanp) != NULL) {
333 if (parent->refs == 0 &&
334 atomic_cmpset_int(&parent->refs, 0, 1)) {
337 scanp = &parent->next_parent;
340 parent = RB_ROOT(&above->rbtree);
341 if (atomic_cmpset_int(&parent->refs, 0, 1) == 0)
344 spin_unlock(&above->cst.spin);
345 above = NULL; /* safety */
349 * We still have the core spinlock (if core is non-NULL). The
350 * above spinlock is gone.
353 scanp = &core->first_parent;
354 while (*scanp != chain)
355 scanp = &(*scanp)->next_parent;
356 *scanp = chain->next_parent;
357 chain->next_parent = NULL;
360 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
362 * On the 1->0 transition of core we can destroy
365 spin_unlock(&core->cst.spin);
366 KKASSERT(core->cst.count == 0);
367 KKASSERT(core->cst.upgrade == 0);
368 kfree(core, hmp->mchain);
370 spin_unlock(&core->cst.spin);
372 core = NULL; /* safety */
376 * All spin locks are gone, finish freeing stuff.
378 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
379 HAMMER2_CHAIN_MODIFIED)) == 0);
381 switch(chain->bref.type) {
382 case HAMMER2_BREF_TYPE_VOLUME:
385 case HAMMER2_BREF_TYPE_INODE:
387 kfree(chain->data, hmp->minode);
392 KKASSERT(chain->data == NULL);
396 KKASSERT(chain->bp == NULL);
399 if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
400 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
401 kfree(chain, hmp->mchain);
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (locals such as `ostate`, `error`, `bbytes`,
 * `pbase`, `peof`, `boff`, `bdata`, plus braces, breaks and several
 * statements are missing).  Left byte-identical.  This is the chain
 * ref+lock entry point: it refs the chain (unless RESOLVE_NOREF), takes
 * the core's ccms lock shared or exclusive, then optionally resolves
 * chain->data per the RESOLVE_* policy by reading or zero-filling a
 * device buffer and pointing or copying the data as appropriate for the
 * bref type.  Consult upstream DragonFly hammer2_chain.c for the
 * complete function.
 */
408 * Ref and lock a chain element, acquiring its data with I/O if necessary,
409 * and specify how you would like the data to be resolved.
411 * Returns 0 on success or an error code if the data could not be acquired.
412 * The chain element is locked on return regardless of whether an error
415 * The lock is allowed to recurse, multiple locking ops will aggregate
416 * the requested resolve types. Once data is assigned it will not be
417 * removed until the last unlock.
419 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
420 * (typically used to avoid device/logical buffer
423 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
424 * the INITIAL-create state (indirect blocks only).
426 * Do not resolve data elements for DATA chains.
427 * (typically used to avoid device/logical buffer
430 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
432 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
433 * it will be locked exclusive.
435 * NOTE: Embedded elements (volume header, inodes) are always resolved
438 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
439 * element will instantiate and zero its buffer, and flush it on
442 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
443 * so as not to instantiate a device buffer, which could alias against
444 * a logical file buffer. However, if ALWAYS is specified the
445 * device buffer will be instantiated anyway.
447 * WARNING! If data must be fetched a shared lock will temporarily be
448 * upgraded to exclusive. However, a deadlock can occur if
449 * the caller owns more than one shared lock.
452 hammer2_chain_lock(hammer2_chain_t *chain, int how)
454 hammer2_mount_t *hmp;
455 hammer2_chain_core_t *core;
456 hammer2_blockref_t *bref;
466 * Ref and lock the element. Recursive locks are allowed.
468 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
469 hammer2_chain_ref(chain);
470 atomic_add_int(&chain->lockcnt, 1);
473 KKASSERT(hmp != NULL);
476 * Get the appropriate lock.
479 if (how & HAMMER2_RESOLVE_SHARED)
480 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
482 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
485 * If we already have a valid data pointer no further action is
492 * Do we have to resolve the data?
494 switch(how & HAMMER2_RESOLVE_MASK) {
495 case HAMMER2_RESOLVE_NEVER:
497 case HAMMER2_RESOLVE_MAYBE:
498 if (chain->flags & HAMMER2_CHAIN_INITIAL)
500 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
502 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
505 case HAMMER2_RESOLVE_ALWAYS:
510 * Upgrade to an exclusive lock so we can safely manipulate the
511 * buffer cache. If another thread got to it before us we
514 ostate = ccms_thread_lock_upgrade(&core->cst);
516 ccms_thread_lock_downgrade(&core->cst, ostate);
521 * We must resolve to a device buffer, either by issuing I/O or
522 * by creating a zero-fill element. We do not mark the buffer
523 * dirty when creating a zero-fill element (the hammer2_chain_modify()
524 * API must still be used to do that).
526 * The device buffer is variable-sized in powers of 2 down
527 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
528 * chunk always contains buffers of the same size. (XXX)
530 * The minimum physical IO size may be larger than the variable
535 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
536 bbytes = HAMMER2_MINIOSIZE;
537 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
538 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
539 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
540 KKASSERT(pbase != 0);
543 * The getblk() optimization can only be used on newly created
544 * elements if the physical block size matches the request.
546 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
547 chain->bytes == bbytes) {
548 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
550 } else if (hammer2_cluster_enable) {
551 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
552 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
555 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
559 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
560 (intmax_t)pbase, error);
563 ccms_thread_lock_downgrade(&core->cst, ostate);
568 * Zero the data area if the chain is in the INITIAL-create state.
569 * Mark the buffer for bdwrite().
571 bdata = (char *)chain->bp->b_data + boff;
572 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
573 bzero(bdata, chain->bytes);
574 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
578 * Setup the data pointer, either pointing it to an embedded data
579 * structure and copying the data from the buffer, or pointing it
582 * The buffer is not retained when copying to an embedded data
583 * structure in order to avoid potential deadlocks or recursions
584 * on the same physical buffer.
586 switch (bref->type) {
587 case HAMMER2_BREF_TYPE_VOLUME:
589 * Copy data from bp to embedded buffer
591 panic("hammer2_chain_lock: called on unresolved volume header");
594 KKASSERT(pbase == 0);
595 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
596 bcopy(bdata, &hmp->voldata, chain->bytes);
597 chain->data = (void *)&hmp->voldata;
602 case HAMMER2_BREF_TYPE_INODE:
604 * Copy data from bp to embedded buffer, do not retain the
607 KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
608 chain->data = kmalloc(sizeof(chain->data->ipdata),
609 hmp->minode, M_WAITOK | M_ZERO);
610 bcopy(bdata, &chain->data->ipdata, chain->bytes);
614 case HAMMER2_BREF_TYPE_INDIRECT:
615 case HAMMER2_BREF_TYPE_DATA:
616 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
617 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
618 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
621 * Point data at the device buffer and leave bp intact.
623 chain->data = (void *)bdata;
628 * Make sure the bp is not specifically owned by this thread before
629 * restoring to a possibly shared lock, so another hammer2 thread
633 BUF_KERNPROC(chain->bp);
634 ccms_thread_lock_downgrade(&core->cst, ostate);
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (locals such as `lockcnt`, `ostate`,
 * `counterp`; braces, breaks, returns and the bdwrite()/brelse() calls
 * are missing).  Left byte-identical.  This is the unlock+deref entry
 * point: non-final unlocks just decrement lockcnt and drop; on the
 * final 1->0 lock transition the core lock is upgraded exclusive, I/O
 * statistics counters are bumped, and chain->bp is retired
 * (B_RELBUF/B_CLUSTEROK/cluster_awrite depending on DIRTYBP/IOFLUSH).
 * Consult upstream DragonFly hammer2_chain.c for the complete function.
 */
639 * Unlock and deref a chain element.
641 * On the last lock release any non-embedded data (chain->bp) will be
645 hammer2_chain_unlock(hammer2_chain_t *chain)
647 hammer2_chain_core_t *core = chain->core;
653 * The core->cst lock can be shared across several chains so we
654 * need to track the per-chain lockcnt separately.
656 * If multiple locks are present (or being attempted) on this
657 * particular chain we can just unlock, drop refs, and return.
659 * Otherwise fall-through on the 1->0 transition.
662 lockcnt = chain->lockcnt;
663 KKASSERT(lockcnt > 0);
666 if (atomic_cmpset_int(&chain->lockcnt,
667 lockcnt, lockcnt - 1)) {
668 ccms_thread_unlock(&core->cst);
669 hammer2_chain_drop(chain);
673 if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
680 * On the 1->0 transition we upgrade the core lock (if necessary)
681 * to exclusive for terminal processing. If after upgrading we find
682 * that lockcnt is non-zero, another thread is racing us and will
683 * handle the unload for us later on, so just cleanup and return
684 * leaving the data/bp intact
686 * Otherwise if lockcnt is still 0 it is possible for it to become
687 * non-zero and race, but since we hold the core->cst lock
688 * exclusively all that will happen is that the chain will be
689 * reloaded after we unload it.
691 ostate = ccms_thread_lock_upgrade(&core->cst);
692 if (chain->lockcnt) {
693 ccms_thread_unlock_upgraded(&core->cst, ostate);
694 hammer2_chain_drop(chain);
699 * Shortcut the case if the data is embedded or not resolved.
701 * Do NOT NULL out chain->data (e.g. inode data), it might be
704 * The DIRTYBP flag is non-applicable in this situation and can
705 * be cleared to keep the flags state clean.
707 if (chain->bp == NULL) {
708 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
709 ccms_thread_unlock_upgraded(&core->cst, ostate);
710 hammer2_chain_drop(chain);
717 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
719 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
720 switch(chain->bref.type) {
721 case HAMMER2_BREF_TYPE_DATA:
722 counterp = &hammer2_ioa_file_write;
724 case HAMMER2_BREF_TYPE_INODE:
725 counterp = &hammer2_ioa_meta_write;
727 case HAMMER2_BREF_TYPE_INDIRECT:
728 counterp = &hammer2_ioa_indr_write;
730 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
731 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
732 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
733 counterp = &hammer2_ioa_fmap_write;
736 counterp = &hammer2_ioa_volu_write;
741 switch(chain->bref.type) {
742 case HAMMER2_BREF_TYPE_DATA:
743 counterp = &hammer2_iod_file_write;
745 case HAMMER2_BREF_TYPE_INODE:
746 counterp = &hammer2_iod_meta_write;
748 case HAMMER2_BREF_TYPE_INDIRECT:
749 counterp = &hammer2_iod_indr_write;
751 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
752 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
753 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
754 counterp = &hammer2_iod_fmap_write;
757 counterp = &hammer2_iod_volu_write;
766 * If a device buffer was used for data be sure to destroy the
767 * buffer when we are done to avoid aliases (XXX what about the
768 * underlying VM pages?).
770 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
773 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
774 chain->bp->b_flags |= B_RELBUF;
777 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
778 * or not. The flag will get re-set when chain_modify() is called,
779 * even if MODIFIED is already set, allowing the OS to retire the
780 * buffer independent of a hammer2 flush.
783 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
784 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
785 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
786 atomic_clear_int(&chain->flags,
787 HAMMER2_CHAIN_IOFLUSH);
788 chain->bp->b_flags |= B_RELBUF;
789 cluster_awrite(chain->bp);
791 chain->bp->b_flags |= B_CLUSTEROK;
795 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
796 atomic_clear_int(&chain->flags,
797 HAMMER2_CHAIN_IOFLUSH);
798 chain->bp->b_flags |= B_RELBUF;
801 /* bp might still be dirty */
806 ccms_thread_unlock_upgraded(&core->cst, ostate);
807 hammer2_chain_drop(chain);
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (locals such as `obytes`, `nbytes`, `bbytes`,
 * `pbase`, `boff`, `nbp`, `bdata`, `error`; braces, early return, and
 * the freemap_free/freemap_alloc argument tails are missing).  Left
 * byte-identical.  This resizes a DATA or INDIRECT chain's physical
 * allocation via delete-duplicate + freemap relocation, copying or
 * zero-extending resolved data into the new device buffer, then marks
 * the chain MOVED and propagates SUBMODIFIED.  Consult upstream
 * DragonFly hammer2_chain.c for the complete function.
 */
811 * Resize the chain's physical storage allocation in-place. This may
812 * replace the passed-in chain with a new chain.
814 * Chains can be resized smaller without reallocating the storage.
815 * Resizing larger will reallocate the storage.
817 * Must be passed an exclusively locked parent and chain, returns a new
818 * exclusively locked chain at the same index and unlocks the old chain.
819 * Flushes the buffer if necessary.
821 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
822 * to avoid instantiating a device buffer that conflicts with the vnode
823 * data buffer. That is, the passed-in bp is a logical buffer, whereas
824 * any chain-oriented bp would be a device buffer.
826 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
827 * XXX return error if cannot resize.
830 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
832 hammer2_chain_t *parent, hammer2_chain_t **chainp,
833 int nradix, int flags)
835 hammer2_mount_t *hmp = trans->hmp;
836 hammer2_chain_t *chain = *chainp;
849 * Only data and indirect blocks can be resized for now.
850 * (The volu root, inodes, and freemap elements use a fixed size).
852 KKASSERT(chain != &hmp->vchain);
853 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
854 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
857 * Nothing to do if the element is already the proper size
859 obytes = chain->bytes;
860 nbytes = 1U << nradix;
861 if (obytes == nbytes)
865 * Delete the old chain and duplicate it at the same (parent, index),
866 * returning a new chain. This allows the old chain to still be
867 * used by the flush code. Duplication occurs in-place.
869 * The parent does not have to be locked for the delete/duplicate call,
870 * but is in this particular code path.
872 * NOTE: If we are not crossing a synchronization point the
873 * duplication code will simply reuse the existing chain
876 hammer2_chain_delete_duplicate(trans, &chain);
879 * Set MODIFIED and add a chain ref to prevent destruction. Both
880 * modified flags share the same ref. (duplicated chains do not
881 * start out MODIFIED unless possibly if the duplication code
882 * decided to reuse the existing chain as-is).
884 * If the chain is already marked MODIFIED then we can safely
885 * return the previous allocation to the pool without having to
886 * worry about snapshots. XXX check flush synchronization.
888 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
889 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
890 hammer2_chain_ref(chain);
893 hammer2_freemap_free(hmp, chain->bref.data_off,
899 * Relocate the block, even if making it smaller (because different
900 * block sizes may be in different regions).
902 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
904 chain->bytes = nbytes;
905 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
908 * The device buffer may be larger than the allocation size.
910 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
911 bbytes = HAMMER2_MINIOSIZE;
912 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
913 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
915 KKASSERT(chain->bp == NULL);
918 * Only copy the data if resolved, otherwise the caller is
921 * XXX handle device-buffer resizing case too. Right now we
922 * only handle logical buffer resizing.
925 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
926 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
927 KKASSERT(chain != &hmp->vchain); /* safety */
930 * The getblk() optimization can only be used if the
931 * physical block size matches the request.
933 if (nbytes == bbytes) {
934 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
937 error = bread(hmp->devvp, pbase, bbytes, &nbp);
938 KKASSERT(error == 0);
940 bdata = (char *)nbp->b_data + boff;
943 * chain->bp and chain->data represent the on-disk version
944 * of the data, whereas the passed-in bp is usually a
945 * more up-to-date logical buffer. However, there is no
946 * need to synchronize the more up-to-date data in (bp)
947 * as it will do that on its own when it flushes.
949 if (nbytes < obytes) {
950 bcopy(chain->data, bdata, nbytes);
952 bcopy(chain->data, bdata, obytes);
953 bzero(bdata + obytes, nbytes - obytes);
957 * NOTE: The INITIAL state of the chain is left intact.
958 * We depend on hammer2_chain_modify() to do the
961 * NOTE: We set B_NOCACHE to throw away the previous bp and
962 * any VM backing store, even if it was dirty.
963 * Otherwise we run the risk of a logical/device
964 * conflict on reallocation.
966 chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
969 chain->data = (void *)bdata;
970 hammer2_chain_modify(trans, &chain, 0);
975 * Make sure the chain is marked MOVED and SUBMOD is set in the
976 * parent(s) so the adjustments are picked up by flush.
978 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
979 hammer2_chain_ref(chain);
980 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
982 hammer2_chain_setsubmod(trans, chain);
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (the assignments that set `ochain`/`nchain`
 * from ip->chain and *parentp, and the ip->chain repoint, are missing
 * between lines 1012 and 1020).  Left byte-identical.  Inode-aware
 * wrapper around hammer2_chain_modify(): flags the inode MODIFIED,
 * runs the chain COW, re-refs/re-drops when the chain was replaced,
 * and returns a pointer to the (now writable) embedded inode data.
 * Consult upstream DragonFly hammer2_chain.c for the complete function.
 */
987 * Set a chain modified, making it read-write and duplicating it if necessary.
988 * This function will assign a new physical block to the chain if necessary
990 * Duplication of already-modified chains is possible when the modification
991 * crosses a flush synchronization boundary.
993 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
994 * level or the COW operation will not work.
996 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
997 * run the data through the device buffers.
999 * This function may return a different chain than was passed, in which case
1000 * the old chain will be unlocked and the new chain will be locked.
1002 * ip->chain may be adjusted by hammer2_chain_modify_ip().
1004 hammer2_inode_data_t *
1005 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1006 hammer2_chain_t **parentp, int flags)
1008 hammer2_chain_t *ochain;
1009 hammer2_chain_t *nchain;
1011 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1012 hammer2_chain_modify(trans, parentp, flags);
1015 if (ochain != nchain) {
1016 hammer2_chain_ref(nchain);
1018 hammer2_chain_drop(ochain);
1020 return(&ip->chain->data->ipdata);
/*
 * NOTE(review): fragmentary extraction -- fused original line numbers,
 * interior lines dropped (locals such as `chain`, `boff`, `bbytes`,
 * `nbp`, `bdata`, `error`; braces, breaks, the `skip1`/`skip2`-style
 * exit paths, and argument tails of freemap_alloc are missing).  Left
 * byte-identical.  This is the core copy-on-write routine: it marks
 * the chain MODIFIED (delete-duplicating across a flush
 * synchronization point), allocates a new physical block, instantiates
 * or COWs the device buffer, and propagates SUBMODIFIED.  Consult
 * upstream DragonFly hammer2_chain.c for the complete function.
 */
1024 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1027 hammer2_mount_t *hmp = trans->hmp;
1028 hammer2_chain_t *chain;
1029 hammer2_off_t pbase;
1037 * modify_tid is only updated for primary modifications, not for
1038 * propagated brefs. mirror_tid will be updated regardless during
1039 * the flush, no need to set it here.
1044 * If the chain is already marked MODIFIED we can usually just
1045 * return. However, if a modified chain is modified again in
1046 * a synchronization-point-crossing manner we have to
1047 * delete/duplicate the chain so as not to interfere with the
1048 * atomicity of the flush.
1050 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1051 if (chain->modify_tid <= hmp->flush_tid &&
1052 trans->sync_tid > hmp->flush_tid) {
1054 * Modifications cross synchronization point,
1055 * requires delete-duplicate.
1057 KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
1058 hammer2_chain_delete_duplicate(trans, chainp);
1060 /* fall through using duplicate */
1063 * It is possible that a prior lock/modify sequence
1064 * retired the buffer. During this lock/modify
1065 * sequence MODIFIED may still be set but the buffer
1066 * could wind up clean. Since the caller is going
1067 * to modify the buffer further we have to be sure
1068 * that DIRTYBP is set so our chain code knows to
1069 * bwrite/bdwrite the bp.
1071 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1072 chain->bp == NULL) {
1075 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1078 * Must still adjust these fields in the
1079 * already-modified path.
1081 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1082 chain->bref.modify_tid = trans->sync_tid;
1083 chain->modify_tid = trans->sync_tid;
1088 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1089 chain->bref.modify_tid = trans->sync_tid;
1092 * Set MODIFIED and add a chain ref to prevent destruction. Both
1093 * modified flags share the same ref.
1095 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1096 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1097 hammer2_chain_ref(chain);
1101 * Adjust chain->modify_tid so the flusher knows when the
1102 * modification occurred.
1104 chain->modify_tid = trans->sync_tid;
1107 * We must allocate the copy-on-write block.
1109 * If the data is embedded no other action is required.
1111 * If the data is not embedded we acquire and clear the
1112 * new block. If chain->data is not NULL we then do the
1113 * copy-on-write. chain->data will then be repointed to the new
1114 * buffer and the old buffer will be released.
1116 * For newly created elements with no prior allocation we go
1117 * through the copy-on-write steps except without the copying part.
1119 if (chain != &hmp->vchain) {
1120 if ((hammer2_debug & 0x0001) &&
1121 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
1122 kprintf("Replace %d\n", chain->bytes);
1124 chain->bref.data_off =
1125 hammer2_freemap_alloc(hmp, chain->bref.type,
1127 /* XXX failed allocation */
1131 * If data instantiation is optional and the chain has no current
1132 * data association (typical for DATA and newly-created INDIRECT
1133 * elements), don't instantiate the buffer now.
1135 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
1140 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
1141 * written-out on unlock. This bit is independent of the MODIFIED
1142 * bit because the chain may still need meta-data adjustments done
1143 * by virtue of MODIFIED for its parent, and the buffer can be
1144 * flushed out (possibly multiple times) by the OS before that.
1146 * Clearing the INITIAL flag (for indirect blocks) indicates that
1147 * a zero-fill buffer has been instantiated.
1149 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1150 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1153 * We currently should never instantiate a device buffer for a
1154 * file data chain. (We definitely can for a freemap chain).
1156 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1159 * Execute COW operation
1161 switch(chain->bref.type) {
1162 case HAMMER2_BREF_TYPE_VOLUME:
1163 case HAMMER2_BREF_TYPE_INODE:
1165 * The data is embedded, no copy-on-write operation is
1168 KKASSERT(chain->bp == NULL);
1170 case HAMMER2_BREF_TYPE_DATA:
1171 case HAMMER2_BREF_TYPE_INDIRECT:
1172 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1173 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1174 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1176 * Perform the copy-on-write operation
1178 KKASSERT(chain != &hmp->vchain); /* safety */
1180 * The device buffer may be larger than the allocation size.
1182 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
1183 bbytes = HAMMER2_MINIOSIZE;
1184 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1185 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1188 * The getblk() optimization can only be used if the
1189 * physical block size matches the request.
1191 if (chain->bytes == bbytes) {
1192 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
1195 error = bread(hmp->devvp, pbase, bbytes, &nbp);
1196 KKASSERT(error == 0);
1198 bdata = (char *)nbp->b_data + boff;
1201 * Copy or zero-fill on write depending on whether
1202 * chain->data exists or not.
1205 bcopy(chain->data, bdata, chain->bytes);
1206 KKASSERT(chain->bp != NULL);
1208 bzero(bdata, chain->bytes);
1211 chain->bp->b_flags |= B_RELBUF;
1215 chain->data = bdata;
1218 panic("hammer2_chain_modify: illegal non-embedded type %d",
1224 hammer2_chain_setsubmod(trans, chain);
1228 * Mark the volume as having been modified. This short-cut version
1229 * does not have to lock the volume's chain, which allows the ioctl
1230 * code to make adjustments to connections without deadlocking. XXX
1232 * No ref is made on vchain when flagging it MODIFIED.
1235 hammer2_modify_volume(hammer2_mount_t *hmp)
/*
 * Lock/unlock cycle on the volume data; per the header comment above,
 * the unlock with a non-zero second argument flags the volume header
 * as modified without requiring vchain's chain lock.
 */
1237 hammer2_voldata_lock(hmp);
1238 hammer2_voldata_unlock(hmp, 1);
1242 * Locate an in-memory chain. The parent must be locked. The in-memory
1243 * chain is returned with a reference and without a lock, or NULL
1246 * This function returns the chain at the specified index with the highest
1247 * delete_tid. The caller must check whether the chain is flagged
1248 * CHAIN_DELETED or not.
1250 * NOTE: If no chain is found the caller usually must check the on-media
1251 * array to determine if a blockref exists at the index.
1253 struct hammer2_chain_find_info {
/* best matching chain found so far by the RB_SCAN callback */
1254 hammer2_chain_t *best;
/* highest delete_tid seen so far; the callback keeps the chain with the largest value */
1255 hammer2_tid_t delete_tid;
/*
 * RB_SCAN comparison function: selects only children whose index equals
 * info->index (non-zero return values steer the scan past children on
 * either side of the target index; returns elided in this extract).
 */
1261 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1263 struct hammer2_chain_find_info *info = data;
1265 if (child->index < info->index)
1267 if (child->index > info->index)
/*
 * RB_SCAN callback: remember the child with the highest delete_tid at
 * the target index (see the find_info comment above the structure).
 */
1274 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1276 struct hammer2_chain_find_info *info = data;
1278 if (info->delete_tid < child->delete_tid) {
1279 info->delete_tid = child->delete_tid;
/*
 * Locate the chain at (parent, index) with the highest delete_tid.
 * Caller must hold parent->core->cst.spin (see hammer2_chain_find()).
 */
1287 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1289 struct hammer2_chain_find_info info;
/* start at 0 so any child's delete_tid wins the first comparison */
1292 info.delete_tid = 0;
1295 RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1296 hammer2_chain_find_cmp, hammer2_chain_find_callback,
/*
 * Spinlocked wrapper around hammer2_chain_find_locked().  Returns the
 * chain referenced (not locked); caller checks CHAIN_DELETED.
 */
1303 hammer2_chain_find(hammer2_chain_t *parent, int index)
1305 hammer2_chain_t *chain;
/* core spinlock guards the RBTREE while we search and take a ref */
1307 spin_lock(&parent->core->cst.spin);
1308 chain = hammer2_chain_find_locked(parent, index);
/* ref before dropping the spinlock so the chain cannot be destroyed;
 * presumably conditional on a non-NULL result in an elided line — confirm */
1310 hammer2_chain_ref(chain);
1311 spin_unlock(&parent->core->cst.spin);
1317 * Return a locked chain structure with all associated data acquired.
1318 * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1320 * Caller must hold the parent locked shared or exclusive since we may
1321 * need the parent's bref array to find our block.
1323 * The returned child is locked as requested. If NOLOCK, the returned
1324 * child is still at least referenced.
1327 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1329 hammer2_blockref_t *bref;
1330 hammer2_mount_t *hmp = parent->hmp;
1331 hammer2_chain_core_t *above = parent->core;
1332 hammer2_chain_t *chain;
1333 hammer2_chain_t dummy;
1337 * Figure out how to lock. MAYBE can be used to optimize
1338 * the initial-create state for indirect blocks.
1340 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1341 how = HAMMER2_RESOLVE_NEVER;
1343 how = HAMMER2_RESOLVE_MAYBE;
1344 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1345 how |= HAMMER2_RESOLVE_SHARED;
1349 * First see if we have a (possibly modified) chain element cached
1350 * for this (parent, index). Acquire the data if necessary.
1352 * If chain->data is non-NULL the chain should already be marked
/*
 * Probe the RBTREE with a dummy key.  delete_tid = MAX_TID biases the
 * lookup toward the most recent element at this index.
 */
1356 dummy.index = index;
1357 dummy.delete_tid = HAMMER2_MAX_TID;
1358 spin_lock(&above->cst.spin);
1359 chain = RB_FIND(hammer2_chain_tree, &above->rbtree, &dummy);
/* cache hit: ref under the spinlock, lock outside it unless NOLOCK */
1361 hammer2_chain_ref(chain);
1362 spin_unlock(&above->cst.spin);
1363 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1364 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1367 spin_unlock(&above->cst.spin);
1370 * The parent chain must not be in the INITIAL state.
1372 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1373 panic("hammer2_chain_get: Missing bref(1)");
1378 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1379 * the parent's bref to determine where and how big the array is).
1381 switch(parent->bref.type) {
1382 case HAMMER2_BREF_TYPE_INODE:
1383 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1384 bref = &parent->data->ipdata.u.blockset.blockref[index];
1386 case HAMMER2_BREF_TYPE_INDIRECT:
1387 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1388 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1389 KKASSERT(parent->data != NULL);
1390 KKASSERT(index >= 0 &&
1391 index < parent->bytes / sizeof(hammer2_blockref_t));
1392 bref = &parent->data->npdata.blockref[index];
1394 case HAMMER2_BREF_TYPE_VOLUME:
1395 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1396 bref = &hmp->voldata.sroot_blockset.blockref[index];
1400 panic("hammer2_chain_get: unrecognized blockref type: %d",
1403 if (bref->type == 0) {
1404 panic("hammer2_chain_get: Missing bref(2)");
1409 * Allocate a chain structure representing the existing media
1410 * entry. Resulting chain has one ref and is not locked.
1412 * The locking operation we do later will issue I/O to read it.
1414 chain = hammer2_chain_alloc(hmp, NULL, bref);
1415 hammer2_chain_core_alloc(chain, NULL); /* ref'd chain returned */
1418 * Link the chain into its parent. A spinlock is required to safely
1419 * access the RBTREE, and it is possible to collide with another
1420 * hammer2_chain_get() operation because the caller might only hold
1421 * a shared lock on the parent.
1423 KKASSERT(parent->refs > 0);
1424 spin_lock(&above->cst.spin);
1425 chain->above = above;
1426 chain->index = index;
1427 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain)) {
/* lost the insert race to a concurrent chain_get(): undo linkage, drop our copy */
1428 chain->above = NULL;
1430 spin_unlock(&above->cst.spin);
1431 hammer2_chain_drop(chain);
1434 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1435 spin_unlock(&above->cst.spin);
1438 * Our new chain is referenced but NOT locked. Lock the chain
1439 * below. The locking operation also resolves its data.
1441 * If NOLOCK is set the release will release the one-and-only lock.
1443 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1444 hammer2_chain_lock(chain, how); /* recursive lock */
1445 hammer2_chain_drop(chain); /* excess ref */
1451 * Lookup initialization/completion API
/*
 * Prepare a parent for a lookup iteration: lock it with full data
 * resolution, shared if the caller passed LOOKUP_SHARED.
 */
1454 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1456 if (flags & HAMMER2_LOOKUP_SHARED) {
1457 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1458 HAMMER2_RESOLVE_SHARED);
1460 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
/*
 * Terminate a lookup iteration: release the (possibly replaced) parent
 * that lookup_init/lookup left locked.
 */
1466 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1469 hammer2_chain_unlock(parent);
/*
 * Replace (*parentp) with its parent chain, locked with (how).
 * The old parent is unlocked; the new one is found via the core's
 * parent list, skipping entries undergoing refactoring.
 */
1474 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1476 hammer2_chain_t *oparent;
1477 hammer2_chain_t *nparent;
1478 hammer2_chain_core_t *above;
1481 above = oparent->above;
/* walk the core's parent list under the spinlock; skip refactoring chains */
1483 spin_lock(&above->cst.spin);
1484 nparent = above->first_parent;
1485 while (hammer2_chain_refactor_test(nparent, 1))
1486 nparent = nparent->next_parent;
1487 hammer2_chain_ref(nparent); /* protect nparent, use in lock */
1488 spin_unlock(&above->cst.spin);
/* release the old parent before locking the new one; NOREF eats our temp ref */
1490 hammer2_chain_unlock(oparent);
1491 hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1498 * Locate any key between key_beg and key_end inclusive. (*parentp)
1499 * typically points to an inode but can also point to a related indirect
1500 * block and this function will recurse upwards and find the inode again.
1502 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1503 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1504 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1506 * (*parentp) must be exclusively locked and referenced and can be an inode
1507 * or an existing indirect block within the inode.
1509 * On return (*parentp) will be modified to point at the deepest parent chain
1510 * element encountered during the search, as a helper for an insertion or
1511 * deletion. The new (*parentp) will be locked and referenced and the old
1512 * will be unlocked and dereferenced (no change if they are both the same).
1514 * The matching chain will be returned exclusively locked. If NOLOCK is
1515 * requested the chain will be returned only referenced.
1517 * NULL is returned if no match was found, but (*parentp) will still
1518 * potentially be adjusted.
1520 * This function will also recurse up the chain if the key is not within the
1521 * current parent's range. (*parentp) can never be set to NULL. An iteration
1522 * can simply allow (*parentp) to float inside the loop.
1525 hammer2_chain_lookup(hammer2_chain_t **parentp,
1526 hammer2_key_t key_beg, hammer2_key_t key_end,
1529 hammer2_mount_t *hmp;
1530 hammer2_chain_t *parent;
1531 hammer2_chain_t *chain;
1532 hammer2_chain_t *tmp;
1533 hammer2_blockref_t *base;
1534 hammer2_blockref_t *bref;
1535 hammer2_key_t scan_beg;
1536 hammer2_key_t scan_end;
1539 int how_always = HAMMER2_RESOLVE_ALWAYS;
1540 int how_maybe = HAMMER2_RESOLVE_MAYBE;
/* shared-lock variants are used for both resolve modes when requested */
1542 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1543 how_maybe |= HAMMER2_RESOLVE_SHARED;
1544 how_always |= HAMMER2_RESOLVE_SHARED;
1548 * Recurse (*parentp) upward if necessary until the parent completely
1549 * encloses the key range or we hit the inode.
1554 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1555 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1556 scan_beg = parent->bref.key;
1557 scan_end = scan_beg +
1558 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1559 if (key_beg >= scan_beg && key_end <= scan_end)
1561 parent = hammer2_chain_getparent(parentp, how_maybe);
1566 * Locate the blockref array. Currently we do a fully associative
1567 * search through the array.
1569 switch(parent->bref.type) {
1570 case HAMMER2_BREF_TYPE_INODE:
1572 * Special shortcut for embedded data returns the inode
1573 * itself. Callers must detect this condition and access
1574 * the embedded data (the strategy code does this for us).
1576 * This is only applicable to regular files and softlinks.
1578 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1579 if (flags & HAMMER2_LOOKUP_NOLOCK)
1580 hammer2_chain_ref(parent);
1582 hammer2_chain_lock(parent, how_always);
1585 base = &parent->data->ipdata.u.blockset.blockref[0];
1586 count = HAMMER2_SET_COUNT;
1588 case HAMMER2_BREF_TYPE_INDIRECT:
1589 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1590 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1592 * Optimize indirect blocks in the INITIAL state to avoid
1595 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1598 if (parent->data == NULL)
1599 panic("parent->data is NULL");
1600 base = &parent->data->npdata.blockref[0];
1602 count = parent->bytes / sizeof(hammer2_blockref_t);
1604 case HAMMER2_BREF_TYPE_VOLUME:
1605 base = &hmp->voldata.sroot_blockset.blockref[0];
1606 count = HAMMER2_SET_COUNT;
1609 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1611 base = NULL; /* safety */
1612 count = 0; /* safety */
1616 * If the element and key overlap we use the element.
1618 * NOTE! Deleted elements are effectively invisible. Deletions
1619 * proactively clear the parent bref to the deleted child
1620 * so we do not try to shadow here to avoid parent updates
1621 * (which would be difficult since multiple deleted elements
1622 * might represent different flush synchronization points).
/*
 * Linear scan of slots: prefer the in-memory child at slot i if one
 * exists (skipping DELETED ones), otherwise fall back to base[i].
 */
1625 for (i = 0; i < count; ++i) {
1626 tmp = hammer2_chain_find(parent, i);
1628 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1629 hammer2_chain_drop(tmp);
1633 KKASSERT(bref->type != 0);
1634 } else if (base == NULL || base[i].type == 0) {
1639 scan_beg = bref->key;
1640 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1642 hammer2_chain_drop(tmp);
/* standard interval-overlap test against [key_beg, key_end] */
1643 if (key_beg <= scan_end && key_end >= scan_beg)
1647 if (key_beg == key_end)
/* range lookup that missed in this parent: continue via chain_next */
1649 return (hammer2_chain_next(parentp, NULL,
1650 key_beg, key_end, flags));
1654 * Acquire the new chain element. If the chain element is an
1655 * indirect block we must search recursively.
1657 * It is possible for the tmp chain above to be removed from
1658 * the RBTREE but the parent lock ensures it would not have been
1659 * destroyed from the media, so the chain_get() code will simply
1660 * reload it from the media in that case.
1662 chain = hammer2_chain_get(parent, i, flags);
1667 * If the chain element is an indirect block it becomes the new
1668 * parent and we loop on it.
1670 * The parent always has to be locked with at least RESOLVE_MAYBE
1671 * so we can access its data. It might need a fixup if the caller
1672 * passed incompatible flags. Be careful not to cause a deadlock
1673 * as a data-load requires an exclusive lock.
1675 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1676 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1677 hammer2_chain_unlock(parent);
1678 *parentp = parent = chain;
1679 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1680 hammer2_chain_lock(chain, how_maybe |
1681 HAMMER2_RESOLVE_NOREF);
1682 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1683 chain->data == NULL) {
/* relock cycle upgrades a NODATA lock so the new parent's data resolves */
1684 hammer2_chain_ref(chain);
1685 hammer2_chain_unlock(chain);
1686 hammer2_chain_lock(chain, how_maybe |
1687 HAMMER2_RESOLVE_NOREF);
1693 * All done, return the chain
1699 * After having issued a lookup we can iterate all matching keys.
1701 * If chain is non-NULL we continue the iteration from just after its index.
1703 * If chain is NULL we assume the parent was exhausted and continue the
1704 * iteration at the next parent.
1706 * parent must be locked on entry and remains locked throughout. chain's
1707 * lock status must match flags. Chain is always at least referenced.
1710 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1711 hammer2_key_t key_beg, hammer2_key_t key_end,
1714 hammer2_mount_t *hmp;
1715 hammer2_chain_t *parent;
1716 hammer2_chain_t *tmp;
1717 hammer2_blockref_t *base;
1718 hammer2_blockref_t *bref;
1719 hammer2_key_t scan_beg;
1720 hammer2_key_t scan_end;
1722 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1725 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1726 how_maybe |= HAMMER2_RESOLVE_SHARED;
1733 * Calculate the next index and recalculate the parent if necessary.
1737 * Continue iteration within current parent. If not NULL
1738 * the passed-in chain may or may not be locked, based on
1739 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
/* resume just past the previous chain's slot; release per NOLOCK contract */
1742 i = chain->index + 1;
1743 if (flags & HAMMER2_LOOKUP_NOLOCK)
1744 hammer2_chain_drop(chain);
1746 hammer2_chain_unlock(chain);
1749 * Any scan where the lookup returned degenerate data embedded
1750 * in the inode has an invalid index and must terminate.
1752 if (chain == parent)
1755 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1756 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1758 * We reached the end of the iteration.
1763 * Continue iteration with next parent unless the current
1764 * parent covers the range.
1766 scan_beg = parent->bref.key;
1767 scan_end = scan_beg +
1768 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1769 if (key_beg >= scan_beg && key_end <= scan_end)
/* pop up one level: continue after the parent's own slot in its parent */
1772 i = parent->index + 1;
1773 parent = hammer2_chain_getparent(parentp, how_maybe);
1778 * Locate the blockref array. Currently we do a fully associative
1779 * search through the array.
1781 switch(parent->bref.type) {
1782 case HAMMER2_BREF_TYPE_INODE:
1783 base = &parent->data->ipdata.u.blockset.blockref[0];
1784 count = HAMMER2_SET_COUNT;
1786 case HAMMER2_BREF_TYPE_INDIRECT:
1787 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1788 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1789 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1792 KKASSERT(parent->data != NULL);
1793 base = &parent->data->npdata.blockref[0];
1795 count = parent->bytes / sizeof(hammer2_blockref_t);
1797 case HAMMER2_BREF_TYPE_VOLUME:
1798 base = &hmp->voldata.sroot_blockset.blockref[0];
1799 count = HAMMER2_SET_COUNT;
1802 panic("hammer2_chain_next: unrecognized blockref type: %d",
1804 base = NULL; /* safety */
1805 count = 0; /* safety */
1808 KKASSERT(i <= count);
1811 * Look for the key. If we are unable to find a match and an exact
1812 * match was requested we return NULL. If a range was requested we
1813 * run hammer2_chain_next() to iterate.
1815 * NOTE! Deleted elements are effectively invisible. Deletions
1816 * proactively clear the parent bref to the deleted child
1817 * so we do not try to shadow here to avoid parent updates
1818 * (which would be difficult since multiple deleted elements
1819 * might represent different flush synchronization points).
/* same slot-scan logic as hammer2_chain_lookup(): in-memory child wins */
1823 tmp = hammer2_chain_find(parent, i);
1825 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1826 hammer2_chain_drop(tmp);
1831 } else if (base == NULL || base[i].type == 0) {
1837 scan_beg = bref->key;
1838 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1840 hammer2_chain_drop(tmp);
1841 if (key_beg <= scan_end && key_end >= scan_beg)
1847 * If we couldn't find a match recurse up a parent to continue the
1854 * Acquire the new chain element. If the chain element is an
1855 * indirect block we must search recursively.
1857 chain = hammer2_chain_get(parent, i, flags);
1862 * If the chain element is an indirect block it becomes the new
1863 * parent and we loop on it.
1865 * The parent always has to be locked with at least RESOLVE_MAYBE
1866 * so we can access its data. It might need a fixup if the caller
1867 * passed incompatible flags. Be careful not to cause a deadlock
1868 * as a data-load requires an exclusive lock.
1870 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1871 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1872 hammer2_chain_unlock(parent);
1873 *parentp = parent = chain;
1875 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1876 hammer2_chain_lock(parent, how_maybe |
1877 HAMMER2_RESOLVE_NOREF);
1878 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1879 parent->data == NULL) {
/* relock cycle upgrades a NODATA lock so the new parent's data resolves */
1880 hammer2_chain_ref(parent);
1881 hammer2_chain_unlock(parent);
1882 hammer2_chain_lock(parent, how_maybe |
1883 HAMMER2_RESOLVE_NOREF);
1890 * All done, return chain
1896 * Create and return a new hammer2 system memory structure of the specified
1897 * key, type and size and insert it under (*parentp). This is a full
1898 * insertion, based on the supplied key/keybits, and may involve creating
1899 * indirect blocks and moving other chains around via delete/duplicate.
1901 * (*parentp) must be exclusive locked and may be replaced on return
1902 * depending on how much work the function had to do.
1904 * (*chainp) usually starts out NULL and returns the newly created chain,
1905 * but if the caller desires the caller may allocate a disconnected chain
1906 * and pass it in instead. (It is also possible for the caller to use
1907 * chain_duplicate() to create a disconnected chain, manipulate it, then
1908 * pass it into this function to insert it).
1910 * This function should NOT be used to insert INDIRECT blocks. It is
1911 * typically used to create/insert inodes and data blocks.
1913 * Caller must pass-in an exclusively locked parent the new chain is to
1914 * be inserted under, and optionally pass-in a disconnected, exclusively
1915 * locked chain to insert (else we create a new chain). The function will
1916 * adjust (*parentp) as necessary and return the existing or new chain.
1919 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
1920 hammer2_chain_t **chainp,
1921 hammer2_key_t key, int keybits, int type, size_t bytes)
1923 hammer2_mount_t *hmp;
1924 hammer2_chain_t *chain;
1925 hammer2_chain_t *child;
1926 hammer2_chain_t *parent = *parentp;
1927 hammer2_chain_core_t *above;
1928 hammer2_blockref_t dummy;
1929 hammer2_blockref_t *base;
1935 above = parent->core;
/* caller must hold the parent core's thread lock exclusively */
1936 KKASSERT(ccms_thread_lock_owned(&above->cst));
1940 if (chain == NULL) {
1942 * First allocate media space and construct the dummy bref,
1943 * then allocate the in-memory chain structure.
1945 bzero(&dummy, sizeof(dummy));
1948 dummy.keybits = keybits;
1949 dummy.data_off = hammer2_allocsize(bytes);
1950 dummy.methods = parent->bref.methods;
1951 chain = hammer2_chain_alloc(hmp, trans, &dummy);
1952 hammer2_chain_core_alloc(chain, NULL);
1955 * Lock the chain manually, chain_lock will load the chain
1956 * which we do NOT want to do. (note: chain->refs is set
1957 * to 1 by chain_alloc() for us, but lockcnt is not).
1960 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
1964 * We do NOT set INITIAL here (yet). INITIAL is only
1965 * used for indirect blocks.
1967 * Recalculate bytes to reflect the actual media block
1970 bytes = (hammer2_off_t)1 <<
1971 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1972 chain->bytes = bytes;
1975 case HAMMER2_BREF_TYPE_VOLUME:
1976 panic("hammer2_chain_create: called with volume type");
1978 case HAMMER2_BREF_TYPE_INODE:
1979 KKASSERT(bytes == HAMMER2_INODE_BYTES);
1980 chain->data = kmalloc(sizeof(chain->data->ipdata),
1981 hmp->minode, M_WAITOK | M_ZERO);
1983 case HAMMER2_BREF_TYPE_INDIRECT:
/* NOTE(review): adjacent string literals below concatenate without a
 * space, producing "used tocreate" in the panic message. */
1984 panic("hammer2_chain_create: cannot be used to"
1985 "create indirect block");
1987 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1988 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* NOTE(review): same missing-space string concatenation here. */
1989 panic("hammer2_chain_create: cannot be used to"
1990 "create freemap root or node");
1992 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1993 case HAMMER2_BREF_TYPE_DATA:
1995 /* leave chain->data NULL */
1996 KKASSERT(chain->data == NULL);
2001 * Potentially update the chain's key/keybits.
2003 chain->bref.key = key;
2004 chain->bref.keybits = keybits;
2005 KKASSERT(chain->above == NULL);
2009 above = parent->core;
2012 * Locate a free blockref in the parent's array
2014 switch(parent->bref.type) {
2015 case HAMMER2_BREF_TYPE_INODE:
/* inodes with direct-data have no blockref array; disallowed here */
2016 KKASSERT((parent->data->ipdata.op_flags &
2017 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2018 KKASSERT(parent->data != NULL);
2019 base = &parent->data->ipdata.u.blockset.blockref[0];
2020 count = HAMMER2_SET_COUNT;
2022 case HAMMER2_BREF_TYPE_INDIRECT:
2023 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2024 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2025 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2028 KKASSERT(parent->data != NULL);
2029 base = &parent->data->npdata.blockref[0];
2031 count = parent->bytes / sizeof(hammer2_blockref_t);
2033 case HAMMER2_BREF_TYPE_VOLUME:
2034 KKASSERT(parent->data != NULL);
2035 base = &hmp->voldata.sroot_blockset.blockref[0];
2036 count = HAMMER2_SET_COUNT;
2039 panic("hammer2_chain_create: unrecognized blockref type: %d",
2046 * Scan for an unallocated bref, also skipping any slots occupied
2047 * by in-memory chain elements that may not yet have been updated
2048 * in the parent's bref array.
2050 * We don't have to hold the spinlock to save an empty slot as
2051 * new slots can only transition from empty if the parent is
2052 * locked exclusively.
2054 spin_lock(&above->cst.spin);
2055 for (i = 0; i < count; ++i) {
2056 child = hammer2_chain_find_locked(parent, i);
2058 if (child->flags & HAMMER2_CHAIN_DELETED)
2064 if (base[i].type == 0)
2067 spin_unlock(&above->cst.spin);
2070 * If no free blockref could be found we must create an indirect
2071 * block and move a number of blockrefs into it. With the parent
2072 * locked we can safely lock each child in order to move it without
2073 * causing a deadlock.
2075 * This may return the new indirect block or the old parent depending
2076 * on where the key falls. NULL is returned on error.
2079 hammer2_chain_t *nparent;
2081 nparent = hammer2_chain_create_indirect(trans, parent,
2084 if (nparent == NULL) {
2086 hammer2_chain_drop(chain);
2090 if (parent != nparent) {
/* the indirect block replaced the parent; swap locks to it */
2091 hammer2_chain_unlock(parent);
2092 parent = *parentp = nparent;
2098 * Link the chain into its parent. Later on we will have to set
2099 * the MOVED bit in situations where we don't mark the new chain
2100 * as being modified.
2102 if (chain->above != NULL)
2103 panic("hammer2: hammer2_chain_create: chain already connected");
2104 KKASSERT(chain->above == NULL);
2105 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2107 chain->above = above;
2109 spin_lock(&above->cst.spin);
2110 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain))
2111 panic("hammer2_chain_create: collision");
2112 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2113 spin_unlock(&above->cst.spin);
2116 * (allocated) indicates that this is a newly-created chain element
2117 * rather than a renamed chain element.
2119 * In this situation we want to place the chain element in
2120 * the MODIFIED state. The caller expects it to NOT be in the
2123 * The data area will be set up as follows:
2125 * VOLUME not allowed here.
2127 * INODE embedded data area will be set up.
2129 * INDIRECT not allowed here.
2131 * DATA no data area will be set-up (caller is expected
2132 * to have logical buffers, we don't want to alias
2133 * the data onto device buffers!).
2136 switch(chain->bref.type) {
2137 case HAMMER2_BREF_TYPE_DATA:
2138 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2139 hammer2_chain_modify(trans, &chain,
2140 HAMMER2_MODIFY_OPTDATA |
2141 HAMMER2_MODIFY_ASSERTNOCOPY);
2143 case HAMMER2_BREF_TYPE_INDIRECT:
2144 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2145 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2146 /* not supported in this function */
2147 panic("hammer2_chain_create: bad type");
2148 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2149 hammer2_chain_modify(trans, &chain,
2150 HAMMER2_MODIFY_OPTDATA |
2151 HAMMER2_MODIFY_ASSERTNOCOPY);
2154 hammer2_chain_modify(trans, &chain,
2155 HAMMER2_MODIFY_ASSERTNOCOPY);
2160 * When reconnecting a chain we must set MOVED and setsubmod
2161 * so the flush recognizes that it must update the bref in
/* MOVED carries its own ref so the flusher can safely find the chain */
2164 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2165 hammer2_chain_ref(chain);
2166 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2168 hammer2_chain_setsubmod(trans, chain);
2178 * Replace (*chainp) with a duplicate. The original *chainp is unlocked
2179 * and the replacement will be returned locked. Both the original and the
2180 * new chain will share the same RBTREE (have the same chain->core), with
2181 * the new chain becoming the 'current' chain (meaning it is the first in
2182 * the linked list at core->chain_first).
2184 * If (parent, i) then the new duplicated chain is inserted under the parent
2185 * at the specified index (the parent must not have a ref at that index).
2187 * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2188 * similar to if it had just been chain_alloc()'d (suitable for passing into
2189 * hammer2_chain_create() after this function returns).
2191 * NOTE! Duplication is used in order to retain the original topology to
2192 * support flush synchronization points. Both the original and the
2193 * new chain will have the same transaction id and thus the operation
2194 * appears atomic on the media.
2197 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2198 hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2200 hammer2_mount_t *hmp = trans->hmp;
2201 hammer2_blockref_t *base;
2202 hammer2_chain_t *ochain;
2203 hammer2_chain_t *nchain;
2204 hammer2_chain_t *scan;
2205 hammer2_chain_core_t *above;
2206 hammer2_chain_core_t *core;
2211 * First create a duplicate of the chain structure, associating
2212 * it with the same core, making it the same size, pointing it
2213 * to the same bref (the same media block), and copying any inline
2218 bref = &ochain->bref;
2219 nchain = hammer2_chain_alloc(hmp, trans, bref);
2220 hammer2_chain_core_alloc(nchain, ochain->core);
2221 core = ochain->core;
/* derive the byte size from the radix encoded in the data offset */
2223 bytes = (hammer2_off_t)1 <<
2224 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2225 nchain->bytes = bytes;
2228 * Be sure to copy the INITIAL flag as well or we could end up
2229 * loading garbage from the bref.
2231 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2232 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2233 if (ochain->flags & HAMMER2_CHAIN_DIRTYBP)
2234 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DIRTYBP);
2237 * If the old chain is modified the new one must be too,
2238 * but we only want to allocate a new bref.
2240 if (ochain->flags & HAMMER2_CHAIN_MODIFIED) {
2242 * When duplicating chains the MODIFIED state is inherited.
2243 * A new bref typically must be allocated. However, file
2244 * data chains may already have the data offset assigned
2245 * to a logical buffer cache buffer so we absolutely cannot
2246 * allocate a new bref here for TYPE_DATA.
2248 * Basically the flusher core only dumps media topology
2249 * and meta-data, not file data. The VOP_FSYNC code deals
2250 * with the file data. XXX need back-pointer to inode.
2252 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
/* TYPE_DATA: set MODIFIED directly (with its ref), no new bref */
2253 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MODIFIED);
2254 hammer2_chain_ref(nchain);
2256 hammer2_chain_modify(trans, &nchain,
2257 HAMMER2_MODIFY_OPTDATA |
2258 HAMMER2_MODIFY_ASSERTNOCOPY);
2260 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2262 * When duplicating chains in the INITIAL state we need
2263 * to ensure that the chain is marked modified so a
2264 * block is properly assigned to it, otherwise the MOVED
2265 * bit won't do the right thing.
2267 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2268 hammer2_chain_modify(trans, &nchain,
2269 HAMMER2_MODIFY_OPTDATA |
2270 HAMMER2_MODIFY_ASSERTNOCOPY);
2272 if (parent || (ochain->flags & HAMMER2_CHAIN_MOVED)) {
2273 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2274 hammer2_chain_ref(nchain);
2276 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2278 switch(nchain->bref.type) {
2279 case HAMMER2_BREF_TYPE_VOLUME:
2280 panic("hammer2_chain_duplicate: cannot be called w/volhdr");
2282 case HAMMER2_BREF_TYPE_INODE:
2283 KKASSERT(bytes == HAMMER2_INODE_BYTES);
/* inode data is embedded: allocate and copy the ipdata structure */
2285 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2286 hmp->minode, M_WAITOK | M_ZERO);
2287 nchain->data->ipdata = ochain->data->ipdata;
2290 case HAMMER2_BREF_TYPE_INDIRECT:
2291 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2293 bcopy(ochain->data, nchain->data,
2297 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2298 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* NOTE(review): adjacent string literals below concatenate without a
 * space, producing "used tocreate" in the panic message. */
2299 panic("hammer2_chain_duplicate: cannot be used to"
2300 "create a freemap root or node");
2302 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2303 case HAMMER2_BREF_TYPE_DATA:
2305 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2307 bcopy(ochain->data, nchain->data,
2310 /* leave chain->data NULL */
2311 KKASSERT(nchain->data == NULL);
2316 * Unmodified duplicated blocks may have the same bref, we
2317 * must be careful to avoid buffer cache deadlocks so we
2318 * unlock the old chain before resolving the new one.
2320 * Insert nchain at the end of the duplication list.
2322 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2323 /* extra ref still present from original allocation */
2325 hammer2_chain_unlock(ochain);
2327 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2328 HAMMER2_RESOLVE_NOREF); /* eat excess ref */
2329 hammer2_chain_unlock(nchain);
2332 * If parent is not NULL, insert into the parent at the requested
2333 * index. The newly duplicated chain must be marked MOVED and
2334 * SUBMODIFIED set in its parent(s).
2338 * Locate a free blockref in the parent's array
2340 above = parent->core;
2341 KKASSERT(ccms_thread_lock_owned(&above->cst));
2343 switch(parent->bref.type) {
2344 case HAMMER2_BREF_TYPE_INODE:
2345 KKASSERT((parent->data->ipdata.op_flags &
2346 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2347 KKASSERT(parent->data != NULL);
2348 base = &parent->data->ipdata.u.blockset.blockref[0];
2349 count = HAMMER2_SET_COUNT;
2351 case HAMMER2_BREF_TYPE_INDIRECT:
2352 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2353 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2354 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2357 KKASSERT(parent->data != NULL);
2358 base = &parent->data->npdata.blockref[0];
2360 count = parent->bytes / sizeof(hammer2_blockref_t);
2362 case HAMMER2_BREF_TYPE_VOLUME:
2363 KKASSERT(parent->data != NULL);
2364 base = &hmp->voldata.sroot_blockset.blockref[0];
2365 count = HAMMER2_SET_COUNT;
/* NOTE(review): this panic message says "hammer2_chain_create" but we
 * are in hammer2_chain_duplicate — likely a copy/paste slip. */
2368 panic("hammer2_chain_create: unrecognized "
2369 "blockref type: %d",
2374 KKASSERT(i >= 0 && i < count);
2376 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2377 KKASSERT(parent->refs > 0);
2379 spin_lock(&above->cst.spin);
2380 nchain->above = above;
/* the target slot must be empty or occupied only by a deleted chain */
2382 scan = hammer2_chain_find_locked(parent, i);
2383 KKASSERT(base == NULL || base[i].type == 0 ||
2385 (scan->flags & HAMMER2_CHAIN_DELETED));
2386 if (RB_INSERT(hammer2_chain_tree, &above->rbtree,
2388 panic("hammer2_chain_duplicate: collision");
2390 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2391 spin_unlock(&above->cst.spin);
2393 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2394 hammer2_chain_ref(nchain);
2395 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2397 hammer2_chain_setsubmod(trans, nchain);
2402 * Special in-place delete-duplicate sequence which does not require a
2403 * locked parent. (*chainp) is marked DELETED and atomically replaced
2404 with a duplicate. Atomicity is at the very-fine spin-lock level in
2405 * order to ensure that lookups do not race us.
2408 hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
2409 hammer2_chain_t **chainp)
2411 hammer2_mount_t *hmp = trans->hmp;
2412 hammer2_chain_t *ochain;
2413 hammer2_chain_t *nchain;
2414 hammer2_chain_core_t *above;
2415 hammer2_chain_core_t *core;
2419 * First create a duplicate of the chain structure, associating
2420 * it with the same core, making it the same size, pointing it
2421 * to the same bref (the same media block), and copying any inline
2425 nchain = hammer2_chain_alloc(hmp, trans, &ochain->bref); /* 1 ref */
2426 hammer2_chain_core_alloc(nchain, ochain->core);
2427 core = ochain->core;
2428 above = ochain->above;
2430 kprintf("delete_duplicate %p.%d(%d)\n", ochain, ochain->bref.type, ochain->refs);
2432 bytes = (hammer2_off_t)1 <<
2433 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2434 nchain->bytes = bytes;
2437 * Be sure to copy the INITIAL flag as well or we could end up
2438 * loading garbage from the bref.
2440 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2441 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2442 if (ochain->flags & HAMMER2_CHAIN_DIRTYBP)
2443 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DIRTYBP);
2446 * If the old chain is modified the new one must be too,
2447 * but we only want to allocate a new bref.
2449 if (ochain->flags & HAMMER2_CHAIN_MODIFIED) {
2451 * When duplicating chains the MODIFIED state is inherited.
2452 * A new bref typically must be allocated. However, file
2453 * data chains may already have the data offset assigned
2454 * to a logical buffer cache buffer so we absolutely cannot
2455 * allocate a new bref here for TYPE_DATA.
2457 * Basically the flusher core only dumps media topology
2458 * and meta-data, not file data. The VOP_FSYNC code deals
2459 * with the file data. XXX need back-pointer to inode.
2461 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2462 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MODIFIED);
2463 hammer2_chain_ref(nchain);
2464 nchain->modify_tid = trans->sync_tid;
2466 hammer2_chain_modify(trans, &nchain,
2467 HAMMER2_MODIFY_OPTDATA |
2468 HAMMER2_MODIFY_ASSERTNOCOPY);
2470 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2472 * When duplicating chains in the INITITAL state we need
2473 * to ensure that the chain is marked modified so a
2474 * block is properly assigned to it, otherwise the MOVED
2475 * bit won't do the right thing.
2477 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2478 hammer2_chain_modify(trans, &nchain,
2479 HAMMER2_MODIFY_OPTDATA |
2480 HAMMER2_MODIFY_ASSERTNOCOPY);
2484 * Unconditionally set the MOVED and SUBMODIFIED bit to force
2485 * update of parent bref and indirect blockrefs during flush.
2487 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2488 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2489 hammer2_chain_ref(nchain);
2491 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2494 * Copy media contents as needed.
2496 switch(nchain->bref.type) {
2497 case HAMMER2_BREF_TYPE_VOLUME:
2498 panic("hammer2_chain_duplicate: cannot be called w/volhdr");
2500 case HAMMER2_BREF_TYPE_INODE:
2501 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2503 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2504 hmp->minode, M_WAITOK | M_ZERO);
2505 nchain->data->ipdata = ochain->data->ipdata;
2508 case HAMMER2_BREF_TYPE_INDIRECT:
2509 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2511 bcopy(ochain->data, nchain->data,
2515 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2516 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2517 panic("hammer2_chain_duplicate: cannot be used to"
2518 "create a freemap root or node");
2520 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2521 case HAMMER2_BREF_TYPE_DATA:
2523 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2525 bcopy(ochain->data, nchain->data,
2528 /* leave chain->data NULL */
2529 KKASSERT(nchain->data == NULL);
2534 * Both chains must be locked for us to be able to set the
2535 * duplink. The caller may expect valid data.
2537 * Unmodified duplicated blocks may have the same bref, we
2538 * must be careful to avoid buffer cache deadlocks so we
2539 * unlock the old chain before resolving the new one.
2541 * Insert nchain at the end of the duplication list.
2543 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2544 /* extra ref still present from original allocation */
2546 nchain->index = ochain->index;
2548 kprintf("duplicate ochain %p(%d) nchain %p(%d) %08x\n",
2549 ochain, ochain->refs, nchain, nchain->refs, nchain->flags);
2551 spin_lock(&above->cst.spin);
2552 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2553 ochain->delete_tid = trans->sync_tid;
2554 nchain->above = above;
2555 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
2556 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2557 hammer2_chain_ref(ochain);
2558 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
2560 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, nchain)) {
2561 panic("hammer2_chain_delete_duplicate: collision");
2563 spin_unlock(&above->cst.spin);
2566 * Cleanup. Also note that nchain must be re-resolved to ensure
2567 * that it's data is resolved because we locked it RESOLVE_NEVER
2570 *chainp = nchain; /* inherits locked */
2571 hammer2_chain_unlock(ochain); /* replacing ochain */
2572 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2573 HAMMER2_RESOLVE_NOREF); /* excess ref */
2574 hammer2_chain_unlock(nchain);
2576 hammer2_chain_setsubmod(trans, nchain);
2580 * Create a snapshot of the specified {parent, chain} with the specified
2583 * (a) We create a duplicate connected to the super-root as the specified
2586 * (b) We issue a restricted flush using the current transaction on the
2589 * (c) We disconnect and reallocate the duplicate's core.
2592 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_inode_t *ip,
2593 hammer2_ioc_pfs_t *pfs)
2595 hammer2_mount_t *hmp = trans->hmp;
2596 hammer2_chain_t *chain;
2597 hammer2_chain_t *nchain;
2598 hammer2_chain_t *parent;
2599 hammer2_inode_data_t *ipdata;
2600 size_t name_len = strlen(pfs->name);
2601 hammer2_key_t lhc = hammer2_dirhash(pfs->name, name_len);
2605 * Create disconnected duplicate
2607 KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
2609 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
2610 hammer2_chain_duplicate(trans, NULL, -1, &nchain, NULL);
2611 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_RECYCLE |
2612 HAMMER2_CHAIN_SNAPSHOT);
2615 * Create named entry in the super-root.
2617 parent = hammer2_chain_lookup_init(hmp->schain, 0);
2619 while (error == 0) {
2620 chain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
2623 if ((lhc & HAMMER2_DIRHASH_LOMASK) == HAMMER2_DIRHASH_LOMASK)
2625 hammer2_chain_unlock(chain);
2629 hammer2_chain_create(trans, &parent, &nchain, lhc, 0,
2630 HAMMER2_BREF_TYPE_INODE,
2631 HAMMER2_INODE_BYTES);
2632 hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
2633 hammer2_chain_lookup_done(parent);
2634 parent = NULL; /* safety */
2639 ipdata = &nchain->data->ipdata;
2640 ipdata->name_key = lhc;
2641 ipdata->name_len = name_len;
2642 ksnprintf(ipdata->filename, sizeof(ipdata->filename), "%s", pfs->name);
2645 * Set PFS type, generate a unique filesystem id, and generate
2646 * a cluster id. Use the same clid when snapshotting a PFS root,
2647 * which theoretically allows the snapshot to be used as part of
2648 * the same cluster (perhaps as a cache).
2650 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
2651 kern_uuidgen(&ipdata->pfs_fsid, 1);
2652 if (ip->chain == ip->pmp->rchain)
2653 ipdata->pfs_clid = ip->chain->data->ipdata.pfs_clid;
2655 kern_uuidgen(&ipdata->pfs_clid, 1);
2658 * Issue a restricted flush of the snapshot. This is a synchronous
2661 trans->flags |= HAMMER2_TRANS_RESTRICTED;
2662 kprintf("SNAPSHOTA\n");
2663 tsleep(trans, 0, "snapslp", hz*4);
2664 kprintf("SNAPSHOTB\n");
2665 hammer2_chain_flush(trans, nchain);
2666 trans->flags &= ~HAMMER2_TRANS_RESTRICTED;
2670 * Remove the link b/c nchain is a snapshot and snapshots don't
2671 * follow CHAIN_DELETED semantics ?
2676 KKASSERT(chain->duplink == nchain);
2677 KKASSERT(chain->core == nchain->core);
2678 KKASSERT(nchain->refs >= 2);
2679 chain->duplink = nchain->duplink;
2680 atomic_clear_int(&nchain->flags, HAMMER2_CHAIN_DUPTARGET);
2681 hammer2_chain_drop(nchain);
2684 kprintf("snapshot %s nchain->refs %d nchain->flags %08x\n",
2685 pfs->name, nchain->refs, nchain->flags);
2686 hammer2_chain_unlock(nchain);
2692 * Create an indirect block that covers one or more of the elements in the
2693 * current parent. Either returns the existing parent with no locking or
2694 * ref changes or returns the new indirect block locked and referenced
2695 * and leaving the original parent lock/ref intact as well.
2697 * If an error occurs, NULL is returned and *errorp is set to the error.
2699 * The returned chain depends on where the specified key falls.
2701 * The key/keybits for the indirect mode only needs to follow three rules:
2703 * (1) That all elements underneath it fit within its key space and
2705 * (2) That all elements outside it are outside its key space.
2707 * (3) When creating the new indirect block any elements in the current
2708 * parent that fit within the new indirect block's keyspace must be
2709 * moved into the new indirect block.
2711 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2712 keyspace than the current parent, but lookup/iteration rules will
2713 * ensure (and must ensure) that rule (2) for all parents leading up
2714 * to the nearest inode or the root volume header is adhered to. This
2715 * is accomplished by always recursing through matching keyspaces in
2716 * the hammer2_chain_lookup() and hammer2_chain_next() API.
2718 * The current implementation calculates the current worst-case keyspace by
2719 * iterating the current parent and then divides it into two halves, choosing
2720 * whichever half has the most elements (not necessarily the half containing
2721 * the requested key).
2723 * We can also opt to use the half with the least number of elements. This
2724 * causes lower-numbered keys (aka logical file offsets) to recurse through
2725 * fewer indirect blocks and higher-numbered keys to recurse through more.
2726 * This also has the risk of not moving enough elements to the new indirect
2727 * block and being forced to create several indirect blocks before the element
2730 * Must be called with an exclusively locked parent.
2734 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2735 hammer2_key_t create_key, int create_bits,
2738 hammer2_mount_t *hmp = trans->hmp;
2739 hammer2_chain_core_t *above;
2740 hammer2_chain_core_t *icore;
2741 hammer2_blockref_t *base;
2742 hammer2_blockref_t *bref;
2743 hammer2_chain_t *chain;
2744 hammer2_chain_t *child;
2745 hammer2_chain_t *ichain;
2746 hammer2_chain_t dummy;
2747 hammer2_key_t key = create_key;
2748 int keybits = create_bits;
2756 * Calculate the base blockref pointer or NULL if the chain
2757 * is known to be empty. We need to calculate the array count
2758 * for RB lookups either way.
2760 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2762 above = parent->core;
2764 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
2765 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2768 switch(parent->bref.type) {
2769 case HAMMER2_BREF_TYPE_INODE:
2770 count = HAMMER2_SET_COUNT;
2772 case HAMMER2_BREF_TYPE_INDIRECT:
2773 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2774 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2775 count = parent->bytes / sizeof(hammer2_blockref_t);
2777 case HAMMER2_BREF_TYPE_VOLUME:
2778 count = HAMMER2_SET_COUNT;
2781 panic("hammer2_chain_create_indirect: "
2782 "unrecognized blockref type: %d",
2788 switch(parent->bref.type) {
2789 case HAMMER2_BREF_TYPE_INODE:
2790 base = &parent->data->ipdata.u.blockset.blockref[0];
2791 count = HAMMER2_SET_COUNT;
2793 case HAMMER2_BREF_TYPE_INDIRECT:
2794 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2795 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2796 base = &parent->data->npdata.blockref[0];
2797 count = parent->bytes / sizeof(hammer2_blockref_t);
2799 case HAMMER2_BREF_TYPE_VOLUME:
2800 base = &hmp->voldata.sroot_blockset.blockref[0];
2801 count = HAMMER2_SET_COUNT;
2804 panic("hammer2_chain_create_indirect: "
2805 "unrecognized blockref type: %d",
2813 * Scan for an unallocated bref, also skipping any slots occupied
2814 * by in-memory chain elements which may not yet have been updated
2815 * in the parent's bref array.
2817 * Deleted elements are ignored.
2819 bzero(&dummy, sizeof(dummy));
2820 dummy.delete_tid = HAMMER2_MAX_TID;
2822 spin_lock(&above->cst.spin);
2823 for (i = 0; i < count; ++i) {
2826 child = hammer2_chain_find_locked(parent, i);
2828 if (child->flags & HAMMER2_CHAIN_DELETED)
2830 bref = &child->bref;
2831 } else if (base && base[i].type) {
2838 * Expand our calculated key range (key, keybits) to fit
2839 * the scanned key. nkeybits represents the full range
2840 * that we will later cut in half (two halves @ nkeybits - 1).
2843 if (nkeybits < bref->keybits) {
2844 if (bref->keybits > 64) {
2845 kprintf("bad bref index %d chain %p bref %p\n", i, chain, bref);
2848 nkeybits = bref->keybits;
2850 while (nkeybits < 64 &&
2851 (~(((hammer2_key_t)1 << nkeybits) - 1) &
2852 (key ^ bref->key)) != 0) {
2857 * If the new key range is larger we have to determine
2858 * which side of the new key range the existing keys fall
2859 * under by checking the high bit, then collapsing the
2860 * locount into the hicount or vise-versa.
2862 if (keybits != nkeybits) {
2863 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
2874 * The newly scanned key will be in the lower half or the
2875 * higher half of the (new) key range.
2877 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
2882 spin_unlock(&above->cst.spin);
2883 bref = NULL; /* now invalid (safety) */
2886 * Adjust keybits to represent half of the full range calculated
2887 * above (radix 63 max)
2892 * Select whichever half contains the most elements. Theoretically
2893 * we can select either side as long as it contains at least one
2894 * element (in order to ensure that a free slot is present to hold
2895 * the indirect block).
2897 key &= ~(((hammer2_key_t)1 << keybits) - 1);
2898 if (hammer2_indirect_optimize) {
2900 * Insert node for least number of keys, this will arrange
2901 * the first few blocks of a large file or the first few
2902 * inodes in a directory with fewer indirect blocks when
2905 if (hicount < locount && hicount != 0)
2906 key |= (hammer2_key_t)1 << keybits;
2908 key &= ~(hammer2_key_t)1 << keybits;
2911 * Insert node for most number of keys, best for heavily
2914 if (hicount > locount)
2915 key |= (hammer2_key_t)1 << keybits;
2917 key &= ~(hammer2_key_t)1 << keybits;
2921 * How big should our new indirect block be? It has to be at least
2922 * as large as its parent.
2924 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2925 nbytes = HAMMER2_IND_BYTES_MIN;
2927 nbytes = HAMMER2_IND_BYTES_MAX;
2928 if (nbytes < count * sizeof(hammer2_blockref_t))
2929 nbytes = count * sizeof(hammer2_blockref_t);
2932 * Ok, create our new indirect block
2934 switch(parent->bref.type) {
2935 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2936 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2937 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2940 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2943 dummy.bref.key = key;
2944 dummy.bref.keybits = keybits;
2945 dummy.bref.data_off = hammer2_allocsize(nbytes);
2946 dummy.bref.methods = parent->bref.methods;
2948 ichain = hammer2_chain_alloc(hmp, trans, &dummy.bref);
2949 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2950 hammer2_chain_core_alloc(ichain, NULL);
2951 icore = ichain->core;
2952 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2953 hammer2_chain_drop(ichain); /* excess ref from alloc */
2956 * We have to mark it modified to allocate its block, but use
2957 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
2958 * it won't be acted upon by the flush code.
2960 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
2963 * Iterate the original parent and move the matching brefs into
2964 * the new indirect block.
2966 * XXX handle flushes.
2968 spin_lock(&above->cst.spin);
2969 for (i = 0; i < count; ++i) {
2971 * For keying purposes access the bref from the media or
2972 * from our in-memory cache. In cases where the in-memory
2973 * cache overrides the media the keyrefs will be the same
2974 * anyway so we can avoid checking the cache when the media
2977 child = hammer2_chain_find_locked(parent, i);
2979 if (child->flags & HAMMER2_CHAIN_DELETED) {
2980 if (ichain->index < 0)
2984 bref = &child->bref;
2985 } else if (base && base[i].type) {
2988 if (ichain->index < 0)
2994 * Skip keys not in the chosen half (low or high), only bit
2995 * (keybits - 1) needs to be compared but for safety we
2996 * will compare all msb bits plus that bit again.
2998 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2999 (key ^ bref->key)) != 0) {
3004 * This element is being moved from the parent, its slot
3005 * is available for our new indirect block.
3007 if (ichain->index < 0)
3011 * Load the new indirect block by acquiring or allocating
3012 * the related chain entries, then move them to the new
3013 * parent (ichain) by deleting them from their old location
3014 * and inserting a duplicate of the chain and any modified
3015 * sub-chain in the new location.
3017 * We must set MOVED in the chain being duplicated and
3018 * SUBMODIFIED in the parent(s) so the flush code knows
3019 * what is going on. The latter is done after the loop.
3021 * WARNING! above->cst.spin must be held when parent is
3022 * modified, even though we own the full blown lock,
3023 * to deal with setsubmod and rename races.
3024 * (XXX remove this req).
3026 spin_unlock(&above->cst.spin);
3027 chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
3028 hammer2_chain_delete(trans, chain);
3029 hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
3031 hammer2_chain_unlock(chain);
3032 KKASSERT(parent->refs > 0);
3034 spin_lock(&above->cst.spin);
3036 spin_unlock(&above->cst.spin);
3039 * Insert the new indirect block into the parent now that we've
3040 * cleared out some entries in the parent. We calculated a good
3041 * insertion index in the loop above (ichain->index).
3043 * We don't have to set MOVED here because we mark ichain modified
3044 * down below (so the normal modified -> flush -> set-moved sequence
3047 * The insertion shouldn't race as this is a completely new block
3048 * and the parent is locked.
3050 if (ichain->index < 0)
3051 kprintf("indirect parent %p count %d key %016jx/%d\n",
3052 parent, count, (intmax_t)key, keybits);
3053 KKASSERT(ichain->index >= 0);
3054 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3055 spin_lock(&above->cst.spin);
3056 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, ichain))
3057 panic("hammer2_chain_create_indirect: ichain insertion");
3058 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
3059 ichain->above = above;
3060 spin_unlock(&above->cst.spin);
3063 * Mark the new indirect block modified after insertion, which
3064 * will propagate up through parent all the way to the root and
3065 * also allocate the physical block in ichain for our caller,
3066 * and assign ichain->data to a pre-zero'd space (because there
3067 * is not prior data to copy into it).
3069 * We have to set SUBMODIFIED in ichain's flags manually so the
3070 * flusher knows it has to recurse through it to get to all of
3071 * our moved blocks, then call setsubmod() to set the bit
3074 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3075 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
3076 hammer2_chain_setsubmod(trans, ichain);
3079 * Figure out what to return.
3081 if (create_bits > keybits) {
3083 * Key being created is way outside the key range,
3084 * return the original parent.
3086 hammer2_chain_unlock(ichain);
3087 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
3088 (create_key ^ key)) {
3090 * Key being created is outside the key range,
3091 * return the original parent.
3093 hammer2_chain_unlock(ichain);
3096 * Otherwise its in the range, return the new parent.
3097 * (leave both the new and old parent locked).
3106 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3107 * set chain->delete_tid.
3109 * This function does NOT generate a modification to the parent. It
3110 * would be nearly impossible to figure out which parent to modify anyway.
3111 * Such modifications are handled by the flush code and are properly merged
3112 * using the flush synchronization point.
3114 * The find/get code will properly overload the RBTREE check on top of
3115 * the bref check to detect deleted entries.
3117 * This function is NOT recursive. Any entity already pushed into the
3118 * chain (such as an inode) may still need visibility into its contents,
3119 * as well as the ability to read and modify the contents. For example,
3120 * for an unlinked file which is still open.
3122 * NOTE: This function does NOT set chain->modify_tid, allowing future
3123 * code to distinguish between live and deleted chains by testing
3126 * NOTE: Deletions normally do not occur in the middle of a duplication
3127 * chain but we use a trick for hardlink migration that refactors
3128 * the originating inode without deleting it, so we make no assumptions
3132 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain)
3134 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3137 * Nothing to do if already marked.
3139 if (chain->flags & HAMMER2_CHAIN_DELETED)
3143 * We must set MOVED along with DELETED for the flush code to
3144 * recognize the operation and properly disconnect the chain
3147 * The setting of DELETED causes finds, lookups, and _next iterations
3148 * to no longer recognize the chain. RB_SCAN()s will still have
3149 * visibility (needed for flush serialization points).
3151 * We need the spinlock on the core whos RBTREE contains chain
3152 * to protect against races.
3154 spin_lock(&chain->above->cst.spin);
3155 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
3156 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3157 hammer2_chain_ref(chain);
3158 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
3160 chain->delete_tid = trans->sync_tid;
3161 spin_unlock(&chain->above->cst.spin);
3162 hammer2_chain_setsubmod(trans, chain);
3166 hammer2_chain_wait(hammer2_chain_t *chain)
3168 tsleep(chain, 0, "chnflw", 1);