2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain and hammer2_chain_core structures.
39 * Chains represent the filesystem media topology in-memory. Any given
40 * chain can represent an inode, indirect block, data, or other types
43 * This module provides APIs for direct and indirect block searches,
44 * iterations, recursions, creation, deletion, replication, and snapshot
45 * views (used by the flush and snapshot code).
47 * Generally speaking any modification made to a chain must propagate all
48 * the way back to the volume header, issuing copy-on-write updates to the
49 * blockref tables all the way up. Any chain except the volume header itself
50 * can be flushed to disk at any time, in any order. None of it matters
51 * until we get to the point where we want to synchronize the volume header
52 * (see the flush code).
54 * The chain structure supports snapshot views in time, which are primarily
55 * used until the related data and meta-data is flushed to allow the
56 * filesystem to make snapshots without requiring it to first flush,
57 * and to allow the filesystem to flush and modify the filesystem concurrently
58 * with minimal or no stalls.
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
65 #include <sys/kern_syscall.h>
70 static int hammer2_indirect_optimize; /* XXX SYSCTL */
72 static hammer2_chain_t *hammer2_chain_create_indirect(
73 hammer2_trans_t *trans, hammer2_chain_t *parent,
74 hammer2_key_t key, int keybits, int *errorp);
77 * We use a red-black tree to guarantee safe lookups under shared locks.
79 * Chains can be overloaded onto the same index, creating a different
80 * view of a blockref table based on a transaction id. The RBTREE
81 * deconflicts the view by sub-sorting on delete_tid.
83 * NOTE: Any 'current' chain which is not yet deleted will have a
84 * delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
86 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
89 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
91 if (chain1->index < chain2->index)
93 if (chain1->index > chain2->index)
95 if (chain1->delete_tid < chain2->delete_tid)
97 if (chain1->delete_tid > chain2->delete_tid)
103 * Recursively set the SUBMODIFIED flag up to the root starting at chain's
104 * parent. SUBMODIFIED is not set in chain itself.
106 * This function only operates on current-time transactions and is not
107 * used during flushes.
110 hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
112 hammer2_chain_core_t *above;
114 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
116 while ((above = chain->above) != NULL) {
117 spin_lock(&above->cst.spin);
118 chain = above->first_parent;
119 while (hammer2_chain_refactor_test(chain, 1))
120 chain = chain->next_parent;
121 atomic_set_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
122 spin_unlock(&above->cst.spin);
127 * Allocate a new disconnected chain element representing the specified
128 * bref. chain->refs is set to 1 and the passed bref is copied to
129 * chain->bref. chain->bytes is derived from the bref.
131 * chain->core is NOT allocated and the media data and bp pointers are left
132 * NULL. The caller must call chain_core_alloc() to allocate or associate
133 * a core with the chain.
135 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
/*
 * Allocate a new disconnected chain element for the given bref; refs is
 * set and delete_tid = MAX_TID marks the chain as live (not deleted).
 *
 * NOTE(review): extraction dropped lines in this region — the return-type
 * line, the switch(bref->type) opener, break/return statements, several
 * field assignments, and braces are missing.  Code lines below are
 * preserved verbatim; do not treat this fragment as compilable.
 */
138 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_trans_t *trans,
139 hammer2_blockref_t *bref)
141 hammer2_chain_t *chain;
/* chain->bytes is encoded as a radix in the low bits of data_off */
142 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
145 * Construct the appropriate system structure.
/* all regular chain types share one zeroed kmalloc allocation path */
148 case HAMMER2_BREF_TYPE_INODE:
149 case HAMMER2_BREF_TYPE_INDIRECT:
150 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
151 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
152 case HAMMER2_BREF_TYPE_DATA:
153 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
154 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
/* the volume header is embedded in the mount, never allocated here */
156 case HAMMER2_BREF_TYPE_VOLUME:
158 panic("hammer2_chain_alloc volume type illegal for op");
161 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
167 chain->index = -1; /* not yet assigned */
168 chain->bytes = bytes;
170 chain->flags = HAMMER2_CHAIN_ALLOCATED;
/* MAX_TID delete_tid == "current" (not yet deleted) view */
171 chain->delete_tid = HAMMER2_MAX_TID;
173 chain->modify_tid = trans->sync_tid;
179 * Associate an existing core with the chain or allocate a new core.
181 * The core is not locked. No additional refs on the chain are made.
/*
 * Associate an existing core with the chain or allocate a new core.
 * The core is not locked and no additional refs are made on the chain.
 *
 * NOTE(review): extraction dropped lines here — the return-type line,
 * the core==NULL branch structure, the append-to-parent-list statement
 * and braces are missing.  Code lines below are preserved verbatim.
 */
184 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
186 hammer2_chain_t **scanp;
/* a chain may only ever be given one core */
188 KKASSERT(chain->core == NULL);
189 KKASSERT(chain->next_parent == NULL);
/* no core supplied: allocate a fresh one and make chain its first parent */
192 core = kmalloc(sizeof(*core), chain->hmp->mchain,
194 RB_INIT(&core->rbtree);
197 ccms_cst_init(&core->cst, chain);
198 core->first_parent = chain;
/* core supplied: share it — bump sharecnt and link chain onto the
 * parent list under the core spinlock */
200 atomic_add_int(&core->sharecnt, 1);
202 spin_lock(&core->cst.spin);
203 scanp = &core->first_parent;
205 scanp = &(*scanp)->next_parent;
207 spin_unlock(&core->cst.spin);
212 * Add a reference to a chain element, preventing its destruction.
215 hammer2_chain_ref(hammer2_chain_t *chain)
217 atomic_add_int(&chain->refs, 1);
221 * Drop the caller's reference to the chain. When the ref count drops to
222 * zero this function will disassociate the chain from its parent and
223 * deallocate it, then recursively drop the parent using the implied ref
224 * from the chain's chain->parent.
226 * WARNING! Just because we are able to deallocate a chain doesn't mean
227 * that chain->core->rbtree is empty. There can still be a sharecnt
228 * on chain->core and RBTREE entries that refer to different parents.
230 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
/*
 * Drop the caller's reference to the chain; 1->0 transitions are handed
 * to hammer2_chain_lastdrop(), which may return another chain to drop
 * recursively.
 *
 * NOTE(review): extraction dropped lines here — the return-type line,
 * the local declarations feeding 'refs' and 'need', the retry loop and
 * braces are missing.  Code lines below are preserved verbatim.
 */
233 hammer2_chain_drop(hammer2_chain_t *chain)
/* MOVED and MODIFIED each hold an implied ref; refs must stay above that */
239 if (chain->flags & HAMMER2_CHAIN_MOVED)
241 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
243 KKASSERT(chain->refs > need);
/* 1->0 transition: let lastdrop do the careful teardown */
252 chain = hammer2_chain_lastdrop(chain);
/* otherwise a plain cmpset decrement; on failure loop and retry */
254 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
256 /* retry the same chain */
262 * Safe handling of the 1->0 transition on chain. Returns a chain for
263 * recursive drop or NULL, possibly returning the same chain of the atomic
266 * The cst spinlock is allowed nest child-to-parent (not parent-to-child).
/*
 * Safe handling of the 1->0 refs transition.  Returns a chain for a
 * recursive drop, or NULL.  The cst spinlock may nest child-to-parent
 * (never parent-to-child).
 *
 * NOTE(review): extraction dropped many lines in this function (return
 * statements, braces, some statements between the visible ones).  Code
 * lines below are preserved verbatim; the control flow shown is partial.
 */
270 hammer2_chain_lastdrop(hammer2_chain_t *chain)
272 hammer2_mount_t *hmp;
273 hammer2_chain_core_t *above;
274 hammer2_chain_core_t *core;
275 hammer2_chain_t **scanp;
276 hammer2_chain_t *parent;
279 * Spinlock the core and check to see if it is empty. If it is
280 * not empty we leave chain intact with refs == 0.
282 if ((core = chain->core) != NULL) {
283 spin_lock(&core->cst.spin);
284 if (!RB_EMPTY(&core->rbtree)) {
285 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
286 /* 1->0 transition successful */
287 spin_unlock(&core->cst.spin);
290 /* 1->0 transition failed, retry */
291 spin_unlock(&core->cst.spin);
301 * Spinlock the parent and try to drop the last ref. On success
302 * remove chain from its parent.
304 if ((above = chain->above) != NULL) {
305 spin_lock(&above->cst.spin);
306 if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
307 /* 1->0 transition failed */
308 spin_unlock(&above->cst.spin);
310 spin_unlock(&core->cst.spin);
316 * 1->0 transition successful
318 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
319 RB_REMOVE(hammer2_chain_tree, &above->rbtree, chain);
320 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
324 * Calculate a chain to return for a recursive drop.
326 * If the rbtree containing chain is empty we try to
327 * recursively drop one of our parents. Otherwise
328 * we try to recursively drop a sibling.
330 if (RB_EMPTY(&above->rbtree)) {
331 scanp = &above->first_parent;
332 while ((parent = *scanp) != NULL) {
/* only pick up a parent whose refs we can safely 0->1 */
333 if (parent->refs == 0 &&
334 atomic_cmpset_int(&parent->refs, 0, 1)) {
337 scanp = &parent->next_parent;
340 parent = RB_ROOT(&above->rbtree);
341 if (atomic_cmpset_int(&parent->refs, 0, 1) == 0)
344 spin_unlock(&above->cst.spin);
345 above = NULL; /* safety */
349 * We still have the core spinlock (if core is non-NULL). The
350 * above spinlock is gone.
/* unlink chain from the core's parent list */
353 scanp = &core->first_parent;
354 while (*scanp != chain)
355 scanp = &(*scanp)->next_parent;
356 *scanp = chain->next_parent;
357 chain->next_parent = NULL;
/* last sharer of the core frees it; cst must be idle at that point */
360 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
362 * On the 1->0 transition of core we can destroy
365 spin_unlock(&core->cst.spin);
366 KKASSERT(core->cst.count == 0);
367 KKASSERT(core->cst.upgrade == 0);
368 kfree(core, hmp->mchain);
370 spin_unlock(&core->cst.spin);
372 core = NULL; /* safety */
376 * All spin locks are gone, finish freeing stuff.
378 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
379 HAMMER2_CHAIN_MODIFIED)) == 0);
381 switch(chain->bref.type) {
382 case HAMMER2_BREF_TYPE_VOLUME:
385 case HAMMER2_BREF_TYPE_INODE:
/* inode data is an embedded copy allocated in chain_lock(); free it */
387 kfree(chain->data, hmp->minode);
392 KKASSERT(chain->data == NULL);
396 KKASSERT(chain->bp == NULL);
399 if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
400 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
401 kfree(chain, hmp->mchain);
408 * Ref and lock a chain element, acquiring its data with I/O if necessary,
409 * and specify how you would like the data to be resolved.
411 * Returns 0 on success or an error code if the data could not be acquired.
412 * The chain element is locked on return regardless of whether an error
415 * The lock is allowed to recurse, multiple locking ops will aggregate
416 * the requested resolve types. Once data is assigned it will not be
417 * removed until the last unlock.
419 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
420 * (typically used to avoid device/logical buffer
423 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
424 * the INITIAL-create state (indirect blocks only).
426 * Do not resolve data elements for DATA chains.
427 * (typically used to avoid device/logical buffer
430 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
432 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
433 * it will be locked exclusive.
435 * NOTE: Embedded elements (volume header, inodes) are always resolved
438 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
439 * element will instantiate and zero its buffer, and flush it on
442 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
443 * so as not to instantiate a device buffer, which could alias against
444 * a logical file buffer. However, if ALWAYS is specified the
445 * device buffer will be instantiated anyway.
447 * WARNING! If data must be fetched a shared lock will temporarily be
448 * upgraded to exclusive. However, a deadlock can occur if
449 * the caller owns more than one shared lock.
/*
 * Ref and lock a chain element, resolving its data with I/O if necessary
 * per the RESOLVE_* policy in 'how'.  See the block comment above for the
 * full policy description.
 *
 * NOTE(review): extraction dropped many lines in this function (locals,
 * return statements, braces, some assignments such as hmp/core setup).
 * Code lines below are preserved verbatim; control flow shown is partial.
 */
452 hammer2_chain_lock(hammer2_chain_t *chain, int how)
454 hammer2_mount_t *hmp;
455 hammer2_chain_core_t *core;
456 hammer2_blockref_t *bref;
466 * Ref and lock the element. Recursive locks are allowed.
468 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
469 hammer2_chain_ref(chain);
470 atomic_add_int(&chain->lockcnt, 1);
473 KKASSERT(hmp != NULL);
476 * Get the appropriate lock.
479 if (how & HAMMER2_RESOLVE_SHARED)
480 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
482 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
485 * If we already have a valid data pointer no further action is
492 * Do we have to resolve the data?
494 switch(how & HAMMER2_RESOLVE_MASK) {
495 case HAMMER2_RESOLVE_NEVER:
497 case HAMMER2_RESOLVE_MAYBE:
498 if (chain->flags & HAMMER2_CHAIN_INITIAL)
500 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
502 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
505 case HAMMER2_RESOLVE_ALWAYS:
510 * Upgrade to an exclusive lock so we can safely manipulate the
511 * buffer cache. If another thread got to it before us we
514 ostate = ccms_thread_lock_upgrade(&core->cst);
516 ccms_thread_lock_downgrade(&core->cst, ostate);
521 * We must resolve to a device buffer, either by issuing I/O or
522 * by creating a zero-fill element. We do not mark the buffer
523 * dirty when creating a zero-fill element (the hammer2_chain_modify()
524 * API must still be used to do that).
526 * The device buffer is variable-sized in powers of 2 down
527 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
528 * chunk always contains buffers of the same size. (XXX)
530 * The minimum physical IO size may be larger than the variable
535 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
536 bbytes = HAMMER2_MINIOSIZE;
/* pbase = buffer-aligned base, boff = offset of our data within it */
537 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
538 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
539 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
540 KKASSERT(pbase != 0);
543 * The getblk() optimization can only be used on newly created
544 * elements if the physical block size matches the request.
546 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
547 chain->bytes == bbytes) {
548 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
550 } else if (hammer2_cluster_enable) {
551 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
552 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
555 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
/* NOTE(review): message says "hammer2_chain_get" but this is
 * hammer2_chain_lock — stale tag, left as-is (runtime string) */
559 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
560 (intmax_t)pbase, error);
563 ccms_thread_lock_downgrade(&core->cst, ostate);
568 * Zero the data area if the chain is in the INITIAL-create state.
569 * Mark the buffer for bdwrite().
571 bdata = (char *)chain->bp->b_data + boff;
572 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
573 bzero(bdata, chain->bytes);
574 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
578 * Setup the data pointer, either pointing it to an embedded data
579 * structure and copying the data from the buffer, or pointing it
582 * The buffer is not retained when copying to an embedded data
583 * structure in order to avoid potential deadlocks or recursions
584 * on the same physical buffer.
586 switch (bref->type) {
587 case HAMMER2_BREF_TYPE_VOLUME:
589 * Copy data from bp to embedded buffer
591 panic("hammer2_chain_lock: called on unresolved volume header");
594 KKASSERT(pbase == 0);
595 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
596 bcopy(bdata, &hmp->voldata, chain->bytes);
597 chain->data = (void *)&hmp->voldata;
602 case HAMMER2_BREF_TYPE_INODE:
604 * Copy data from bp to embedded buffer, do not retain the
607 KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
608 chain->data = kmalloc(sizeof(chain->data->ipdata),
609 hmp->minode, M_WAITOK | M_ZERO);
610 bcopy(bdata, &chain->data->ipdata, chain->bytes);
614 case HAMMER2_BREF_TYPE_INDIRECT:
615 case HAMMER2_BREF_TYPE_DATA:
616 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
617 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
618 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
621 * Point data at the device buffer and leave bp intact.
623 chain->data = (void *)bdata;
628 * Make sure the bp is not specifically owned by this thread before
629 * restoring to a possibly shared lock, so another hammer2 thread
633 BUF_KERNPROC(chain->bp);
634 ccms_thread_lock_downgrade(&core->cst, ostate);
639 * Unlock and deref a chain element.
641 * On the last lock release any non-embedded data (chain->bp) will be
/*
 * Unlock and deref a chain element.  On the last lock release any
 * non-embedded data (chain->bp) is retired, possibly with a write.
 *
 * NOTE(review): extraction dropped many lines in this function (locals
 * such as lockcnt/ostate/counterp, return statements, braces, the
 * counter-accumulation statements).  Code lines below are preserved
 * verbatim; control flow shown is partial.
 */
645 hammer2_chain_unlock(hammer2_chain_t *chain)
647 hammer2_chain_core_t *core = chain->core;
653 * The core->cst lock can be shared across several chains so we
654 * need to track the per-chain lockcnt separately.
656 * If multiple locks are present (or being attempted) on this
657 * particular chain we can just unlock, drop refs, and return.
659 * Otherwise fall-through on the 1->0 transition.
662 lockcnt = chain->lockcnt;
663 KKASSERT(lockcnt > 0);
666 if (atomic_cmpset_int(&chain->lockcnt,
667 lockcnt, lockcnt - 1)) {
668 ccms_thread_unlock(&core->cst);
669 hammer2_chain_drop(chain);
673 if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
680 * On the 1->0 transition we upgrade the core lock (if necessary)
681 * to exclusive for terminal processing. If after upgrading we find
682 * that lockcnt is non-zero, another thread is racing us and will
683 * handle the unload for us later on, so just cleanup and return
684 * leaving the data/bp intact
686 * Otherwise if lockcnt is still 0 it is possible for it to become
687 * non-zero and race, but since we hold the core->cst lock
688 * exclusively all that will happen is that the chain will be
689 * reloaded after we unload it.
691 ostate = ccms_thread_lock_upgrade(&core->cst);
692 if (chain->lockcnt) {
693 ccms_thread_unlock_upgraded(&core->cst, ostate);
694 hammer2_chain_drop(chain);
699 * Shortcut the case if the data is embedded or not resolved.
701 * Do NOT NULL out chain->data (e.g. inode data), it might be
704 * The DIRTYBP flag is non-applicable in this situation and can
705 * be cleared to keep the flags state clean.
707 if (chain->bp == NULL) {
708 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
709 ccms_thread_unlock_upgraded(&core->cst, ostate);
710 hammer2_chain_drop(chain);
/* statistics: pick the counter matching the bref type; the IOFLUSH
 * set counts async-flush writes, the other set normal deferred writes */
717 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
719 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
720 switch(chain->bref.type) {
721 case HAMMER2_BREF_TYPE_DATA:
722 counterp = &hammer2_ioa_file_write;
724 case HAMMER2_BREF_TYPE_INODE:
725 counterp = &hammer2_ioa_meta_write;
727 case HAMMER2_BREF_TYPE_INDIRECT:
728 counterp = &hammer2_ioa_indr_write;
730 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
731 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
732 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
733 counterp = &hammer2_ioa_fmap_write;
736 counterp = &hammer2_ioa_volu_write;
741 switch(chain->bref.type) {
742 case HAMMER2_BREF_TYPE_DATA:
743 counterp = &hammer2_iod_file_write;
745 case HAMMER2_BREF_TYPE_INODE:
746 counterp = &hammer2_iod_meta_write;
748 case HAMMER2_BREF_TYPE_INDIRECT:
749 counterp = &hammer2_iod_indr_write;
751 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
752 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
753 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
754 counterp = &hammer2_iod_fmap_write;
757 counterp = &hammer2_iod_volu_write;
766 * If a device buffer was used for data be sure to destroy the
767 * buffer when we are done to avoid aliases (XXX what about the
768 * underlying VM pages?).
770 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
773 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
774 chain->bp->b_flags |= B_RELBUF;
777 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
778 * or not. The flag will get re-set when chain_modify() is called,
779 * even if MODIFIED is already set, allowing the OS to retire the
780 * buffer independent of a hammer2 flush.
783 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
784 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
785 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
786 atomic_clear_int(&chain->flags,
787 HAMMER2_CHAIN_IOFLUSH);
788 chain->bp->b_flags |= B_RELBUF;
789 cluster_awrite(chain->bp);
791 chain->bp->b_flags |= B_CLUSTEROK;
795 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
796 atomic_clear_int(&chain->flags,
797 HAMMER2_CHAIN_IOFLUSH);
798 chain->bp->b_flags |= B_RELBUF;
801 /* bp might still be dirty */
806 ccms_thread_unlock_upgraded(&core->cst, ostate);
807 hammer2_chain_drop(chain);
811 * Resize the chain's physical storage allocation in-place. This may
812 * replace the passed-in chain with a new chain.
814 * Chains can be resized smaller without reallocating the storage.
815 * Resizing larger will reallocate the storage.
817 * Must be passed an exclusively locked parent and chain, returns a new
818 * exclusively locked chain at the same index and unlocks the old chain.
819 * Flushes the buffer if necessary.
821 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
822 * to avoid instantiating a device buffer that conflicts with the vnode
823 * data buffer. That is, the passed-in bp is a logical buffer, whereas
824 * any chain-oriented bp would be a device buffer.
826 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
827 * XXX return error if cannot resize.
/*
 * Resize the chain's physical storage allocation in-place via
 * delete-duplicate plus a new freemap allocation; may replace *chainp.
 *
 * NOTE(review): extraction dropped lines here — the return-type line,
 * locals (obytes/nbytes/bbytes/pbase/boff), several statements
 * (e.g. *chainp update, the freemap_alloc argument tail) and braces are
 * missing.  Code lines below are preserved verbatim.
 */
830 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
832 hammer2_chain_t *parent, hammer2_chain_t **chainp,
833 int nradix, int flags)
835 hammer2_mount_t *hmp = trans->hmp;
836 hammer2_chain_t *chain = *chainp;
844 * Only data and indirect blocks can be resized for now.
845 * (The volu root, inodes, and freemap elements use a fixed size).
847 KKASSERT(chain != &hmp->vchain);
848 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
849 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
852 * Nothing to do if the element is already the proper size
854 obytes = chain->bytes;
855 nbytes = 1U << nradix;
856 if (obytes == nbytes)
860 * Delete the old chain and duplicate it at the same (parent, index),
861 * returning a new chain. This allows the old chain to still be
862 * used by the flush code. Duplication occurs in-place.
864 * The parent does not have to be locked for the delete/duplicate call,
865 * but is in this particular code path.
867 * NOTE: If we are not crossing a synchronization point the
868 * duplication code will simply reuse the existing chain
871 hammer2_chain_delete_duplicate(trans, &chain);
874 * Set MODIFIED and add a chain ref to prevent destruction. Both
875 * modified flags share the same ref. (duplicated chains do not
876 * start out MODIFIED unless possibly if the duplication code
877 * decided to reuse the existing chain as-is).
879 * If the chain is already marked MODIFIED then we can safely
880 * return the previous allocation to the pool without having to
881 * worry about snapshots. XXX check flush synchronization.
883 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
884 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
885 hammer2_chain_ref(chain);
889 * Relocate the block, even if making it smaller (because different
890 * block sizes may be in different regions).
892 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
894 chain->bytes = nbytes;
895 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
898 * The device buffer may be larger than the allocation size.
900 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
901 bbytes = HAMMER2_MINIOSIZE;
902 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
903 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
906 * For now just support it on DATA chains (and not on indirect
909 KKASSERT(chain->bp == NULL);
912 * Make sure the chain is marked MOVED and SUBMOD is set in the
913 * parent(s) so the adjustments are picked up by flush.
/* MOVED holds its own implied ref, mirroring MODIFIED above */
915 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
916 hammer2_chain_ref(chain);
917 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
919 hammer2_chain_setsubmod(trans, chain);
924 * Set a chain modified, making it read-write and duplicating it if necessary.
925 * This function will assign a new physical block to the chain if necessary
927 * Duplication of already-modified chains is possible when the modification
928 * crosses a flush synchronization boundary.
930 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
931 * level or the COW operation will not work.
933 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
934 * run the data through the device buffers.
936 * This function may return a different chain than was passed, in which case
937 * the old chain will be unlocked and the new chain will be locked.
939 * ip->chain may be adjusted by hammer2_chain_modify_ip().
941 hammer2_inode_data_t *
942 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
943 hammer2_chain_t **chainp, int flags)
945 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
946 hammer2_chain_modify(trans, chainp, flags);
947 if (ip->chain != *chainp)
948 hammer2_inode_repoint(ip, NULL, *chainp);
949 return(&ip->chain->data->ipdata);
/*
 * Set a chain modified, making it read-write and delete-duplicating it
 * if the modification crosses a flush synchronization boundary.  May
 * replace *chainp.
 *
 * NOTE(review): extraction dropped many lines in this function (locals
 * such as nbp/error/bbytes/pbase/boff/bdata, returns/breaks/gotos,
 * braces, and some statement tails).  Code lines below are preserved
 * verbatim; control flow shown is partial.
 */
953 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
956 hammer2_mount_t *hmp = trans->hmp;
957 hammer2_chain_t *chain;
966 * modify_tid is only updated for primary modifications, not for
967 * propagated brefs. mirror_tid will be updated regardless during
968 * the flush, no need to set it here.
973 * If the chain is already marked MODIFIED we can usually just
974 * return. However, if a modified chain is modified again in
975 * a synchronization-point-crossing manner we have to
976 * delete/duplicate the chain so as not to interfere with the
977 * atomicity of the flush.
979 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
980 if (chain->modify_tid <= hmp->flush_tid &&
981 trans->sync_tid > hmp->flush_tid) {
983 * Modifications cross synchronization point,
984 * requires delete-duplicate.
986 KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
987 hammer2_chain_delete_duplicate(trans, chainp);
989 /* fall through using duplicate */
992 * It is possible that a prior lock/modify sequence
993 * retired the buffer. During this lock/modify
994 * sequence MODIFIED may still be set but the buffer
995 * could wind up clean. Since the caller is going
996 * to modify the buffer further we have to be sure
997 * that DIRTYBP is set so our chain code knows to
998 * bwrite/bdwrite the bp.
1000 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1001 chain->bp == NULL) {
1004 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1007 * Must still adjust these fields in the
1008 * already-modified path.
1010 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1011 chain->bref.modify_tid = trans->sync_tid;
1012 chain->modify_tid = trans->sync_tid;
1017 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1018 chain->bref.modify_tid = trans->sync_tid;
1021 * Set MODIFIED and add a chain ref to prevent destruction. Both
1022 * modified flags share the same ref.
1024 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1025 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1026 hammer2_chain_ref(chain);
1030 * Adjust chain->modify_tid so the flusher knows when the
1031 * modification occurred.
1033 chain->modify_tid = trans->sync_tid;
1036 * We must allocate the copy-on-write block.
1038 * If the data is embedded no other action is required.
1040 * If the data is not embedded we acquire and clear the
1041 * new block. If chain->data is not NULL we then do the
1042 * copy-on-write. chain->data will then be repointed to the new
1043 * buffer and the old buffer will be released.
1045 * For newly created elements with no prior allocation we go
1046 * through the copy-on-write steps except without the copying part.
1048 if (chain != &hmp->vchain) {
1049 if ((hammer2_debug & 0x0001) &&
1050 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
1051 kprintf("Replace %d\n", chain->bytes);
1053 chain->bref.data_off =
1054 hammer2_freemap_alloc(hmp, chain->bref.type,
1056 /* XXX failed allocation */
1060 * If data instantiation is optional and the chain has no current
1061 * data association (typical for DATA and newly-created INDIRECT
1062 * elements), don't instantiate the buffer now.
1064 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
1069 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
1070 * written-out on unlock. This bit is independent of the MODIFIED
1071 * bit because the chain may still need meta-data adjustments done
1072 * by virtue of MODIFIED for its parent, and the buffer can be
1073 * flushed out (possibly multiple times) by the OS before that.
1075 * Clearing the INITIAL flag (for indirect blocks) indicates that
1076 * a zero-fill buffer has been instantiated.
1078 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1079 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1082 * We currently should never instantiate a device buffer for a
1083 * file data chain. (We definitely can for a freemap chain).
1085 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1088 * Execute COW operation
1090 switch(chain->bref.type) {
1091 case HAMMER2_BREF_TYPE_VOLUME:
1092 case HAMMER2_BREF_TYPE_INODE:
1094 * The data is embedded, no copy-on-write operation is
1097 KKASSERT(chain->bp == NULL);
1099 case HAMMER2_BREF_TYPE_DATA:
1100 case HAMMER2_BREF_TYPE_INDIRECT:
1101 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1102 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1103 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1105 * Perform the copy-on-write operation
1107 KKASSERT(chain != &hmp->vchain); /* safety */
1109 * The device buffer may be larger than the allocation size.
1111 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
1112 bbytes = HAMMER2_MINIOSIZE;
1113 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1114 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1117 * The getblk() optimization can only be used if the
1118 * physical block size matches the request.
1120 if (chain->bytes == bbytes) {
1121 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
1124 error = bread(hmp->devvp, pbase, bbytes, &nbp);
1125 KKASSERT(error == 0);
1127 bdata = (char *)nbp->b_data + boff;
1130 * Copy or zero-fill on write depending on whether
1131 * chain->data exists or not.
1134 bcopy(chain->data, bdata, chain->bytes);
1135 KKASSERT(chain->bp != NULL);
1137 bzero(bdata, chain->bytes);
/* release the old buffer after copying out of it */
1140 chain->bp->b_flags |= B_RELBUF;
1144 chain->data = bdata;
1147 panic("hammer2_chain_modify: illegal non-embedded type %d",
1153 hammer2_chain_setsubmod(trans, chain);
1157 * Mark the volume as having been modified. This short-cut version
1158 * does not have to lock the volume's chain, which allows the ioctl
1159 * code to make adjustments to connections without deadlocking. XXX
1161 * No ref is made on vchain when flagging it MODIFIED.
1164 hammer2_modify_volume(hammer2_mount_t *hmp)
1166 hammer2_voldata_lock(hmp);
1167 hammer2_voldata_unlock(hmp, 1);
1171 * Locate an in-memory chain. The parent must be locked. The in-memory
1172 * chain is returned with a reference and without a lock, or NULL
1175 * This function returns the chain at the specified index with the highest
1176 * delete_tid. The caller must check whether the chain is flagged
1177 * CHAIN_DELETED or not. However, because chain iterations can be removed
1178 * from memory we must ALSO check that DELETED chains are not flushed. A
1179 * DELETED chain which has been flushed must be ignored (the caller must
1180 * check the parent's blockref array).
1182 * NOTE: If no chain is found the caller usually must check the on-media
1183 * array to determine if a blockref exists at the index.
1185 struct hammer2_chain_find_info {
	/* best matching chain found so far (the one with the highest delete_tid) */
1186 	hammer2_chain_t *best;
	/* highest delete_tid seen during the scan; callers initialize it to 0 */
1187 	hammer2_tid_t delete_tid;
/*
 * RB_SCAN comparison callback: select children whose index matches
 * info->index.  Presumably returns negative/positive for the below/above
 * cases per the tree(3) RB_SCAN convention -- TODO confirm.
 */
1193 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1195 	struct hammer2_chain_find_info *info = data;
1197 	if (child->index < info->index)
1199 	if (child->index > info->index)
/*
 * RB_SCAN match callback: remember the matching child with the highest
 * delete_tid in the shared find_info.
 */
1206 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1208 	struct hammer2_chain_find_info *info = data;
	/* keep only the child with the largest delete_tid seen so far */
1210 	if (info->delete_tid < child->delete_tid) {
1211 		info->delete_tid = child->delete_tid;
/*
 * Scan the parent's in-memory rbtree for the chain at (index) with the
 * highest delete_tid.  Caller is expected to hold the core spinlock
 * (hammer2_chain_find() wraps this with it).  No ref is added here.
 */
1219 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1221 	struct hammer2_chain_find_info info;
1222 	hammer2_chain_t *child;
	/* 0 == nothing found yet; the scan callback keeps the maximum */
1225 	info.delete_tid = 0;
1228 	RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1229 		hammer2_chain_find_cmp, hammer2_chain_find_callback,
/*
 * Spinlocked wrapper around hammer2_chain_find_locked().  Per the API
 * comment above, the returned chain (if any) is referenced but not locked.
 */
1237 hammer2_chain_find(hammer2_chain_t *parent, int index)
1239 	hammer2_chain_t *child;
1241 	spin_lock(&parent->core->cst.spin);
1242 	child = hammer2_chain_find_locked(parent, index);
	/* ref while still holding the spinlock so the child cannot be ripped out */
1244 	hammer2_chain_ref(child);
1245 	spin_unlock(&parent->core->cst.spin);
1251 * Return a locked chain structure with all associated data acquired.
1252 * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1254 * Caller must hold the parent locked shared or exclusive since we may
1255 * need the parent's bref array to find our block.
1257 * The returned child is locked as requested. If NOLOCK, the returned
1258 * child is still at least referenced.
1261 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1263 hammer2_blockref_t *bref;
1264 hammer2_mount_t *hmp = parent->hmp;
1265 hammer2_chain_core_t *above = parent->core;
1266 hammer2_chain_t *chain;
1267 hammer2_chain_t dummy;
1271 * Figure out how to lock. MAYBE can be used to optimize
1272 * the initial-create state for indirect blocks.
1274 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1275 how = HAMMER2_RESOLVE_NEVER;
1277 how = HAMMER2_RESOLVE_MAYBE;
1278 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1279 how |= HAMMER2_RESOLVE_SHARED;
1283 * First see if we have a (possibly modified) chain element cached
1284 * for this (parent, index). Acquire the data if necessary.
1286 * If chain->data is non-NULL the chain should already be marked
/* dummy key: match (index) and sort after any real delete_tid */
1290 dummy.index = index;
1291 dummy.delete_tid = HAMMER2_MAX_TID;
1292 spin_lock(&above->cst.spin);
1293 chain = RB_FIND(hammer2_chain_tree, &above->rbtree, &dummy);
1295 hammer2_chain_ref(chain);
1296 spin_unlock(&above->cst.spin);
1297 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1298 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1301 spin_unlock(&above->cst.spin);
1304 * The parent chain must not be in the INITIAL state.
1306 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1307 panic("hammer2_chain_get: Missing bref(1)");
1312 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1313 * the parent's bref to determine where and how big the array is).
1315 switch(parent->bref.type) {
1316 case HAMMER2_BREF_TYPE_INODE:
1317 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1318 bref = &parent->data->ipdata.u.blockset.blockref[index];
1320 case HAMMER2_BREF_TYPE_INDIRECT:
1321 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1322 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1323 KKASSERT(parent->data != NULL);
1324 KKASSERT(index >= 0 &&
1325 index < parent->bytes / sizeof(hammer2_blockref_t));
1326 bref = &parent->data->npdata.blockref[index];
1328 case HAMMER2_BREF_TYPE_VOLUME:
1329 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1330 bref = &hmp->voldata.sroot_blockset.blockref[index];
1334 panic("hammer2_chain_get: unrecognized blockref type: %d",
1337 if (bref->type == 0) {
1338 panic("hammer2_chain_get: Missing bref(2)");
1343 * Allocate a chain structure representing the existing media
1344 * entry. Resulting chain has one ref and is not locked.
1346 * The locking operation we do later will issue I/O to read it.
1348 chain = hammer2_chain_alloc(hmp, NULL, bref);
1349 hammer2_chain_core_alloc(chain, NULL); /* ref'd chain returned */
1352 * Link the chain into its parent. A spinlock is required to safely
1353 * access the RBTREE, and it is possible to collide with another
1354 * hammer2_chain_get() operation because the caller might only hold
1355 * a shared lock on the parent.
1357 KKASSERT(parent->refs > 0);
1358 spin_lock(&above->cst.spin);
1359 chain->above = above;
1360 chain->index = index;
/* lost the race to another chain_get(): discard our chain and use theirs */
1361 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain)) {
1362 chain->above = NULL;
1364 spin_unlock(&above->cst.spin);
1365 hammer2_chain_drop(chain);
1368 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1369 spin_unlock(&above->cst.spin);
1372 * Our new chain is referenced but NOT locked. Lock the chain
1373 * below. The locking operation also resolves its data.
1375 * If NOLOCK is set the release will release the one-and-only lock.
1377 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1378 hammer2_chain_lock(chain, how); /* recursive lock */
1379 hammer2_chain_drop(chain); /* excess ref */
1385 * Lookup initialization/completion API
/*
 * Lock the parent for a lookup sequence, honoring the caller's SHARED
 * request; always resolves the parent's data (RESOLVE_ALWAYS).
 */
1388 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1390 	if (flags & HAMMER2_LOOKUP_SHARED) {
1391 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1392 				   HAMMER2_RESOLVE_SHARED);
1394 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
/* Release the parent lock acquired by hammer2_chain_lookup_init(). */
1400 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1403 	hammer2_chain_unlock(parent);
/*
 * Replace (*parentp) with its own parent: pick the first parent on the
 * core's parent list that survives the refactor test, ref it under the
 * core spinlock, then swap locks (unlock old, lock new with NOREF so the
 * temporary ref is consumed by the lock).
 */
1408 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1410 	hammer2_chain_t *oparent;
1411 	hammer2_chain_t *nparent;
1412 	hammer2_chain_core_t *above;
1415 	above = oparent->above;
1417 	spin_lock(&above->cst.spin);
1418 	nparent = above->first_parent;
	/* skip parents being refactored; list walk is protected by the spinlock */
1419 	while (hammer2_chain_refactor_test(nparent, 1))
1420 		nparent = nparent->next_parent;
1421 	hammer2_chain_ref(nparent);	/* protect nparent, use in lock */
1422 	spin_unlock(&above->cst.spin);
1424 	hammer2_chain_unlock(oparent);
1425 	hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1432 * Locate any key between key_beg and key_end inclusive. (*parentp)
1433 * typically points to an inode but can also point to a related indirect
1434 * block and this function will recurse upwards and find the inode again.
1436 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1437 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1438 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1440 * (*parentp) must be exclusively locked and referenced and can be an inode
1441 * or an existing indirect block within the inode.
1443 * On return (*parentp) will be modified to point at the deepest parent chain
1444 * element encountered during the search, as a helper for an insertion or
1445 * deletion. The new (*parentp) will be locked and referenced and the old
1446 * will be unlocked and dereferenced (no change if they are both the same).
1448 * The matching chain will be returned exclusively locked. If NOLOCK is
1449 * requested the chain will be returned only referenced.
1451 * NULL is returned if no match was found, but (*parentp) will still
1452 * potentially be adjusted.
1454 * This function will also recurse up the chain if the key is not within the
1455 * current parent's range. (*parentp) can never be set to NULL. An iteration
1456 * can simply allow (*parentp) to float inside the loop.
1459 hammer2_chain_lookup(hammer2_chain_t **parentp,
1460 hammer2_key_t key_beg, hammer2_key_t key_end,
1463 hammer2_mount_t *hmp;
1464 hammer2_chain_t *parent;
1465 hammer2_chain_t *chain;
1466 hammer2_chain_t *tmp;
1467 hammer2_blockref_t *base;
1468 hammer2_blockref_t *bref;
1469 hammer2_key_t scan_beg;
1470 hammer2_key_t scan_end;
1473 int how_always = HAMMER2_RESOLVE_ALWAYS;
1474 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1476 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1477 how_maybe |= HAMMER2_RESOLVE_SHARED;
1478 how_always |= HAMMER2_RESOLVE_SHARED;
1482 * Recurse (*parentp) upward if necessary until the parent completely
1483 * encloses the key range or we hit the inode.
1488 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1489 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1490 scan_beg = parent->bref.key;
1491 scan_end = scan_beg +
1492 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
/* stop climbing once the parent's key range encloses [key_beg, key_end] */
1493 if (key_beg >= scan_beg && key_end <= scan_end)
1495 parent = hammer2_chain_getparent(parentp, how_maybe);
1500 * Locate the blockref array. Currently we do a fully associative
1501 * search through the array.
1503 switch(parent->bref.type) {
1504 case HAMMER2_BREF_TYPE_INODE:
1506 * Special shortcut for embedded data returns the inode
1507 * itself. Callers must detect this condition and access
1508 * the embedded data (the strategy code does this for us).
1510 * This is only applicable to regular files and softlinks.
1512 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1513 if (flags & HAMMER2_LOOKUP_NOLOCK)
1514 hammer2_chain_ref(parent);
1516 hammer2_chain_lock(parent, how_always);
1519 base = &parent->data->ipdata.u.blockset.blockref[0];
1520 count = HAMMER2_SET_COUNT;
1522 case HAMMER2_BREF_TYPE_INDIRECT:
1523 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1524 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1526 * Optimize indirect blocks in the INITIAL state to avoid
1529 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1532 if (parent->data == NULL)
1533 panic("parent->data is NULL");
1534 base = &parent->data->npdata.blockref[0];
1536 count = parent->bytes / sizeof(hammer2_blockref_t);
1538 case HAMMER2_BREF_TYPE_VOLUME:
1539 base = &hmp->voldata.sroot_blockset.blockref[0];
1540 count = HAMMER2_SET_COUNT;
1543 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1545 base = NULL; /* safety */
1546 count = 0; /* safety */
1550 * If the element and key overlap we use the element.
1552 * NOTE! Deleted elements are effectively invisible. Deletions
1553 * proactively clear the parent bref to the deleted child
1554 * so we do not try to shadow here to avoid parent updates
1555 * (which would be difficult since multiple deleted elements
1556 * might represent different flush synchronization points).
1559 for (i = 0; i < count; ++i) {
/* in-memory children override the media bref at the same slot */
1560 tmp = hammer2_chain_find(parent, i);
1562 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1563 hammer2_chain_drop(tmp);
1567 KKASSERT(bref->type != 0);
1568 } else if (base == NULL || base[i].type == 0) {
1573 scan_beg = bref->key;
1574 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1576 hammer2_chain_drop(tmp);
/* inclusive interval-overlap test between element and requested range */
1577 if (key_beg <= scan_end && key_end >= scan_beg)
1581 if (key_beg == key_end)
1583 return (hammer2_chain_next(parentp, NULL,
1584 key_beg, key_end, flags));
1588 * Acquire the new chain element. If the chain element is an
1589 * indirect block we must search recursively.
1591 * It is possible for the tmp chain above to be removed from
1592 * the RBTREE but the parent lock ensures it would not have been
1593 * destroyed from the media, so the chain_get() code will simply
1594 * reload it from the media in that case.
1596 chain = hammer2_chain_get(parent, i, flags);
1601 * If the chain element is an indirect block it becomes the new
1602 * parent and we loop on it.
1604 * The parent always has to be locked with at least RESOLVE_MAYBE
1605 * so we can access its data. It might need a fixup if the caller
1606 * passed incompatible flags. Be careful not to cause a deadlock
1607 * as a data-load requires an exclusive lock.
1609 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1610 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1611 hammer2_chain_unlock(parent);
1612 *parentp = parent = chain;
1613 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1614 hammer2_chain_lock(chain, how_maybe |
1615 HAMMER2_RESOLVE_NOREF);
1616 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1617 chain->data == NULL) {
/* relock with data resolution: ref first so the unlock cannot free it */
1618 hammer2_chain_ref(chain);
1619 hammer2_chain_unlock(chain);
1620 hammer2_chain_lock(chain, how_maybe |
1621 HAMMER2_RESOLVE_NOREF);
1627 * All done, return the chain
1633 * After having issued a lookup we can iterate all matching keys.
1635 * If chain is non-NULL we continue the iteration from just after its index.
1637 * If chain is NULL we assume the parent was exhausted and continue the
1638 * iteration at the next parent.
1640 * parent must be locked on entry and remains locked throughout. chain's
1641 * lock status must match flags. Chain is always at least referenced.
1644 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1645 hammer2_key_t key_beg, hammer2_key_t key_end,
1648 hammer2_mount_t *hmp;
1649 hammer2_chain_t *parent;
1650 hammer2_chain_t *tmp;
1651 hammer2_blockref_t *base;
1652 hammer2_blockref_t *bref;
1653 hammer2_key_t scan_beg;
1654 hammer2_key_t scan_end;
1656 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1659 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1660 how_maybe |= HAMMER2_RESOLVE_SHARED;
1667 * Calculate the next index and recalculate the parent if necessary.
1671 * Continue iteration within current parent. If not NULL
1672 * the passed-in chain may or may not be locked, based on
1673 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1676 i = chain->index + 1;
/* NOLOCK callers handed us a ref-only chain; otherwise it is locked */
1677 if (flags & HAMMER2_LOOKUP_NOLOCK)
1678 hammer2_chain_drop(chain);
1680 hammer2_chain_unlock(chain);
1683 * Any scan where the lookup returned degenerate data embedded
1684 * in the inode has an invalid index and must terminate.
1686 if (chain == parent)
1689 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1690 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1692 * We reached the end of the iteration.
1697 * Continue iteration with next parent unless the current
1698 * parent covers the range.
1700 scan_beg = parent->bref.key;
1701 scan_end = scan_beg +
1702 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1703 if (key_beg >= scan_beg && key_end <= scan_end)
/* exhausted this parent: resume just past its slot in the grandparent */
1706 i = parent->index + 1;
1707 parent = hammer2_chain_getparent(parentp, how_maybe);
1712 * Locate the blockref array. Currently we do a fully associative
1713 * search through the array.
1715 switch(parent->bref.type) {
1716 case HAMMER2_BREF_TYPE_INODE:
1717 base = &parent->data->ipdata.u.blockset.blockref[0];
1718 count = HAMMER2_SET_COUNT;
1720 case HAMMER2_BREF_TYPE_INDIRECT:
1721 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1722 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1723 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1726 KKASSERT(parent->data != NULL);
1727 base = &parent->data->npdata.blockref[0];
1729 count = parent->bytes / sizeof(hammer2_blockref_t);
1731 case HAMMER2_BREF_TYPE_VOLUME:
1732 base = &hmp->voldata.sroot_blockset.blockref[0];
1733 count = HAMMER2_SET_COUNT;
1736 panic("hammer2_chain_next: unrecognized blockref type: %d",
1738 base = NULL; /* safety */
1739 count = 0; /* safety */
1742 KKASSERT(i <= count);
1745 * Look for the key. If we are unable to find a match and an exact
1746 * match was requested we return NULL. If a range was requested we
1747 * run hammer2_chain_next() to iterate.
1749 * NOTE! Deleted elements are effectively invisible. Deletions
1750 * proactively clear the parent bref to the deleted child
1751 * so we do not try to shadow here to avoid parent updates
1752 * (which would be difficult since multiple deleted elements
1753 * might represent different flush synchronization points).
1757 tmp = hammer2_chain_find(parent, i);
1759 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1760 hammer2_chain_drop(tmp);
1765 } else if (base == NULL || base[i].type == 0) {
1771 scan_beg = bref->key;
1772 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1774 hammer2_chain_drop(tmp);
/* inclusive interval-overlap test, same as in hammer2_chain_lookup() */
1775 if (key_beg <= scan_end && key_end >= scan_beg)
1781 * If we couldn't find a match recurse up a parent to continue the
1788 * Acquire the new chain element. If the chain element is an
1789 * indirect block we must search recursively.
1791 chain = hammer2_chain_get(parent, i, flags);
1796 * If the chain element is an indirect block it becomes the new
1797 * parent and we loop on it.
1799 * The parent always has to be locked with at least RESOLVE_MAYBE
1800 * so we can access its data. It might need a fixup if the caller
1801 * passed incompatible flags. Be careful not to cause a deadlock
1802 * as a data-load requires an exclusive lock.
1804 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1805 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1806 hammer2_chain_unlock(parent);
1807 *parentp = parent = chain;
1809 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1810 hammer2_chain_lock(parent, how_maybe |
1811 HAMMER2_RESOLVE_NOREF);
1812 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1813 parent->data == NULL) {
/* relock with data resolution: ref first so the unlock cannot free it */
1814 hammer2_chain_ref(parent);
1815 hammer2_chain_unlock(parent);
1816 hammer2_chain_lock(parent, how_maybe |
1817 HAMMER2_RESOLVE_NOREF);
1824 * All done, return chain
1830 * Create and return a new hammer2 system memory structure of the specified
1831 * key, type and size and insert it under (*parentp). This is a full
1832 * insertion, based on the supplied key/keybits, and may involve creating
1833 * indirect blocks and moving other chains around via delete/duplicate.
1835 * (*parentp) must be exclusive locked and may be replaced on return
1836 * depending on how much work the function had to do.
1838 * (*chainp) usually starts out NULL and returns the newly created chain,
1839 * but if the caller desires the caller may allocate a disconnected chain
1840 * and pass it in instead. (It is also possible for the caller to use
1841 * chain_duplicate() to create a disconnected chain, manipulate it, then
1842 * pass it into this function to insert it).
1844 * This function should NOT be used to insert INDIRECT blocks. It is
1845 * typically used to create/insert inodes and data blocks.
1847 * Caller must pass-in an exclusively locked parent the new chain is to
1848 * be inserted under, and optionally pass-in a disconnected, exclusively
1849 * locked chain to insert (else we create a new chain). The function will
1850 * adjust (*parentp) as necessary and return the existing or new chain.
1853 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
1854 hammer2_chain_t **chainp,
1855 hammer2_key_t key, int keybits, int type, size_t bytes)
1857 hammer2_mount_t *hmp;
1858 hammer2_chain_t *chain;
1859 hammer2_chain_t *child;
1860 hammer2_chain_t *parent = *parentp;
1861 hammer2_chain_core_t *above;
1862 hammer2_blockref_t dummy;
1863 hammer2_blockref_t *base;
1869 above = parent->core;
1870 KKASSERT(ccms_thread_lock_owned(&above->cst));
1874 if (chain == NULL) {
1876 * First allocate media space and construct the dummy bref,
1877 * then allocate the in-memory chain structure.
1879 bzero(&dummy, sizeof(dummy));
1882 dummy.keybits = keybits;
1883 dummy.data_off = hammer2_allocsize(bytes);
1884 dummy.methods = parent->bref.methods;
1885 chain = hammer2_chain_alloc(hmp, trans, &dummy);
1886 hammer2_chain_core_alloc(chain, NULL);
1889 * Lock the chain manually, chain_lock will load the chain
1890 * which we do NOT want to do. (note: chain->refs is set
1891 * to 1 by chain_alloc() for us, but lockcnt is not).
1894 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
1898 * We do NOT set INITIAL here (yet). INITIAL is only
1899 * used for indirect blocks.
1901 * Recalculate bytes to reflect the actual media block
1904 bytes = (hammer2_off_t)1 <<
1905 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1906 chain->bytes = bytes;
1909 case HAMMER2_BREF_TYPE_VOLUME:
1910 panic("hammer2_chain_create: called with volume type");
1912 case HAMMER2_BREF_TYPE_INODE:
1913 KKASSERT(bytes == HAMMER2_INODE_BYTES);
/* inode meta-data is embedded; allocate its in-memory copy up front */
1914 chain->data = kmalloc(sizeof(chain->data->ipdata),
1915 hmp->minode, M_WAITOK | M_ZERO);
1917 case HAMMER2_BREF_TYPE_INDIRECT:
/* NOTE(review): adjacent string literals concatenate to "used tocreate" -- missing space */
1918 panic("hammer2_chain_create: cannot be used to"
1919 "create indirect block");
1921 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1922 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* NOTE(review): same missing space in the concatenated panic string */
1923 panic("hammer2_chain_create: cannot be used to"
1924 "create freemap root or node");
1926 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1927 case HAMMER2_BREF_TYPE_DATA:
1929 /* leave chain->data NULL */
1930 KKASSERT(chain->data == NULL);
1935 * Potentially update the chain's key/keybits.
1937 chain->bref.key = key;
1938 chain->bref.keybits = keybits;
1939 KKASSERT(chain->above == NULL);
1943 above = parent->core;
1946 * Locate a free blockref in the parent's array
1948 switch(parent->bref.type) {
1949 case HAMMER2_BREF_TYPE_INODE:
1950 KKASSERT((parent->data->ipdata.op_flags &
1951 HAMMER2_OPFLAG_DIRECTDATA) == 0);
1952 KKASSERT(parent->data != NULL);
1953 base = &parent->data->ipdata.u.blockset.blockref[0];
1954 count = HAMMER2_SET_COUNT;
1956 case HAMMER2_BREF_TYPE_INDIRECT:
1957 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1958 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1959 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1962 KKASSERT(parent->data != NULL);
1963 base = &parent->data->npdata.blockref[0];
1965 count = parent->bytes / sizeof(hammer2_blockref_t);
1967 case HAMMER2_BREF_TYPE_VOLUME:
1968 KKASSERT(parent->data != NULL);
1969 base = &hmp->voldata.sroot_blockset.blockref[0];
1970 count = HAMMER2_SET_COUNT;
1973 panic("hammer2_chain_create: unrecognized blockref type: %d",
1980 * Scan for an unallocated bref, also skipping any slots occupied
1981 * by in-memory chain elements that may not yet have been updated
1982 * in the parent's bref array.
1984 * We don't have to hold the spinlock to save an empty slot as
1985 * new slots can only transition from empty if the parent is
1986 * locked exclusively.
1988 spin_lock(&above->cst.spin);
1989 for (i = 0; i < count; ++i) {
1990 child = hammer2_chain_find_locked(parent, i);
1992 if (child->flags & HAMMER2_CHAIN_DELETED)
1998 if (base[i].type == 0)
2001 spin_unlock(&above->cst.spin);
2004 * If no free blockref could be found we must create an indirect
2005 * block and move a number of blockrefs into it. With the parent
2006 * locked we can safely lock each child in order to move it without
2007 * causing a deadlock.
2009 * This may return the new indirect block or the old parent depending
2010 * on where the key falls. NULL is returned on error.
2013 hammer2_chain_t *nparent;
2015 nparent = hammer2_chain_create_indirect(trans, parent,
2018 if (nparent == NULL) {
2020 hammer2_chain_drop(chain);
2024 if (parent != nparent) {
2025 hammer2_chain_unlock(parent);
2026 parent = *parentp = nparent;
2032 * Link the chain into its parent. Later on we will have to set
2033 * the MOVED bit in situations where we don't mark the new chain
2034 * as being modified.
2036 if (chain->above != NULL)
2037 panic("hammer2: hammer2_chain_create: chain already connected");
2038 KKASSERT(chain->above == NULL);
2039 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2041 chain->above = above;
2043 spin_lock(&above->cst.spin);
2044 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain))
2045 panic("hammer2_chain_create: collision");
2046 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2047 spin_unlock(&above->cst.spin);
2050 * (allocated) indicates that this is a newly-created chain element
2051 * rather than a renamed chain element.
2053 * In this situation we want to place the chain element in
2054 * the MODIFIED state. The caller expects it to NOT be in the
2057 * The data area will be set up as follows:
2059 * VOLUME not allowed here.
2061 * INODE embedded data area will be set up.
2063 * INDIRECT not allowed here.
2065 * DATA no data area will be set-up (caller is expected
2066 * to have logical buffers, we don't want to alias
2067 * the data onto device buffers!).
2070 switch(chain->bref.type) {
2071 case HAMMER2_BREF_TYPE_DATA:
2072 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2073 hammer2_chain_modify(trans, &chain,
2074 HAMMER2_MODIFY_OPTDATA |
2075 HAMMER2_MODIFY_ASSERTNOCOPY);
2077 case HAMMER2_BREF_TYPE_INDIRECT:
2078 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2079 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2080 /* not supported in this function */
2081 panic("hammer2_chain_create: bad type");
2082 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2083 hammer2_chain_modify(trans, &chain,
2084 HAMMER2_MODIFY_OPTDATA |
2085 HAMMER2_MODIFY_ASSERTNOCOPY);
2088 hammer2_chain_modify(trans, &chain,
2089 HAMMER2_MODIFY_ASSERTNOCOPY);
2094 * When reconnecting a chain we must set MOVED and setsubmod
2095 * so the flush recognizes that it must update the bref in
2098 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
/* MOVED carries its own ref; take it only on the 0->1 transition */
2099 hammer2_chain_ref(chain);
2100 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2102 hammer2_chain_setsubmod(trans, chain);
2112 * Replace (*chainp) with a duplicate. The original *chainp is unlocked
2113 * and the replacement will be returned locked. Both the original and the
2114 * new chain will share the same RBTREE (have the same chain->core), with
2115 * the new chain becoming the 'current' chain (meaning it is the first in
2116 * the linked list at core->chain_first).
2118 * If (parent, i) then the new duplicated chain is inserted under the parent
2119 * at the specified index (the parent must not have a ref at that index).
2121 * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2122 * similar to if it had just been chain_alloc()'d (suitable for passing into
2123 * hammer2_chain_create() after this function returns).
2125 * NOTE! Duplication is used in order to retain the original topology to
2126 * support flush synchronization points. Both the original and the
2127 * new chain will have the same transaction id and thus the operation
2128 * appears atomic on the media.
2131 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2132 hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2134 hammer2_mount_t *hmp = trans->hmp;
2135 hammer2_blockref_t *base;
2136 hammer2_chain_t *ochain;
2137 hammer2_chain_t *nchain;
2138 hammer2_chain_t *scan;
2139 hammer2_chain_core_t *above;
2140 hammer2_chain_core_t *core;
2145 * First create a duplicate of the chain structure, associating
2146 * it with the same core, making it the same size, pointing it
2147 * to the same bref (the same media block), and copying any inline
2152 bref = &ochain->bref;
2153 nchain = hammer2_chain_alloc(hmp, trans, bref);
2154 hammer2_chain_core_alloc(nchain, ochain->core);
2155 core = ochain->core;
2157 bytes = (hammer2_off_t)1 <<
2158 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2159 nchain->bytes = bytes;
2162 * Propagate the IPACTIVE flag. This prevents an out-of-order
2163 * removal of a chain which can cause ip->chain sequences to sit
2166 /*if (core && (ochain->flags & HAMMER2_CHAIN_IPACTIVE))*/ {
2167 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_IPACTIVE);
2168 hammer2_chain_ref(nchain);
2172 * Be sure to copy the INITIAL flag as well or we could end up
2173 * loading garbage from the bref.
2175 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2176 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2177 if (ochain->flags & HAMMER2_CHAIN_DIRTYBP)
2178 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DIRTYBP);
2181 * If the old chain is modified the new one must be too,
2182 * but we only want to allocate a new bref.
2184 if (ochain->flags & HAMMER2_CHAIN_MODIFIED) {
2186 * When duplicating chains the MODIFIED state is inherited.
2187 * A new bref typically must be allocated. However, file
2188 * data chains may already have the data offset assigned
2189 * to a logical buffer cache buffer so we absolutely cannot
2190 * allocate a new bref here for TYPE_DATA.
2192 * Basically the flusher core only dumps media topology
2193 * and meta-data, not file data. The VOP_FSYNC code deals
2194 * with the file data. XXX need back-pointer to inode.
2196 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
/* set MODIFIED directly for TYPE_DATA (no new bref); MODIFIED carries a ref */
2197 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MODIFIED);
2198 hammer2_chain_ref(nchain);
2200 hammer2_chain_modify(trans, &nchain,
2201 HAMMER2_MODIFY_OPTDATA |
2202 HAMMER2_MODIFY_ASSERTNOCOPY);
2204 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2206 * When duplicating chains in the INITIAL state we need
2207 * to ensure that the chain is marked modified so a
2208 * block is properly assigned to it, otherwise the MOVED
2209 * bit won't do the right thing.
2211 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2212 hammer2_chain_modify(trans, &nchain,
2213 HAMMER2_MODIFY_OPTDATA |
2214 HAMMER2_MODIFY_ASSERTNOCOPY);
2216 if (parent || (ochain->flags & HAMMER2_CHAIN_MOVED)) {
2217 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2218 hammer2_chain_ref(nchain);
2220 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2222 switch(nchain->bref.type) {
2223 case HAMMER2_BREF_TYPE_VOLUME:
2224 panic("hammer2_chain_duplicate: cannot be called w/volhdr");
2226 case HAMMER2_BREF_TYPE_INODE:
2227 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2229 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2230 hmp->minode, M_WAITOK | M_ZERO);
/* struct copy of the embedded inode meta-data from the original */
2231 nchain->data->ipdata = ochain->data->ipdata;
2234 case HAMMER2_BREF_TYPE_INDIRECT:
2235 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2237 bcopy(ochain->data, nchain->data,
2241 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2242 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* NOTE(review): adjacent string literals concatenate to "used tocreate" -- missing space */
2243 panic("hammer2_chain_duplicate: cannot be used to"
2244 "create a freemap root or node");
2246 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2247 case HAMMER2_BREF_TYPE_DATA:
2249 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2251 bcopy(ochain->data, nchain->data,
2254 /* leave chain->data NULL */
2255 KKASSERT(nchain->data == NULL);
2260 * Unmodified duplicated blocks may have the same bref, we
2261 * must be careful to avoid buffer cache deadlocks so we
2262 * unlock the old chain before resolving the new one.
2264 * Insert nchain at the end of the duplication list.
2266 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2267 /* extra ref still present from original allocation */
2269 hammer2_chain_unlock(ochain);
2271 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2272 HAMMER2_RESOLVE_NOREF); /* eat excess ref */
2273 hammer2_chain_unlock(nchain);
2276 * If parent is not NULL, insert into the parent at the requested
2277 * index. The newly duplicated chain must be marked MOVED and
2278 * SUBMODIFIED set in its parent(s).
2282 * Locate a free blockref in the parent's array
2284 above = parent->core;
2285 KKASSERT(ccms_thread_lock_owned(&above->cst));
2287 switch(parent->bref.type) {
2288 case HAMMER2_BREF_TYPE_INODE:
2289 KKASSERT((parent->data->ipdata.op_flags &
2290 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2291 KKASSERT(parent->data != NULL);
2292 base = &parent->data->ipdata.u.blockset.blockref[0];
2293 count = HAMMER2_SET_COUNT;
2295 case HAMMER2_BREF_TYPE_INDIRECT:
2296 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2297 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2298 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2301 KKASSERT(parent->data != NULL);
2302 base = &parent->data->npdata.blockref[0];
2304 count = parent->bytes / sizeof(hammer2_blockref_t);
2306 case HAMMER2_BREF_TYPE_VOLUME:
2307 KKASSERT(parent->data != NULL);
2308 base = &hmp->voldata.sroot_blockset.blockref[0];
2309 count = HAMMER2_SET_COUNT;
/* NOTE(review): panic message names hammer2_chain_create, not _duplicate */
2312 panic("hammer2_chain_create: unrecognized "
2313 "blockref type: %d",
2318 KKASSERT(i >= 0 && i < count);
2320 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2321 KKASSERT(parent->refs > 0);
2323 spin_lock(&above->cst.spin);
2324 nchain->above = above;
2326 scan = hammer2_chain_find_locked(parent, i);
2327 KKASSERT(base == NULL || base[i].type == 0 ||
2329 (scan->flags & HAMMER2_CHAIN_DELETED));
2330 if (RB_INSERT(hammer2_chain_tree, &above->rbtree,
2332 panic("hammer2_chain_duplicate: collision");
2334 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2335 spin_unlock(&above->cst.spin);
2337 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
/* MOVED carries its own ref; take it only on the 0->1 transition */
2338 hammer2_chain_ref(nchain);
2339 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2341 hammer2_chain_setsubmod(trans, nchain);
2346 * Special in-place delete-duplicate sequence which does not require a
2347 * locked parent. (*chainp) is marked DELETED and atomically replaced
2348 * with a duplicate. Atomicy is at the very-fine spin-lock level in
2349 * order to ensure that lookups do not race us.
2352 hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
2353 hammer2_chain_t **chainp)
2355 hammer2_mount_t *hmp = trans->hmp;
2356 hammer2_chain_t *ochain;
2357 hammer2_chain_t *nchain;
2358 hammer2_chain_core_t *above;
2359 hammer2_chain_core_t *core;
/*
 * Allocate the replacement chain (nchain).  It shares ochain's core,
 * initially points at the same bref/media block, and the alloc leaves
 * one extra ref on it which is consumed later via RESOLVE_NOREF.
 */
2363 * First create a duplicate of the chain structure, associating
2364 * it with the same core, making it the same size, pointing it
2365 * to the same bref (the same media block), and copying any inline
2369 nchain = hammer2_chain_alloc(hmp, trans, &ochain->bref); /* 1 ref */
2370 hammer2_chain_core_alloc(nchain, ochain->core);
2371 core = ochain->core;
2372 above = ochain->above;
/* debug trace only */
2374 kprintf("delete_duplicate %p.%d(%d)\n",
2375 ochain, ochain->bref.type, ochain->refs);
/* chain byte size is encoded as a radix in the low bits of data_off */
2377 bytes = (hammer2_off_t)1 <<
2378 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2379 nchain->bytes = bytes;
2382 * Propagate the IPACTIVE flag. This prevents an out-of-order
2383 * removal of a chain which can cause ip->chain sequences to sit
/* NOTE(review): condition intentionally commented out - flag is set
 * unconditionally here, with a matching ref on nchain. */
2386 /*if (core && (ochain->flags & HAMMER2_CHAIN_IPACTIVE))*/ {
2387 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_IPACTIVE);
2388 hammer2_chain_ref(nchain);
2392 * Be sure to copy the INITIAL flag as well or we could end up
2393 * loading garbage from the bref.
2395 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2396 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2397 if (ochain->flags & HAMMER2_CHAIN_DIRTYBP)
2398 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_DIRTYBP);
2401 * If the old chain is modified the new one must be too,
2402 * but we only want to allocate a new bref.
2404 if (ochain->flags & HAMMER2_CHAIN_MODIFIED) {
2406 * When duplicating chains the MODIFIED state is inherited.
2407 * A new bref typically must be allocated. However, file
2408 * data chains may already have the data offset assigned
2409 * to a logical buffer cache buffer so we absolutely cannot
2410 * allocate a new bref here for TYPE_DATA.
2412 * Basically the flusher core only dumps media topology
2413 * and meta-data, not file data. The VOP_FSYNC code deals
2414 * with the file data. XXX need back-pointer to inode.
/* TYPE_DATA: inherit MODIFIED by hand (flag + ref + modify_tid),
 * do NOT go through hammer2_chain_modify() which could reallocate
 * the bref out from under the buffer cache. */
2416 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2417 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MODIFIED);
2418 hammer2_chain_ref(nchain);
2419 nchain->modify_tid = trans->sync_tid;
2421 hammer2_chain_modify(trans, &nchain,
2422 HAMMER2_MODIFY_OPTDATA |
2423 HAMMER2_MODIFY_ASSERTNOCOPY);
2425 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2427 * When duplicating chains in the INITIAL state we need
2428 * to ensure that the chain is marked modified so a
2429 * block is properly assigned to it, otherwise the MOVED
2430 * bit won't do the right thing.
2432 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2433 hammer2_chain_modify(trans, &nchain,
2434 HAMMER2_MODIFY_OPTDATA |
2435 HAMMER2_MODIFY_ASSERTNOCOPY);
2439 * Unconditionally set the MOVED and SUBMODIFIED bit to force
2440 * update of parent bref and indirect blockrefs during flush.
2442 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2443 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2444 hammer2_chain_ref(nchain);
2446 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2449 * Copy media contents as needed.
2451 switch(nchain->bref.type) {
2452 case HAMMER2_BREF_TYPE_VOLUME:
2453 panic("hammer2_chain_duplicate: cannot be called w/volhdr");
/* inode: always deep-copy the embedded inode data */
2455 case HAMMER2_BREF_TYPE_INODE:
2456 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2458 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2459 hmp->minode, M_WAITOK | M_ZERO);
2460 nchain->data->ipdata = ochain->data->ipdata;
2463 case HAMMER2_BREF_TYPE_INDIRECT:
2464 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2466 bcopy(ochain->data, nchain->data,
2470 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2471 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* NOTE(review): the adjacent string literals concatenate without a
 * space - the panic message reads "...cannot be used tocreate...".
 * Candidate fix: add a space before "create". */
2472 panic("hammer2_chain_duplicate: cannot be used to"
2473 "create a freemap root or node");
2475 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2476 case HAMMER2_BREF_TYPE_DATA:
2478 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2480 bcopy(ochain->data, nchain->data,
2483 /* leave chain->data NULL */
2484 KKASSERT(nchain->data == NULL);
2489 * Both chains must be locked for us to be able to set the
2490 * duplink. The caller may expect valid data.
2492 * Unmodified duplicated blocks may have the same bref, we
2493 * must be careful to avoid buffer cache deadlocks so we
2494 * unlock the old chain before resolving the new one.
2496 * Insert nchain at the end of the duplication list.
2498 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2499 /* extra ref still present from original allocation */
2501 nchain->index = ochain->index;
/* debug trace only */
2503 kprintf("duplicate ochain %p(%d) nchain %p(%d) %08x\n",
2504 ochain, ochain->refs, nchain, nchain->refs, nchain->flags);
/*
 * Atomic switch-over under the core's spin lock: mark ochain
 * DELETED (delete_tid = this transaction) and insert nchain into
 * the RB tree in its place so lookups cannot race the swap.
 */
2506 spin_lock(&above->cst.spin);
2507 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2508 ochain->delete_tid = trans->sync_tid;
2509 nchain->above = above;
2510 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
2511 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2512 hammer2_chain_ref(ochain);
2513 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
2515 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, nchain)) {
2516 panic("hammer2_chain_delete_duplicate: collision");
2518 spin_unlock(&above->cst.spin);
2521 * Cleanup. Also note that nchain must be re-resolved to ensure
2522 * that its data is resolved because we locked it RESOLVE_NEVER
2525 *chainp = nchain; /* inherits locked */
2526 hammer2_chain_unlock(ochain); /* replacing ochain */
2527 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2528 HAMMER2_RESOLVE_NOREF); /* excess ref */
2529 hammer2_chain_unlock(nchain);
2531 hammer2_chain_setsubmod(trans, nchain);
2535 * Create a snapshot of the specified {parent, chain} with the specified
2538 * (a) We create a duplicate connected to the super-root as the specified
2541 * (b) We issue a restricted flush using the current transaction on the
2544 * (c) We disconnect and reallocate the duplicate's core.
2547 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_inode_t *ip,
2548 hammer2_ioc_pfs_t *pfs)
2550 hammer2_mount_t *hmp = trans->hmp;
2551 hammer2_chain_t *chain;
2552 hammer2_chain_t *nchain;
2553 hammer2_chain_t *parent;
2554 hammer2_inode_data_t *ipdata;
2555 size_t name_len = strlen(pfs->name);
/* directory-hash key for the snapshot's name in the super-root */
2556 hammer2_key_t lhc = hammer2_dirhash(pfs->name, name_len);
2560 * Create disconnected duplicate
2562 KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
2564 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
/* NULL parent / index -1: duplicate is created disconnected */
2565 hammer2_chain_duplicate(trans, NULL, -1, &nchain, NULL);
2566 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_RECYCLE |
2567 HAMMER2_CHAIN_SNAPSHOT);
2570 * Create named entry in the super-root.
2572 parent = hammer2_chain_lookup_init(hmp->schain, 0);
/*
 * Probe for a free lhc slot: iterate while the hash collides,
 * bumping lhc within the DIRHASH low-mask collision space.
 */
2574 while (error == 0) {
2575 chain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
2578 if ((lhc & HAMMER2_DIRHASH_LOMASK) == HAMMER2_DIRHASH_LOMASK)
2580 hammer2_chain_unlock(chain);
2584 hammer2_chain_create(trans, &parent, &nchain, lhc, 0,
2585 HAMMER2_BREF_TYPE_INODE,
2586 HAMMER2_INODE_BYTES);
2587 hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
2588 hammer2_chain_lookup_done(parent);
2589 parent = NULL; /* safety */
/* fill in the snapshot inode's name fields */
2594 ipdata = &nchain->data->ipdata;
2595 ipdata->name_key = lhc;
2596 ipdata->name_len = name_len;
2597 ksnprintf(ipdata->filename, sizeof(ipdata->filename), "%s", pfs->name);
2600 * Set PFS type, generate a unique filesystem id, and generate
2601 * a cluster id. Use the same clid when snapshotting a PFS root,
2602 * which theoretically allows the snapshot to be used as part of
2603 * the same cluster (perhaps as a cache).
2605 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
2606 kern_uuidgen(&ipdata->pfs_fsid, 1);
2607 if (ip->chain == ip->pmp->rchain)
2608 ipdata->pfs_clid = ip->chain->data->ipdata.pfs_clid;
2610 kern_uuidgen(&ipdata->pfs_clid, 1);
2613 * Issue a restricted flush of the snapshot. This is a synchronous
/* NOTE(review): SNAPSHOTA/SNAPSHOTB kprintfs and the 4-second tsleep
 * look like development instrumentation - confirm before removal. */
2616 trans->flags |= HAMMER2_TRANS_RESTRICTED;
2617 kprintf("SNAPSHOTA\n");
2618 tsleep(trans, 0, "snapslp", hz*4);
2619 kprintf("SNAPSHOTB\n");
2620 hammer2_chain_flush(trans, nchain);
2621 trans->flags &= ~HAMMER2_TRANS_RESTRICTED;
2625 * Remove the link b/c nchain is a snapshot and snapshots don't
2626 * follow CHAIN_DELETED semantics?
/* unhook nchain from the duplication list and drop the DUPTARGET ref */
2631 KKASSERT(chain->duplink == nchain);
2632 KKASSERT(chain->core == nchain->core);
2633 KKASSERT(nchain->refs >= 2);
2634 chain->duplink = nchain->duplink;
2635 atomic_clear_int(&nchain->flags, HAMMER2_CHAIN_DUPTARGET);
2636 hammer2_chain_drop(nchain);
/* debug trace only */
2639 kprintf("snapshot %s nchain->refs %d nchain->flags %08x\n",
2640 pfs->name, nchain->refs, nchain->flags);
2641 hammer2_chain_unlock(nchain);
2647 * Create an indirect block that covers one or more of the elements in the
2648 * current parent. Either returns the existing parent with no locking or
2649 * ref changes or returns the new indirect block locked and referenced
2650 * and leaving the original parent lock/ref intact as well.
2652 * If an error occurs, NULL is returned and *errorp is set to the error.
2654 * The returned chain depends on where the specified key falls.
2656 * The key/keybits for the indirect mode only needs to follow three rules:
2658 * (1) That all elements underneath it fit within its key space and
2660 * (2) That all elements outside it are outside its key space.
2662 * (3) When creating the new indirect block any elements in the current
2663 * parent that fit within the new indirect block's keyspace must be
2664 * moved into the new indirect block.
2666 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2667 * keyspace than the current parent, but lookup/iteration rules will
2668 * ensure (and must ensure) that rule (2) for all parents leading up
2669 * to the nearest inode or the root volume header is adhered to. This
2670 * is accomplished by always recursing through matching keyspaces in
2671 * the hammer2_chain_lookup() and hammer2_chain_next() API.
2673 * The current implementation calculates the current worst-case keyspace by
2674 * iterating the current parent and then divides it into two halves, choosing
2675 * whichever half has the most elements (not necessarily the half containing
2676 * the requested key).
2678 * We can also opt to use the half with the least number of elements. This
2679 * causes lower-numbered keys (aka logical file offsets) to recurse through
2680 * fewer indirect blocks and higher-numbered keys to recurse through more.
2681 * This also has the risk of not moving enough elements to the new indirect
2682 * block and being forced to create several indirect blocks before the element
2685 * Must be called with an exclusively locked parent.
2689 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2690 hammer2_key_t create_key, int create_bits,
2693 hammer2_mount_t *hmp = trans->hmp;
2694 hammer2_chain_core_t *above;
2695 hammer2_chain_core_t *icore;
2696 hammer2_blockref_t *base;
2697 hammer2_blockref_t *bref;
2698 hammer2_chain_t *chain;
2699 hammer2_chain_t *child;
2700 hammer2_chain_t *ichain;
2701 hammer2_chain_t dummy;
2702 hammer2_key_t key = create_key;
2703 int keybits = create_bits;
2711 * Calculate the base blockref pointer or NULL if the chain
2712 * is known to be empty. We need to calculate the array count
2713 * for RB lookups either way.
2715 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2717 above = parent->core;
2719 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
/* INITIAL parent: no media array yet, only the count is meaningful */
2720 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2723 switch(parent->bref.type) {
2724 case HAMMER2_BREF_TYPE_INODE:
2725 count = HAMMER2_SET_COUNT;
2727 case HAMMER2_BREF_TYPE_INDIRECT:
2728 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2729 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2730 count = parent->bytes / sizeof(hammer2_blockref_t);
2732 case HAMMER2_BREF_TYPE_VOLUME:
2733 count = HAMMER2_SET_COUNT;
2736 panic("hammer2_chain_create_indirect: "
2737 "unrecognized blockref type: %d",
/* non-INITIAL parent: locate the media blockref array as well */
2743 switch(parent->bref.type) {
2744 case HAMMER2_BREF_TYPE_INODE:
2745 base = &parent->data->ipdata.u.blockset.blockref[0];
2746 count = HAMMER2_SET_COUNT;
2748 case HAMMER2_BREF_TYPE_INDIRECT:
2749 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2750 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2751 base = &parent->data->npdata.blockref[0];
2752 count = parent->bytes / sizeof(hammer2_blockref_t);
2754 case HAMMER2_BREF_TYPE_VOLUME:
2755 base = &hmp->voldata.sroot_blockset.blockref[0];
2756 count = HAMMER2_SET_COUNT;
2759 panic("hammer2_chain_create_indirect: "
2760 "unrecognized blockref type: %d",
2768 * Scan for an unallocated bref, also skipping any slots occupied
2769 * by in-memory chain elements which may not yet have been updated
2770 * in the parent's bref array.
2772 * Deleted elements are ignored.
2774 bzero(&dummy, sizeof(dummy));
2775 dummy.delete_tid = HAMMER2_MAX_TID;
/*
 * Pass 1 (under the core spin lock): widen (key, keybits) until the
 * range covers every live element, counting how many fall in the low
 * vs high half (locount/hicount).
 */
2777 spin_lock(&above->cst.spin);
2778 for (i = 0; i < count; ++i) {
/* prefer the in-memory chain's bref over the media copy */
2781 child = hammer2_chain_find_locked(parent, i);
2783 if (child->flags & HAMMER2_CHAIN_DELETED)
2785 bref = &child->bref;
2786 } else if (base && base[i].type) {
2793 * Expand our calculated key range (key, keybits) to fit
2794 * the scanned key. nkeybits represents the full range
2795 * that we will later cut in half (two halves @ nkeybits - 1).
2798 if (nkeybits < bref->keybits) {
2799 if (bref->keybits > 64) {
2800 kprintf("bad bref index %d chain %p bref %p\n", i, chain, bref);
2803 nkeybits = bref->keybits;
/* grow nkeybits until key and bref->key share all bits above it */
2805 while (nkeybits < 64 &&
2806 (~(((hammer2_key_t)1 << nkeybits) - 1) &
2807 (key ^ bref->key)) != 0) {
2812 * If the new key range is larger we have to determine
2813 * which side of the new key range the existing keys fall
2814 * under by checking the high bit, then collapsing the
2815 * locount into the hicount or vise-versa.
2817 if (keybits != nkeybits) {
2818 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
2829 * The newly scanned key will be in the lower half or the
2830 * higher half of the (new) key range.
2832 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
2837 spin_unlock(&above->cst.spin);
2838 bref = NULL; /* now invalid (safety) */
2841 * Adjust keybits to represent half of the full range calculated
2842 * above (radix 63 max)
2847 * Select whichever half contains the most elements. Theoretically
2848 * we can select either side as long as it contains at least one
2849 * element (in order to ensure that a free slot is present to hold
2850 * the indirect block).
2852 key &= ~(((hammer2_key_t)1 << keybits) - 1);
2853 if (hammer2_indirect_optimize) {
2855 * Insert node for least number of keys, this will arrange
2856 * the first few blocks of a large file or the first few
2857 * inodes in a directory with fewer indirect blocks when
2860 if (hicount < locount && hicount != 0)
2861 key |= (hammer2_key_t)1 << keybits;
2863 key &= ~(hammer2_key_t)1 << keybits;
2866 * Insert node for most number of keys, best for heavily
2869 if (hicount > locount)
2870 key |= (hammer2_key_t)1 << keybits;
2872 key &= ~(hammer2_key_t)1 << keybits;
2876 * How big should our new indirect block be? It has to be at least
2877 * as large as its parent.
2879 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2880 nbytes = HAMMER2_IND_BYTES_MIN;
2882 nbytes = HAMMER2_IND_BYTES_MAX;
2883 if (nbytes < count * sizeof(hammer2_blockref_t))
2884 nbytes = count * sizeof(hammer2_blockref_t);
2887 * Ok, create our new indirect block
/* freemap parents get a FREEMAP_NODE child, everything else INDIRECT */
2889 switch(parent->bref.type) {
2890 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2891 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2892 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2895 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2898 dummy.bref.key = key;
2899 dummy.bref.keybits = keybits;
2900 dummy.bref.data_off = hammer2_allocsize(nbytes);
2901 dummy.bref.methods = parent->bref.methods;
2903 ichain = hammer2_chain_alloc(hmp, trans, &dummy.bref);
2904 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2905 hammer2_chain_core_alloc(ichain, NULL);
2906 icore = ichain->core;
2907 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2908 hammer2_chain_drop(ichain); /* excess ref from alloc */
2911 * We have to mark it modified to allocate its block, but use
2912 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
2913 * it won't be acted upon by the flush code.
2915 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
2918 * Iterate the original parent and move the matching brefs into
2919 * the new indirect block.
2921 * XXX handle flushes.
/*
 * Pass 2 (spin lock re-acquired): move every element in the chosen
 * half into ichain via delete + duplicate; free slots encountered
 * along the way seed ichain->index for the later insertion.
 */
2923 spin_lock(&above->cst.spin);
2924 for (i = 0; i < count; ++i) {
2926 * For keying purposes access the bref from the media or
2927 * from our in-memory cache. In cases where the in-memory
2928 * cache overrides the media the keyrefs will be the same
2929 * anyway so we can avoid checking the cache when the media
2932 child = hammer2_chain_find_locked(parent, i);
2934 if (child->flags & HAMMER2_CHAIN_DELETED) {
2935 if (ichain->index < 0)
2939 bref = &child->bref;
2940 } else if (base && base[i].type) {
2943 if (ichain->index < 0)
2949 * Skip keys not in the chosen half (low or high), only bit
2950 * (keybits - 1) needs to be compared but for safety we
2951 * will compare all msb bits plus that bit again.
2953 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2954 (key ^ bref->key)) != 0) {
2959 * This element is being moved from the parent, its slot
2960 * is available for our new indirect block.
2962 if (ichain->index < 0)
2966 * Load the new indirect block by acquiring or allocating
2967 * the related chain entries, then move them to the new
2968 * parent (ichain) by deleting them from their old location
2969 * and inserting a duplicate of the chain and any modified
2970 * sub-chain in the new location.
2972 * We must set MOVED in the chain being duplicated and
2973 * SUBMODIFIED in the parent(s) so the flush code knows
2974 * what is going on. The latter is done after the loop.
2976 * WARNING! above->cst.spin must be held when parent is
2977 * modified, even though we own the full blown lock,
2978 * to deal with setsubmod and rename races.
2979 * (XXX remove this req).
/* spin lock dropped around the blocking get/delete/duplicate calls */
2981 spin_unlock(&above->cst.spin);
2982 chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
2983 hammer2_chain_delete(trans, chain);
2984 hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
2986 hammer2_chain_unlock(chain);
2987 KKASSERT(parent->refs > 0);
2989 spin_lock(&above->cst.spin);
2991 spin_unlock(&above->cst.spin);
2994 * Insert the new indirect block into the parent now that we've
2995 * cleared out some entries in the parent. We calculated a good
2996 * insertion index in the loop above (ichain->index).
2998 * We don't have to set MOVED here because we mark ichain modified
2999 * down below (so the normal modified -> flush -> set-moved sequence
3002 * The insertion shouldn't race as this is a completely new block
3003 * and the parent is locked.
3005 if (ichain->index < 0)
3006 kprintf("indirect parent %p count %d key %016jx/%d\n",
3007 parent, count, (intmax_t)key, keybits);
3008 KKASSERT(ichain->index >= 0);
3009 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3010 spin_lock(&above->cst.spin);
3011 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, ichain))
3012 panic("hammer2_chain_create_indirect: ichain insertion");
3013 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
3014 ichain->above = above;
3015 spin_unlock(&above->cst.spin);
3018 * Mark the new indirect block modified after insertion, which
3019 * will propagate up through parent all the way to the root and
3020 * also allocate the physical block in ichain for our caller,
3021 * and assign ichain->data to a pre-zero'd space (because there
3022 * is not prior data to copy into it).
3024 * We have to set SUBMODIFIED in ichain's flags manually so the
3025 * flusher knows it has to recurse through it to get to all of
3026 * our moved blocks, then call setsubmod() to set the bit
3029 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3030 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
3031 hammer2_chain_setsubmod(trans, ichain);
3034 * Figure out what to return.
3036 if (create_bits > keybits) {
3038 * Key being created is way outside the key range,
3039 * return the original parent.
3041 hammer2_chain_unlock(ichain);
3042 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
3043 (create_key ^ key)) {
3045 * Key being created is outside the key range,
3046 * return the original parent.
3048 hammer2_chain_unlock(ichain);
3051 * Otherwise its in the range, return the new parent.
3052 * (leave both the new and old parent locked).
3061 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3062 * set chain->delete_tid.
3064 * This function does NOT generate a modification to the parent. It
3065 * would be nearly impossible to figure out which parent to modify anyway.
3066 * Such modifications are handled by the flush code and are properly merged
3067 * using the flush synchronization point.
3069 * The find/get code will properly overload the RBTREE check on top of
3070 * the bref check to detect deleted entries.
3072 * This function is NOT recursive. Any entity already pushed into the
3073 * chain (such as an inode) may still need visibility into its contents,
3074 * as well as the ability to read and modify the contents. For example,
3075 * for an unlinked file which is still open.
3077 * NOTE: This function does NOT set chain->modify_tid, allowing future
3078 * code to distinguish between live and deleted chains by testing
3081 * NOTE: Deletions normally do not occur in the middle of a duplication
3082 * chain but we use a trick for hardlink migration that refactors
3083 * the originating inode without deleting it, so we make no assumptions
3087 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain)
/* caller must hold the chain's core thread lock exclusively */
3089 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3092 * Nothing to do if already marked.
3094 if (chain->flags & HAMMER2_CHAIN_DELETED)
3098 * We must set MOVED along with DELETED for the flush code to
3099 * recognize the operation and properly disconnect the chain
3102 * The setting of DELETED causes finds, lookups, and _next iterations
3103 * to no longer recognize the chain. RB_SCAN()s will still have
3104 * visibility (needed for flush serialization points).
3106 * We need the spinlock on the core whose RBTREE contains chain
3107 * to protect against races.
3109 spin_lock(&chain->above->cst.spin);
3110 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
/* MOVED carries a ref; set it only once */
3111 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3112 hammer2_chain_ref(chain);
3113 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
/* stamp the deletion with this transaction's sync point */
3115 chain->delete_tid = trans->sync_tid;
3116 spin_unlock(&chain->above->cst.spin);
3117 hammer2_chain_setsubmod(trans, chain);
/*
 * Wait helper: sleep on the chain for at most one tick.  The one-tick
 * timeout makes this a polling wait; presumably a wakeup(chain)
 * elsewhere ends the sleep early - TODO confirm the wakeup site.
 */
3121 hammer2_chain_wait(hammer2_chain_t *chain)
3123 tsleep(chain, 0, "chnflw", 1);