2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain and hammer2_chain_core structures.
39 * Chains represent the filesystem media topology in-memory. Any given
40 * chain can represent an inode, indirect block, data, or other types
43 * This module provides APIs for direct and indirect block searches,
44 * iterations, recursions, creation, deletion, replication, and snapshot
45 * views (used by the flush and snapshot code).
47 * Generally speaking any modification made to a chain must propagate all
48 * the way back to the volume header, issuing copy-on-write updates to the
49 * blockref tables all the way up. Any chain except the volume header itself
50 * can be flushed to disk at any time, in any order. None of it matters
51 * until we get to the point where we want to synchronize the volume header
52 * (see the flush code).
54 * The chain structure supports snapshot views in time, which are primarily
55 * used until the related data and meta-data is flushed to allow the
56 * filesystem to make snapshots without requiring it to first flush,
57 * and to allow the filesystem flush and modify the filesystem concurrently
58 * with minimal or no stalls.
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
/* Tunable; the XXX note marks it as a candidate for a sysctl export. */
69 static int hammer2_indirect_optimize; /* XXX SYSCTL */
/*
 * Forward declaration; presumably defined later in this file — the
 * definition is not visible in this chunk.
 */
71 static hammer2_chain_t *hammer2_chain_create_indirect(
72 hammer2_trans_t *trans, hammer2_chain_t *parent,
73 hammer2_key_t key, int keybits, int *errorp);
76 * We use a red-black tree to guarantee safe lookups under shared locks.
78 * Chains can be overloaded onto the same index, creating a different
79 * view of a blockref table based on a transaction id. The RBTREE
80 * deconflicts the view by sub-sorting on delete_tid.
82 * NOTE: Any 'current' chain which is not yet deleted will have a
83 * delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
/*
 * Instantiate the RB-tree support functions keyed by hammer2_chain_cmp
 * (primary key: index, secondary key: delete_tid).
 */
85 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
88 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
90 if (chain1->index < chain2->index)
92 if (chain1->index > chain2->index)
94 if (chain1->delete_tid < chain2->delete_tid)
96 if (chain1->delete_tid > chain2->delete_tid)
102 * Flag chain->parent SUBMODIFIED recursively up to the root. The
103 * recursion can terminate when a parent is encountered with SUBMODIFIED
104 * already set. The flag is NOT set on the passed-in chain.
106 * This can be confusing because even though chains are multi-homed,
107 * each chain has a specific idea of its parent (chain->parent) which
110 * This flag is used by the flusher's downward recursion to detect
111 * modifications and can only be cleared bottom-up.
113 * The parent pointer is protected by all the modified children below it
114 * and cannot be changed until they have all been flushed. However, setsubmod
115 * operations on new modifications can race flushes in progress, so we use
116 * the chain->core->cst.spin lock to handle collisions.
119 hammer2_chain_parent_setsubmod(hammer2_chain_t *chain)
121 hammer2_chain_t *parent;
122 hammer2_chain_core_t *core;
124 while ((parent = chain->parent) != NULL) {
126 spin_lock(&core->cst.spin);
128 * XXX flush synchronization
130 while (parent->duplink &&
131 (parent->flags & HAMMER2_CHAIN_DELETED)) {
132 parent = parent->duplink;
134 if (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) {
135 spin_unlock(&core->cst.spin);
138 atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);
139 spin_unlock(&core->cst.spin);
145 * Allocate a new disconnected chain element representing the specified
146 * bref. chain->refs is set to 1 and the passed bref is copied to
147 * chain->bref. chain->bytes is derived from the bref.
149 * chain->core is NOT allocated and the media data and bp pointers are left
150 * NULL. The caller must call chain_core_alloc() to allocate or associate
151 * a core with the chain.
153 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
156 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
158 hammer2_chain_t *chain;
159 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
162 * Construct the appropriate system structure.
165 case HAMMER2_BREF_TYPE_INODE:
166 case HAMMER2_BREF_TYPE_INDIRECT:
167 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
168 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
169 case HAMMER2_BREF_TYPE_DATA:
170 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
171 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
173 case HAMMER2_BREF_TYPE_VOLUME:
175 panic("hammer2_chain_alloc volume type illegal for op");
178 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
184 chain->index = -1; /* not yet assigned */
185 chain->bytes = bytes;
187 chain->flags = HAMMER2_CHAIN_ALLOCATED;
188 chain->delete_tid = HAMMER2_MAX_TID;
194 * Associate an existing core with the chain or allocate a new core.
196 * The core is not locked. No additional refs on the chain are made.
199 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
201 KKASSERT(chain->core == NULL);
204 core = kmalloc(sizeof(*core), chain->hmp->mchain,
206 RB_INIT(&core->rbtree);
209 ccms_cst_init(&core->cst, chain);
211 atomic_add_int(&core->sharecnt, 1);
217 * Deallocate a chain after the caller has transitioned its refs to 0
218 * and disassociated it from its parent.
220 * We must drop sharecnt on the core (if any) and handle its 1->0 transition
224 hammer2_chain_dealloc(hammer2_chain_t *chain)
226 hammer2_chain_core_t *core;
229 * Chain's flags are expected to be sane.
231 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
232 HAMMER2_CHAIN_MODIFIED |
233 HAMMER2_CHAIN_ONRBTREE)) == 0);
234 KKASSERT(chain->duplink == NULL);
237 * Disconnect chain->core from chain and free core if it was the
238 * last core. If any children are present in the core's rbtree
239 * they cannot have a pointer to our chain by definition because
240 * our chain's refs have dropped to 0. If this is the last sharecnt
241 * on core, then core's rbtree must be empty by definition.
243 if ((core = chain->core) != NULL) {
245 * Other chains may reference the same core so the core's
246 * spinlock is needed to safely disconnect it.
248 spin_lock(&core->cst.spin);
250 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
251 spin_unlock(&core->cst.spin);
252 KKASSERT(RB_EMPTY(&core->rbtree));
253 KKASSERT(core->cst.count == 0);
254 KKASSERT(core->cst.upgrade == 0);
255 kfree(core, chain->hmp->mchain);
257 spin_unlock(&core->cst.spin);
259 core = NULL; /* safety */
263 * Finally free the structure and return for possible recursion.
265 hammer2_chain_free(chain);
269 * Free a disconnected chain element.
272 hammer2_chain_free(hammer2_chain_t *chain)
274 hammer2_mount_t *hmp = chain->hmp;
276 switch(chain->bref.type) {
277 case HAMMER2_BREF_TYPE_VOLUME:
280 case HAMMER2_BREF_TYPE_INODE:
282 kfree(chain->data, hmp->minode);
287 KKASSERT(chain->data == NULL);
291 KKASSERT(chain->core == NULL);
292 KKASSERT(chain->bp == NULL);
295 if (chain->flags & HAMMER2_CHAIN_ALLOCATED)
296 kfree(chain, hmp->mchain);
300 * Add a reference to a chain element, preventing its destruction.
303 hammer2_chain_ref(hammer2_chain_t *chain)
305 atomic_add_int(&chain->refs, 1);
309 * Drop the caller's reference to the chain. When the ref count drops to
310 * zero this function will disassociate the chain from its parent and
311 * deallocate it, then recursively drop the parent using the implied ref
312 * from the chain's chain->parent.
314 * WARNING! Just because we are able to deallocate a chain doesn't mean
315 * that chain->core->rbtree is empty. There can still be a sharecnt
316 * on chain->core and RBTREE entries that refer to different parents.
318 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
/*
 * Drop the caller's reference to the chain (see the block comment above).
 * On the 1->0 transition the chain is disassociated from its parent and
 * deallocated, then the parent is recursively dropped via the implied
 * ref held through chain->parent.
 *
 * NOTE(review): the extraction of this file dropped many original lines
 * here (the embedded line numbers jump): the retry loop, local variable
 * declarations (refs, need) and several branches are missing from this
 * view.  The surviving code is left byte-identical rather than guessed
 * at — reconstruct from the repository.
 */
321 hammer2_chain_drop(hammer2_chain_t *chain)
	/*
	 * MOVED and MODIFIED presumably each account for one implied ref
	 * folded into 'need' (the computing lines are missing from this
	 * view) — confirm against the repository.
	 */
327 if (chain->flags & HAMMER2_CHAIN_MOVED)
329 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
331 KKASSERT(chain->refs > need);
341 chain = hammer2_chain_lastdrop(chain);
342 /* recursively drop parent or retry same */
343 } else if (atomic_cmpset_int(&chain->refs, 1, 0)) {
344 hammer2_chain_dealloc(chain);
346 /* no parent to recurse on */
348 /* retry the same chain */
	/* normal path: non-terminal ref count decrement */
351 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
353 /* retry the same chain */
359 * Safe handling of the 1->0 transition on chain when the chain has a
362 * NOTE: A chain can only be removed from its parent core's RBTREE on
363 * the 1->0 transition by definition. No other code is allowed
364 * to remove chain from its RBTREE, so no race is possible.
368 hammer2_chain_lastdrop(hammer2_chain_t *chain)
370 hammer2_chain_t *parent;
371 hammer2_chain_t *tmp;
372 hammer2_chain_core_t *parent_core;
374 parent = chain->parent;
375 parent_core = parent->core;
376 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
378 spin_lock(&parent_core->cst.spin);
379 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
380 RB_REMOVE(hammer2_chain_tree, &parent_core->rbtree, chain);
381 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
382 chain->parent = NULL; /* NULL field, must drop implied ref */
383 spin_unlock(&parent_core->cst.spin);
384 if ((tmp = chain->duplink) != NULL) {
385 chain->duplink = NULL;
386 hammer2_chain_drop(tmp);
388 hammer2_chain_dealloc(chain);
389 chain = parent; /* recursively drop parent */
391 spin_unlock(&parent_core->cst.spin);
397 * Ref and lock a chain element, acquiring its data with I/O if necessary,
398 * and specify how you would like the data to be resolved.
400 * Returns 0 on success or an error code if the data could not be acquired.
401 * The chain element is locked either way.
403 * The lock is allowed to recurse, multiple locking ops will aggregate
404 * the requested resolve types. Once data is assigned it will not be
405 * removed until the last unlock.
407 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
408 * (typically used to avoid device/logical buffer
411 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
412 * the INITIAL-create state (indirect blocks only).
414 * Do not resolve data elements for DATA chains.
415 * (typically used to avoid device/logical buffer
418 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
420 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
421 * it will be locked exclusive.
423 * NOTE: Embedded elements (volume header, inodes) are always resolved
426 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
427 * element will instantiate and zero its buffer, and flush it on
430 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
431 * so as not to instantiate a device buffer, which could alias against
432 * a logical file buffer. However, if ALWAYS is specified the
433 * device buffer will be instantiated anyway.
435 * WARNING! If data must be fetched a shared lock will temporarily be
436 * upgraded to exclusive. However, a deadlock can occur if
437 * the caller owns more than one shared lock.
/*
 * Ref and lock a chain, resolving its media data per the RESOLVE_* mode
 * (see the block comment above for the full contract).
 *
 * NOTE(review): the extraction of this file dropped many original lines
 * (the embedded line numbers jump): local declarations (bbytes, pbase,
 * peof, boff, bdata, ostate, error), braces, breaks, gotos and returns
 * are missing from this view.  The surviving code is left byte-identical
 * rather than guessed at — reconstruct from the repository.
 */
440 hammer2_chain_lock(hammer2_chain_t *chain, int how)
442 hammer2_mount_t *hmp;
443 hammer2_chain_core_t *core;
444 hammer2_blockref_t *bref;
454 * Ref and lock the element. Recursive locks are allowed.
456 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
457 hammer2_chain_ref(chain);
459 KKASSERT(hmp != NULL);
462 * Get the appropriate lock.
465 if (how & HAMMER2_RESOLVE_SHARED)
466 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
468 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
471 * If we already have a valid data pointer no further action is
478 * Do we have to resolve the data?
480 switch(how & HAMMER2_RESOLVE_MASK) {
481 case HAMMER2_RESOLVE_NEVER:
483 case HAMMER2_RESOLVE_MAYBE:
484 if (chain->flags & HAMMER2_CHAIN_INITIAL)
486 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
488 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
491 case HAMMER2_RESOLVE_ALWAYS:
496 * Upgrade to an exclusive lock so we can safely manipulate the
497 * buffer cache. If another thread got to it before us we
500 ostate = ccms_thread_lock_upgrade(&core->cst);
502 ccms_thread_lock_downgrade(&core->cst, ostate);
507 * We must resolve to a device buffer, either by issuing I/O or
508 * by creating a zero-fill element. We do not mark the buffer
509 * dirty when creating a zero-fill element (the hammer2_chain_modify()
510 * API must still be used to do that).
512 * The device buffer is variable-sized in powers of 2 down
513 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
514 * chunk always contains buffers of the same size. (XXX)
516 * The minimum physical IO size may be larger than the variable
521 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
522 bbytes = HAMMER2_MINIOSIZE;
523 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
524 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
525 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
526 KKASSERT(pbase != 0);
529 * The getblk() optimization can only be used on newly created
530 * elements if the physical block size matches the request.
532 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
533 chain->bytes == bbytes) {
534 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
536 } else if (hammer2_cluster_enable) {
537 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
538 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
541 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
545 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
546 (intmax_t)pbase, error);
549 ccms_thread_lock_downgrade(&core->cst, ostate);
554 * Zero the data area if the chain is in the INITIAL-create state.
555 * Mark the buffer for bdwrite().
557 bdata = (char *)chain->bp->b_data + boff;
558 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
559 bzero(bdata, chain->bytes);
560 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
564 * Setup the data pointer, either pointing it to an embedded data
565 * structure and copying the data from the buffer, or pointing it
568 * The buffer is not retained when copying to an embedded data
569 * structure in order to avoid potential deadlocks or recursions
570 * on the same physical buffer.
572 switch (bref->type) {
573 case HAMMER2_BREF_TYPE_VOLUME:
575 * Copy data from bp to embedded buffer
577 panic("hammer2_chain_lock: called on unresolved volume header");
580 KKASSERT(pbase == 0);
581 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
582 bcopy(bdata, &hmp->voldata, chain->bytes);
583 chain->data = (void *)&hmp->voldata;
588 case HAMMER2_BREF_TYPE_INODE:
590 * Copy data from bp to embedded buffer, do not retain the
593 KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
594 chain->data = kmalloc(sizeof(chain->data->ipdata),
595 hmp->minode, M_WAITOK | M_ZERO);
596 bcopy(bdata, &chain->data->ipdata, chain->bytes);
600 case HAMMER2_BREF_TYPE_INDIRECT:
601 case HAMMER2_BREF_TYPE_DATA:
602 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
603 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
604 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
607 * Point data at the device buffer and leave bp intact.
609 chain->data = (void *)bdata;
614 * Make sure the bp is not specifically owned by this thread before
615 * restoring to a possibly shared lock, so another hammer2 thread
619 BUF_KERNPROC(chain->bp);
620 ccms_thread_lock_downgrade(&core->cst, ostate);
625 * Unlock and deref a chain element.
627 * On the last lock release any non-embedded data (chain->bp) will be
/*
 * Unlock and deref a chain element, disposing of any resolved device
 * buffer on the last lock release (see the block comment above).
 *
 * NOTE(review): the extraction of this file dropped many original lines
 * (the embedded line numbers jump): declarations (counterp), returns,
 * breaks, braces and the bdwrite()/flush tail are missing from this
 * view.  The surviving code is left byte-identical rather than guessed
 * at — reconstruct from the repository.
 */
631 hammer2_chain_unlock(hammer2_chain_t *chain)
633 hammer2_chain_core_t *core = chain->core;
637 * Release the CST lock but with a special 1->0 transition case
638 * to also drop the refs on chain. Multiple CST locks only
640 * Returns non-zero if lock references remain. When zero is
641 * returned the last lock reference is retained and any shared
642 * lock is upgraded to an exclusive lock for final disposition.
644 if (ccms_thread_unlock_zero(&core->cst)) {
645 KKASSERT(chain->refs > 1);
646 atomic_add_int(&chain->refs, -1);
651 * Shortcut the case if the data is embedded or not resolved.
653 * Do NOT NULL out chain->data (e.g. inode data), it might be
656 * The DIRTYBP flag is non-applicable in this situation and can
657 * be cleared to keep the flags state clean.
659 if (chain->bp == NULL) {
660 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
661 ccms_thread_unlock(&core->cst);
662 hammer2_chain_drop(chain);
	/* statistics: pick the counter matching dirty/IOFLUSH state and type */
669 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
671 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
672 switch(chain->bref.type) {
673 case HAMMER2_BREF_TYPE_DATA:
674 counterp = &hammer2_ioa_file_write;
676 case HAMMER2_BREF_TYPE_INODE:
677 counterp = &hammer2_ioa_meta_write;
679 case HAMMER2_BREF_TYPE_INDIRECT:
680 counterp = &hammer2_ioa_indr_write;
682 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
683 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
684 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
685 counterp = &hammer2_ioa_fmap_write;
688 counterp = &hammer2_ioa_volu_write;
693 switch(chain->bref.type) {
694 case HAMMER2_BREF_TYPE_DATA:
695 counterp = &hammer2_iod_file_write;
697 case HAMMER2_BREF_TYPE_INODE:
698 counterp = &hammer2_iod_meta_write;
700 case HAMMER2_BREF_TYPE_INDIRECT:
701 counterp = &hammer2_iod_indr_write;
703 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
704 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
705 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
706 counterp = &hammer2_iod_fmap_write;
709 counterp = &hammer2_iod_volu_write;
718 * If a device buffer was used for data be sure to destroy the
719 * buffer when we are done to avoid aliases (XXX what about the
720 * underlying VM pages?).
722 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
725 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
726 chain->bp->b_flags |= B_RELBUF;
729 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
730 * or not. The flag will get re-set when chain_modify() is called,
731 * even if MODIFIED is already set, allowing the OS to retire the
732 * buffer independent of a hammer2 flush.
735 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
736 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
737 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
738 atomic_clear_int(&chain->flags,
739 HAMMER2_CHAIN_IOFLUSH);
740 chain->bp->b_flags |= B_RELBUF;
741 cluster_awrite(chain->bp);
743 chain->bp->b_flags |= B_CLUSTEROK;
747 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
748 atomic_clear_int(&chain->flags,
749 HAMMER2_CHAIN_IOFLUSH);
750 chain->bp->b_flags |= B_RELBUF;
753 /* bp might still be dirty */
758 ccms_thread_unlock(&core->cst);
759 hammer2_chain_drop(chain);
763 * Resize the chain's physical storage allocation in-place. This may
764 * replace the passed-in chain with a new chain.
766 * Chains can be resized smaller without reallocating the storage.
767 * Resizing larger will reallocate the storage.
769 * Must be passed an exclusively locked parent and chain, returns a new
770 * exclusively locked chain at the same index and unlocks the old chain.
771 * Flushes the buffer if necessary.
773 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
774 * to avoid instantiating a device buffer that conflicts with the vnode
775 * data buffer. That is, the passed-in bp is a logical buffer, whereas
776 * any chain-oriented bp would be a device buffer.
778 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
779 * XXX return error if cannot resize.
/*
 * Resize the chain's physical storage allocation in-place (see the
 * block comment above for the full contract).
 *
 * NOTE(review): the extraction of this file dropped many original lines
 * (the embedded line numbers jump): local declarations (obytes, nbytes,
 * bbytes, pbase, boff, nbp, bdata, error), braces, returns and the
 * buffer-swap tail are missing from this view.  The surviving code is
 * left byte-identical rather than guessed at — reconstruct from the
 * repository.
 */
782 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
784 hammer2_chain_t *parent, hammer2_chain_t **chainp,
785 int nradix, int flags)
787 hammer2_mount_t *hmp = trans->hmp;
788 hammer2_chain_t *chain = *chainp;
801 * Only data and indirect blocks can be resized for now.
802 * (The volu root, inodes, and freemap elements use a fixed size).
804 KKASSERT(chain != &hmp->vchain);
805 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
806 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
809 * Nothing to do if the element is already the proper size
811 obytes = chain->bytes;
812 nbytes = 1U << nradix;
813 if (obytes == nbytes)
817 * Delete the old chain and duplicate it at the same (parent, index),
818 * returning a new chain. This allows the old chain to still be
819 * used by the flush code. Duplication occurs in-place.
821 * NOTE: If we are not crossing a synchronization point the
822 * duplication code will simply reuse the existing chain
825 hammer2_chain_delete(trans, parent, chain);
826 hammer2_chain_duplicate(trans, parent, chain->index, &chain, NULL);
829 * Set MODIFIED and add a chain ref to prevent destruction. Both
830 * modified flags share the same ref. (duplicated chains do not
831 * start out MODIFIED unless possibly if the duplication code
832 * decided to reuse the existing chain as-is).
834 * If the chain is already marked MODIFIED then we can safely
835 * return the previous allocation to the pool without having to
836 * worry about snapshots. XXX check flush synchronization.
838 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
839 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
840 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
841 hammer2_chain_ref(chain);
844 hammer2_freemap_free(hmp, chain->bref.data_off,
850 * Relocate the block, even if making it smaller (because different
851 * block sizes may be in different regions).
853 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
855 chain->bytes = nbytes;
856 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
859 * The device buffer may be larger than the allocation size.
861 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
862 bbytes = HAMMER2_MINIOSIZE;
863 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
864 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
866 KKASSERT(chain->bp == NULL);
869 * Only copy the data if resolved, otherwise the caller is
872 * XXX handle device-buffer resizing case too. Right now we
873 * only handle logical buffer resizing.
876 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
877 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
878 KKASSERT(chain != &hmp->vchain); /* safety */
881 * The getblk() optimization can only be used if the
882 * physical block size matches the request.
884 if (nbytes == bbytes) {
885 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
888 error = bread(hmp->devvp, pbase, bbytes, &nbp);
889 KKASSERT(error == 0);
891 bdata = (char *)nbp->b_data + boff;
894 * chain->bp and chain->data represent the on-disk version
895 * of the data, whereas the passed-in bp is usually a
896 * more up-to-date logical buffer. However, there is no
897 * need to synchronize the more up-to-date data in (bp)
898 * as it will do that on its own when it flushes.
900 if (nbytes < obytes) {
901 bcopy(chain->data, bdata, nbytes);
903 bcopy(chain->data, bdata, obytes);
904 bzero(bdata + obytes, nbytes - obytes);
908 * NOTE: The INITIAL state of the chain is left intact.
909 * We depend on hammer2_chain_modify() to do the
912 * NOTE: We set B_NOCACHE to throw away the previous bp and
913 * any VM backing store, even if it was dirty.
914 * Otherwise we run the risk of a logical/device
915 * conflict on reallocation.
917 chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
920 chain->data = (void *)bdata;
921 hammer2_chain_modify(trans, chain, 0);
926 * Make sure the chain is marked MOVED and SUBMOD is set in the
927 * parent(s) so the adjustments are picked up by flush.
929 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
930 hammer2_chain_ref(chain);
931 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
933 hammer2_chain_parent_setsubmod(chain);
938 * Convert a locked chain that was retrieved read-only to read-write,
939 * duplicating it if necessary to satisfy active flush points.
941 * If not already marked modified a new physical block will be allocated
942 * and assigned to the bref.
944 * If already modified and the new modification crosses a synchronization
945 * point the chain is duplicated in order to allow the flush to synchronize
946 * the old chain. The new chain replaces the old.
948 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
949 * level or the COW operation will not work.
951 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
952 * run the data through the device buffers.
954 * This function may return a different chain than was passed, in which case
955 * the old chain will be unlocked and the new chain will be locked.
957 hammer2_inode_data_t *
958 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
962 hammer2_chain_t *ochain;
966 hammer2_chain_modify(trans, ip->chain, flags);
968 if (ochain != ip->chain) {
969 hammer2_chain_ref(ip->chain);
970 hammer2_chain_drop(ochain);
973 return(&ip->chain->data->ipdata);
/*
 * Convert a locked chain to writable (copy-on-write), allocating a new
 * physical block unless already MODIFIED (see the block comment above).
 *
 * NOTE(review): the extraction of this file dropped many original lines
 * (the embedded line numbers jump): local declarations (bbytes, pbase,
 * boff, nbp, bdata, error), braces, breaks, returns/gotos and parts of
 * the COW tail are missing from this view.  The surviving code is left
 * byte-identical rather than guessed at — reconstruct from the
 * repository.
 */
977 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
979 hammer2_mount_t *hmp = trans->hmp;
988 * modify_tid is only updated for primary modifications, not for
989 * propagated brefs. mirror_tid will be updated regardless during
990 * the flush, no need to set it here.
992 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
993 chain->bref.modify_tid = trans->sync_tid;
996 * If the chain is already marked MODIFIED we can usually just
999 * WARNING! It is possible that a prior lock/modify sequence
1000 * retired the buffer. During this lock/modify sequence
1001 * MODIFIED may still be set but the buffer could wind up
1002 * clean. Since the caller is going to modify the buffer
1003 * further we have to be sure that DIRTYBP is set again.
1005 * WARNING! Currently the caller is responsible for handling
1006 * any delete/duplication roll of the chain to account
1007 * for modifications crossing synchronization points.
1009 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1010 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1011 chain->bp == NULL) {
1014 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1019 * Set MODIFIED and add a chain ref to prevent destruction. Both
1020 * modified flags share the same ref.
1022 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1023 hammer2_chain_ref(chain);
1026 * Adjust chain->modify_tid so the flusher knows when the
1027 * modification occurred.
1029 chain->modify_tid = trans->sync_tid;
1032 * We must allocate the copy-on-write block.
1034 * If the data is embedded no other action is required.
1036 * If the data is not embedded we acquire and clear the
1037 * new block. If chain->data is not NULL we then do the
1038 * copy-on-write. chain->data will then be repointed to the new
1039 * buffer and the old buffer will be released.
1041 * For newly created elements with no prior allocation we go
1042 * through the copy-on-write steps except without the copying part.
1044 if (chain != &hmp->vchain) {
1045 if ((hammer2_debug & 0x0001) &&
1046 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
1047 kprintf("Replace %d\n", chain->bytes);
1049 chain->bref.data_off =
1050 hammer2_freemap_alloc(hmp, chain->bref.type,
1052 /* XXX failed allocation */
1056 * If data instantiation is optional and the chain has no current
1057 * data association (typical for DATA and newly-created INDIRECT
1058 * elements), don't instantiate the buffer now.
1060 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
1065 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
1066 * written-out on unlock. This bit is independent of the MODIFIED
1067 * bit because the chain may still need meta-data adjustments done
1068 * by virtue of MODIFIED for its parent, and the buffer can be
1069 * flushed out (possibly multiple times) by the OS before that.
1071 * Clearing the INITIAL flag (for indirect blocks) indicates that
1072 * a zero-fill buffer has been instantiated.
1074 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1075 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1078 * We currently should never instantiate a device buffer for a
1079 * file data chain. (We definitely can for a freemap chain).
1081 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1084 * Execute COW operation
1086 switch(chain->bref.type) {
1087 case HAMMER2_BREF_TYPE_VOLUME:
1088 case HAMMER2_BREF_TYPE_INODE:
1090 * The data is embedded, no copy-on-write operation is
1093 KKASSERT(chain->bp == NULL);
1095 case HAMMER2_BREF_TYPE_DATA:
1096 case HAMMER2_BREF_TYPE_INDIRECT:
1097 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1098 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1099 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1101 * Perform the copy-on-write operation
1103 KKASSERT(chain != &hmp->vchain); /* safety */
1105 * The device buffer may be larger than the allocation size.
1107 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
1108 bbytes = HAMMER2_MINIOSIZE;
1109 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1110 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1113 * The getblk() optimization can only be used if the
1114 * physical block size matches the request.
1116 if (chain->bytes == bbytes) {
1117 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
1120 error = bread(hmp->devvp, pbase, bbytes, &nbp);
1121 KKASSERT(error == 0);
1123 bdata = (char *)nbp->b_data + boff;
1126 * Copy or zero-fill on write depending on whether
1127 * chain->data exists or not.
1130 bcopy(chain->data, bdata, chain->bytes);
1131 KKASSERT(chain->bp != NULL);
1133 bzero(bdata, chain->bytes);
1136 chain->bp->b_flags |= B_RELBUF;
1140 chain->data = bdata;
1143 panic("hammer2_chain_modify: illegal non-embedded type %d",
1149 if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
1150 hammer2_chain_parent_setsubmod(chain);
1154 * Mark the volume as having been modified. This short-cut version
1155 * does not have to lock the volume's chain, which allows the ioctl
1156 * code to make adjustments to connections without deadlocking. XXX
1158 * No ref is made on vchain when flagging it MODIFIED.
1161 hammer2_modify_volume(hammer2_mount_t *hmp)
1163 hammer2_voldata_lock(hmp);
1164 hammer2_voldata_unlock(hmp, 1);
/*
 * NOTE(review): lossy extraction -- comment delimiters and the struct's
 * closing brace are missing.  hammer2_chain_find_cmp() below reads an
 * 'index' member, so the struct presumably also declares 'int index;'
 * on a line dropped by the extraction -- confirm against the original.
 */
1168 * Locate an in-memory chain. The parent must be locked. The in-memory
1169 * chain is returned with a reference and without a lock, or NULL
1172 * This function returns the chain at the specified index with the highest
1173 * delete_tid. The caller must check whether the chain is flagged
1174 * CHAIN_DELETED or not.
1176 * NOTE: If no chain is found the caller usually must check the on-media
1177 * array to determine if a blockref exists at the index.
1179 struct hammer2_chain_find_info {
1180 hammer2_chain_t *best;
1181 hammer2_tid_t delete_tid;
/*
 * RB_SCAN comparator: orders children by chain->index against the target
 * index in the find_info.  NOTE(review): the return statements
 * (presumably -1 / 1 / 0) were dropped by the lossy extraction.
 */
1187 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1189 struct hammer2_chain_find_info *info = data;
1191 if (child->index < info->index)
1193 if (child->index > info->index)
/*
 * RB_SCAN callback: tracks the matching child with the highest delete_tid.
 * NOTE(review): the line recording the winner (presumably
 * 'info->best = child;') is missing from this extraction.
 */
1200 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1202 struct hammer2_chain_find_info *info = data;
1204 if (info->delete_tid < child->delete_tid) {
1205 info->delete_tid = child->delete_tid;
/*
 * Scan the parent's RBTREE for the chain at 'index' with the highest
 * delete_tid; caller must hold parent->core->cst.spin (see
 * hammer2_chain_find() below).  NOTE(review): lossy extraction -- the
 * 'info.index = index;' setup, the RB_SCAN's final '&info' argument and
 * the 'return (info.best);' are missing here.
 */
1213 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1215 struct hammer2_chain_find_info info;
1218 info.delete_tid = 0;
1221 RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1222 hammer2_chain_find_cmp, hammer2_chain_find_callback,
/*
 * Spinlocked wrapper around hammer2_chain_find_locked(): refs the result
 * (if any) before dropping the spinlock so the caller gets a stable,
 * referenced-but-unlocked chain.  NOTE(review): the NULL check guarding
 * the ref and the trailing 'return (chain);' were dropped by the
 * extraction.
 */
1229 hammer2_chain_find(hammer2_chain_t *parent, int index)
1231 hammer2_chain_t *chain;
1233 spin_lock(&parent->core->cst.spin);
1234 chain = hammer2_chain_find_locked(parent, index);
1236 hammer2_chain_ref(chain);
1237 spin_unlock(&parent->core->cst.spin);
/*
 * NOTE(review): lossy extraction -- braces, 'break;' statements, 'else'
 * lines and several 'return (chain);' paths are missing.  Code kept
 * byte-identical; comments only.  Overall flow visible here:
 *   1) compute lock mode from flags,
 *   2) RB_FIND the cached in-memory chain under the core spinlock,
 *   3) on miss, locate the on-media blockref in the parent's array,
 *   4) alloc a chain, RB_INSERT it (retrying on collision since the
 *      caller may only hold a shared parent lock), then lock/resolve.
 */
1243 * Return a locked chain structure with all associated data acquired.
1244 * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1246 * Caller must hold the parent locked shared or exclusive since we may
1247 * need the parent's bref array to find our block.
1249 * The returned child is locked as requested. If NOLOCK, the returned
1250 * child is still at least referenced.
1253 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1255 hammer2_blockref_t *bref;
1256 hammer2_mount_t *hmp = parent->hmp;
1257 hammer2_chain_t *chain;
1258 hammer2_chain_t dummy;
1262 * Figure out how to lock. MAYBE can be used to optimized
1263 * the initial-create state for indirect blocks.
1265 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1266 how = HAMMER2_RESOLVE_NEVER;
1268 how = HAMMER2_RESOLVE_MAYBE;
1269 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1270 how |= HAMMER2_RESOLVE_SHARED;
1274 * First see if we have a (possibly modified) chain element cached
1275 * for this (parent, index). Acquire the data if necessary.
1277 * If chain->data is non-NULL the chain should already be marked
/* dummy key: MAX_TID makes RB_FIND land on the live (highest delete_tid)
 * element for this index -- presumably, given the find_cmp ordering above. */
1281 dummy.index = index;
1282 dummy.delete_tid = HAMMER2_MAX_TID;
1283 spin_lock(&parent->core->cst.spin);
1284 chain = RB_FIND(hammer2_chain_tree, &parent->core->rbtree, &dummy);
1286 hammer2_chain_ref(chain);
1287 spin_unlock(&parent->core->cst.spin);
1288 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1289 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1292 spin_unlock(&parent->core->cst.spin);
1295 * The parent chain must not be in the INITIAL state.
1297 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1298 panic("hammer2_chain_get: Missing bref(1)");
1303 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1304 * the parent's bref to determine where and how big the array is).
1306 switch(parent->bref.type) {
1307 case HAMMER2_BREF_TYPE_INODE:
1308 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1309 bref = &parent->data->ipdata.u.blockset.blockref[index];
1311 case HAMMER2_BREF_TYPE_INDIRECT:
1312 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1313 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1314 KKASSERT(parent->data != NULL);
1315 KKASSERT(index >= 0 &&
1316 index < parent->bytes / sizeof(hammer2_blockref_t));
1317 bref = &parent->data->npdata.blockref[index];
1319 case HAMMER2_BREF_TYPE_VOLUME:
1320 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1321 bref = &hmp->voldata.sroot_blockset.blockref[index];
1325 panic("hammer2_chain_get: unrecognized blockref type: %d",
1328 if (bref->type == 0) {
1329 panic("hammer2_chain_get: Missing bref(2)");
1334 * Allocate a chain structure representing the existing media
1335 * entry. Resulting chain has one ref and is not locked.
1337 * The locking operation we do later will issue I/O to read it.
1339 chain = hammer2_chain_alloc(hmp, bref);
1340 hammer2_chain_core_alloc(chain, NULL); /* ref'd chain returned */
1343 * Link the chain into its parent. A spinlock is required to safely
1344 * access the RBTREE, and it is possible to collide with another
1345 * hammer2_chain_get() operation because the caller might only hold
1346 * a shared lock on the parent.
1348 KKASSERT(parent->refs > 0);
1349 spin_lock(&parent->core->cst.spin);
1350 chain->parent = parent;
1351 chain->index = index;
1352 if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, chain)) {
/* Insert collision: another racer linked this index first; drop our
 * chain.  NOTE(review): the retry/goto that presumably follows was
 * dropped by the extraction. */
1353 chain->parent = NULL;
1355 spin_unlock(&parent->core->cst.spin);
1356 hammer2_chain_drop(chain);
1359 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1360 hammer2_chain_ref(parent); /* chain->parent ref */
1361 spin_unlock(&parent->core->cst.spin);
1364 * Our new chain is referenced but NOT locked. Lock the chain
1365 * below. The locking operation also resolves its data.
1367 * If NOLOCK is set the release will release the one-and-only lock.
1369 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1370 hammer2_chain_lock(chain, how); /* recusive lock */
1371 hammer2_chain_drop(chain); /* excess ref */
/*
 * Lock (*parent) for a lookup sequence, shared if the caller passed
 * HAMMER2_LOOKUP_SHARED, exclusive otherwise; RESOLVE_ALWAYS forces the
 * data to be resolved.  NOTE(review): lossy extraction -- the 'else'
 * line, braces and any trailing statements are missing; code kept
 * byte-identical.
 */
1377 * Lookup initialization/completion API
1380 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1382 if (flags & HAMMER2_LOOKUP_SHARED) {
1383 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1384 HAMMER2_RESOLVE_SHARED);
1386 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
/*
 * Counterpart of hammer2_chain_lookup_init(): releases the parent lock
 * taken for the lookup sequence.  NOTE(review): braces/return type lines
 * missing from this extraction.
 */
1392 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1395 hammer2_chain_unlock(parent);
/*
 * NOTE(review): lossy extraction -- braces, 'break;'s, 'goto again'/'return'
 * lines and comment delimiters are missing throughout.  Code kept
 * byte-identical; comments only.  Visible flow:
 *   1) recurse (*parentp) upward until the parent's keyspace encloses
 *      [key_beg,key_end], following duplink past DELETED parents,
 *   2) select the parent's blockref array by bref type,
 *   3) linearly scan for an overlapping, non-deleted element,
 *   4) chain_get() the hit; if it is an indirect/freemap node it becomes
 *      the new (*parentp) and the search loops downward.
 */
1400 * Locate any key between key_beg and key_end inclusive. (*parentp)
1401 * typically points to an inode but can also point to a related indirect
1402 * block and this function will recurse upwards and find the inode again.
1404 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1405 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1406 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1408 * (*parentp) must be exclusively locked and referenced and can be an inode
1409 * or an existing indirect block within the inode.
1411 * On return (*parentp) will be modified to point at the deepest parent chain
1412 * element encountered during the search, as a helper for an insertion or
1413 * deletion. The new (*parentp) will be locked and referenced and the old
1414 * will be unlocked and dereferenced (no change if they are both the same).
1416 * The matching chain will be returned exclusively locked. If NOLOCK is
1417 * requested the chain will be returned only referenced.
1419 * NULL is returned if no match was found, but (*parentp) will still
1420 * potentially be adjusted.
1422 * This function will also recurse up the chain if the key is not within the
1423 * current parent's range. (*parentp) can never be set to NULL. An iteration
1424 * can simply allow (*parentp) to float inside the loop.
1427 hammer2_chain_lookup(hammer2_chain_t **parentp,
1428 hammer2_key_t key_beg, hammer2_key_t key_end,
1431 hammer2_mount_t *hmp;
1432 hammer2_chain_t *parent;
1433 hammer2_chain_t *chain;
1434 hammer2_chain_t *tmp;
1435 hammer2_blockref_t *base;
1436 hammer2_blockref_t *bref;
1437 hammer2_key_t scan_beg;
1438 hammer2_key_t scan_end;
1441 int how_always = HAMMER2_RESOLVE_ALWAYS;
1442 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1444 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1445 how_maybe |= HAMMER2_RESOLVE_SHARED;
1446 how_always |= HAMMER2_RESOLVE_SHARED;
1450 * Recurse (*parentp) upward if necessary until the parent completely
1451 * encloses the key range or we hit the inode.
1456 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1457 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1458 scan_beg = parent->bref.key;
1459 scan_end = scan_beg +
1460 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1461 if (key_beg >= scan_beg && key_end <= scan_end)
1464 * XXX flush synchronization
/* Skip deleted duplicated parents via the duplink list.
 * NOTE(review): the loop body advancing 'tmp' is missing here. */
1466 tmp = parent->parent;
1467 while (tmp->duplink &&
1468 (tmp->flags & HAMMER2_CHAIN_DELETED)) {
1471 hammer2_chain_ref(tmp); /* ref new parent */
1472 hammer2_chain_unlock(parent); /* unlock old parent */
1473 /* lock new parent */
1474 hammer2_chain_lock(tmp, how_maybe |
1475 HAMMER2_RESOLVE_NOREF);
1476 *parentp = parent = tmp; /* new parent */
1481 * Locate the blockref array. Currently we do a fully associative
1482 * search through the array.
1484 switch(parent->bref.type) {
1485 case HAMMER2_BREF_TYPE_INODE:
1487 * Special shortcut for embedded data returns the inode
1488 * itself. Callers must detect this condition and access
1489 * the embedded data (the strategy code does this for us).
1491 * This is only applicable to regular files and softlinks.
1493 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1494 if (flags & HAMMER2_LOOKUP_NOLOCK)
1495 hammer2_chain_ref(parent);
1497 hammer2_chain_lock(parent, how_always);
1500 base = &parent->data->ipdata.u.blockset.blockref[0];
1501 count = HAMMER2_SET_COUNT;
1503 case HAMMER2_BREF_TYPE_INDIRECT:
1504 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1505 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1507 * Optimize indirect blocks in the INITIAL state to avoid
/* INITIAL indirect blocks have no media data yet; base stays NULL and
 * only in-memory children are considered -- presumably, the 'base = NULL'
 * assignment is on a dropped line. */
1510 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1513 if (parent->data == NULL)
1514 panic("parent->data is NULL");
1515 base = &parent->data->npdata.blockref[0];
1517 count = parent->bytes / sizeof(hammer2_blockref_t);
1519 case HAMMER2_BREF_TYPE_VOLUME:
1520 base = &hmp->voldata.sroot_blockset.blockref[0];
1521 count = HAMMER2_SET_COUNT;
1524 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1526 base = NULL; /* safety */
1527 count = 0; /* safety */
1531 * If the element and key overlap we use the element.
1533 * NOTE! Deleted elements are effectively invisible. Deletions
1534 * proactively clear the parent bref to the deleted child
1535 * so we do not try to shadow here to avoid parent updates
1536 * (which would be difficult since multiple deleted elements
1537 * might represent different flush synchronization points).
1540 for (i = 0; i < count; ++i) {
1541 tmp = hammer2_chain_find(parent, i);
1543 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1544 hammer2_chain_drop(tmp);
1548 KKASSERT(bref->type != 0);
1549 } else if (base == NULL || base[i].type == 0) {
1554 scan_beg = bref->key;
1555 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1557 hammer2_chain_drop(tmp);
1558 if (key_beg <= scan_end && key_end >= scan_beg)
/* No hit in this parent: an exact-key lookup fails (NULL), a ranged
 * lookup falls through to chain_next() to continue at the next parent. */
1562 if (key_beg == key_end)
1564 return (hammer2_chain_next(parentp, NULL,
1565 key_beg, key_end, flags));
1569 * Acquire the new chain element. If the chain element is an
1570 * indirect block we must search recursively.
1572 * It is possible for the tmp chain above to be removed from
1573 * the RBTREE but the parent lock ensures it would not have been
1574 * destroyed from the media, so the chain_get() code will simply
1575 * reload it from the media in that case.
1577 chain = hammer2_chain_get(parent, i, flags);
1582 * If the chain element is an indirect block it becomes the new
1583 * parent and we loop on it.
1585 * The parent always has to be locked with at least RESOLVE_MAYBE
1586 * so we can access its data. It might need a fixup if the caller
1587 * passed incompatible flags. Be careful not to cause a deadlock
1588 * as a data-load requires an exclusive lock.
1590 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1591 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1592 hammer2_chain_unlock(parent);
1593 *parentp = parent = chain;
1594 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1595 hammer2_chain_lock(chain, how_maybe |
1596 HAMMER2_RESOLVE_NOREF);
1597 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1598 chain->data == NULL) {
/* Fixup: re-lock with MAYBE so the indirect block's data is resolved
 * before it is used as the new parent. */
1599 hammer2_chain_ref(chain);
1600 hammer2_chain_unlock(chain);
1601 hammer2_chain_lock(chain, how_maybe |
1602 HAMMER2_RESOLVE_NOREF);
1608 * All done, return the chain
/*
 * NOTE(review): lossy extraction -- braces, 'break;'s, 'return'/'goto'
 * lines and comment delimiters are missing.  Code kept byte-identical;
 * comments only.  Visible flow mirrors hammer2_chain_lookup(): compute
 * the next index from the passed-in chain (or step to the next parent
 * when exhausted), rescan the parent's blockref array from that index,
 * and recurse downward through indirect/freemap-node hits.
 */
1614 * After having issued a lookup we can iterate all matching keys.
1616 * If chain is non-NULL we continue the iteration from just after it's index.
1618 * If chain is NULL we assume the parent was exhausted and continue the
1619 * iteration at the next parent.
1621 * parent must be locked on entry and remains locked throughout. chain's
1622 * lock status must match flags. Chain is always at least referenced.
1625 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1626 hammer2_key_t key_beg, hammer2_key_t key_end,
1629 hammer2_mount_t *hmp;
1630 hammer2_chain_t *parent;
1631 hammer2_chain_t *tmp;
1632 hammer2_blockref_t *base;
1633 hammer2_blockref_t *bref;
1634 hammer2_key_t scan_beg;
1635 hammer2_key_t scan_end;
1637 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1640 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1641 how_maybe |= HAMMER2_RESOLVE_SHARED;
1648 * Calculate the next index and recalculate the parent if necessary.
1652 * Continue iteration within current parent. If not NULL
1653 * the passed-in chain may or may not be locked, based on
1654 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
/* Release the caller's hold on 'chain' per the NOLOCK convention:
 * drop the ref when NOLOCK (only referenced), else unlock. */
1657 i = chain->index + 1;
1658 if (flags & HAMMER2_LOOKUP_NOLOCK)
1659 hammer2_chain_drop(chain);
1661 hammer2_chain_unlock(chain);
1664 * Any scan where the lookup returned degenerate data embedded
1665 * in the inode has an invalid index and must terminate.
1667 if (chain == parent)
1670 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1671 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1673 * We reached the end of the iteration.
1678 * Continue iteration with next parent unless the current
1679 * parent covers the range.
1681 scan_beg = parent->bref.key;
1682 scan_end = scan_beg +
1683 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1684 if (key_beg >= scan_beg && key_end <= scan_end)
1687 i = parent->index + 1;
1689 * XXX flush synchronization
/* Step up to the grandparent, skipping deleted duplicated parents via
 * duplink.  NOTE(review): the loop body advancing 'tmp' is missing. */
1691 tmp = parent->parent;
1692 while (tmp->duplink &&
1693 (tmp->flags & HAMMER2_CHAIN_DELETED)) {
1696 hammer2_chain_ref(tmp); /* ref new parent */
1697 hammer2_chain_unlock(parent); /* unlock old parent */
1698 /* lock new parent */
1699 hammer2_chain_lock(tmp, how_maybe |
1700 HAMMER2_RESOLVE_NOREF);
1701 *parentp = parent = tmp;
1706 * Locate the blockref array. Currently we do a fully associative
1707 * search through the array.
1709 switch(parent->bref.type) {
1710 case HAMMER2_BREF_TYPE_INODE:
1711 base = &parent->data->ipdata.u.blockset.blockref[0];
1712 count = HAMMER2_SET_COUNT;
1714 case HAMMER2_BREF_TYPE_INDIRECT:
1715 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1716 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1717 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1720 KKASSERT(parent->data != NULL);
1721 base = &parent->data->npdata.blockref[0];
1723 count = parent->bytes / sizeof(hammer2_blockref_t);
1725 case HAMMER2_BREF_TYPE_VOLUME:
1726 base = &hmp->voldata.sroot_blockset.blockref[0];
1727 count = HAMMER2_SET_COUNT;
1730 panic("hammer2_chain_next: unrecognized blockref type: %d",
1732 base = NULL; /* safety */
1733 count = 0; /* safety */
1736 KKASSERT(i <= count);
1739 * Look for the key. If we are unable to find a match and an exact
1740 * match was requested we return NULL. If a range was requested we
1741 * run hammer2_chain_next() to iterate.
1743 * NOTE! Deleted elements are effectively invisible. Deletions
1744 * proactively clear the parent bref to the deleted child
1745 * so we do not try to shadow here to avoid parent updates
1746 * (which would be difficult since multiple deleted elements
1747 * might represent different flush synchronization points).
1751 tmp = hammer2_chain_find(parent, i);
1753 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1754 hammer2_chain_drop(tmp);
1759 } else if (base == NULL || base[i].type == 0) {
1765 scan_beg = bref->key;
1766 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1768 hammer2_chain_drop(tmp);
1769 if (key_beg <= scan_end && key_end >= scan_beg)
1775 * If we couldn't find a match recurse up a parent to continue the
1782 * Acquire the new chain element. If the chain element is an
1783 * indirect block we must search recursively.
1785 chain = hammer2_chain_get(parent, i, flags);
1790 * If the chain element is an indirect block it becomes the new
1791 * parent and we loop on it.
1793 * The parent always has to be locked with at least RESOLVE_MAYBE
1794 * so we can access its data. It might need a fixup if the caller
1795 * passed incompatible flags. Be careful not to cause a deadlock
1796 * as a data-load requires an exclusive lock.
1798 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1799 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1800 hammer2_chain_unlock(parent);
1801 *parentp = parent = chain;
1803 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1804 hammer2_chain_lock(parent, how_maybe |
1805 HAMMER2_RESOLVE_NOREF);
1806 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1807 parent->data == NULL) {
1808 hammer2_chain_ref(parent);
1809 hammer2_chain_unlock(parent);
1810 hammer2_chain_lock(parent, how_maybe |
1811 HAMMER2_RESOLVE_NOREF);
1818 * All done, return chain
/*
 * NOTE(review): lossy extraction -- braces, 'break;'s, 'goto again'/
 * 'return' lines and comment delimiters are missing.  Code kept
 * byte-identical; comments only.  Visible flow:
 *   1) if (*chainp) is NULL, allocate media space + a new chain of the
 *      requested type (inode data is kmalloc'd, DATA stays data-NULL),
 *   2) scan the exclusively-locked parent's blockref array for a free
 *      slot, skipping slots shadowed by live in-memory children,
 *   3) if full, create an indirect block and retry under it,
 *   4) RB_INSERT the chain, mark it MODIFIED (or INITIAL for node
 *      types), set MOVED and propagate SUBMODIFIED upward.
 */
1824 * Create and return a new hammer2 system memory structure of the specified
1825 * key, type and size and insert it RELATIVE TO (PARENT).
1827 * (parent) is typically either an inode or an indirect block, acquired
1828 * acquired as a side effect of issuing a prior failed lookup. parent
1829 * must be locked and held. Do not pass the inode chain to this function
1830 * unless that is the chain returned by the failed lookup.
1832 * (*chainp) is either NULL, a newly allocated chain, or a chain allocated
1833 * via hammer2_chain_duplicate(). When not NULL, the passed-in chain must
1834 * NOT be attached to any parent, and will be attached by this function.
1835 * This mechanic is used by the rename code.
1837 * Non-indirect types will automatically allocate indirect blocks as required
1838 * if the new item does not fit in the current (parent).
1840 * Indirect types will move a portion of the existing blockref array in
1841 * (parent) into the new indirect type and then use one of the free slots
1842 * to emplace the new indirect type.
1844 * A new locked chain element is returned of the specified type. The
1845 * element may or may not have a data area associated with it:
1847 * VOLUME not allowed here
1848 * INODE kmalloc()'d data area is set up
1849 * INDIRECT not allowed here
1850 * DATA no data area will be set-up (caller is expected
1851 * to have logical buffers, we don't want to alias
1852 * the data onto device buffers!).
1854 * Requires an exclusively locked parent.
1857 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
1858 hammer2_chain_t **chainp,
1859 hammer2_key_t key, int keybits, int type, size_t bytes)
1861 hammer2_mount_t *hmp;
1862 hammer2_chain_t *chain;
1863 hammer2_chain_t *child;
1864 hammer2_chain_t *parent = *parentp;
1865 hammer2_blockref_t dummy;
1866 hammer2_blockref_t *base;
1872 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
1876 if (chain == NULL) {
1878 * First allocate media space and construct the dummy bref,
1879 * then allocate the in-memory chain structure.
/* NOTE(review): 'dummy.type = type;' and 'dummy.key = key;' are
 * presumably on lines dropped by the extraction. */
1881 bzero(&dummy, sizeof(dummy));
1884 dummy.keybits = keybits;
1885 dummy.data_off = hammer2_allocsize(bytes);
1886 dummy.methods = parent->bref.methods;
1887 chain = hammer2_chain_alloc(hmp, &dummy);
1888 hammer2_chain_core_alloc(chain, NULL);
1889 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
1893 * We do NOT set INITIAL here (yet). INITIAL is only
1894 * used for indirect blocks.
1896 * Recalculate bytes to reflect the actual media block
/* Decode the radix-encoded allocation size back into a byte count. */
1899 bytes = (hammer2_off_t)1 <<
1900 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1901 chain->bytes = bytes;
1904 case HAMMER2_BREF_TYPE_VOLUME:
1905 panic("hammer2_chain_create: called with volume type");
1907 case HAMMER2_BREF_TYPE_INODE:
1908 KKASSERT(bytes == HAMMER2_INODE_BYTES);
1909 chain->data = kmalloc(sizeof(chain->data->ipdata),
1910 hmp->minode, M_WAITOK | M_ZERO);
1912 case HAMMER2_BREF_TYPE_INDIRECT:
1913 panic("hammer2_chain_create: cannot be used to"
1914 "create indirect block");
1916 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1917 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1918 panic("hammer2_chain_create: cannot be used to"
1919 "create freemap root or node");
1921 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1922 case HAMMER2_BREF_TYPE_DATA:
1924 /* leave chain->data NULL */
1925 KKASSERT(chain->data == NULL);
1930 * Potentially update the chain's key/keybits.
/* Re-attach path ((*chainp) != NULL): adopt the caller's key/keybits. */
1932 chain->bref.key = key;
1933 chain->bref.keybits = keybits;
1938 * Locate a free blockref in the parent's array
1940 switch(parent->bref.type) {
1941 case HAMMER2_BREF_TYPE_INODE:
1942 KKASSERT((parent->data->ipdata.op_flags &
1943 HAMMER2_OPFLAG_DIRECTDATA) == 0);
1944 KKASSERT(parent->data != NULL);
1945 base = &parent->data->ipdata.u.blockset.blockref[0];
1946 count = HAMMER2_SET_COUNT;
1948 case HAMMER2_BREF_TYPE_INDIRECT:
1949 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
1950 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1951 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1954 KKASSERT(parent->data != NULL);
1955 base = &parent->data->npdata.blockref[0];
1957 count = parent->bytes / sizeof(hammer2_blockref_t);
1959 case HAMMER2_BREF_TYPE_VOLUME:
1960 KKASSERT(parent->data != NULL);
1961 base = &hmp->voldata.sroot_blockset.blockref[0];
1962 count = HAMMER2_SET_COUNT;
1965 panic("hammer2_chain_create: unrecognized blockref type: %d",
1972 * Scan for an unallocated bref, also skipping any slots occupied
1973 * by in-memory chain elements that may not yet have been updated
1974 * in the parent's bref array.
1976 * We don't have to hold the spinlock to save an empty slot as
1977 * new slots can only transition from empty if the parent is
1978 * locked exclusively.
1981 spin_lock(&parent->core->cst.spin);
1982 for (i = 0; i < count; ++i) {
1983 child = hammer2_chain_find_locked(parent, i);
/* Slot is free only if no live in-memory child occupies it AND the
 * media bref is empty.  NOTE(review): 'continue'/'break' lines are
 * missing from this extraction. */
1985 if (child->flags & HAMMER2_CHAIN_DELETED)
1991 if (base[i].type == 0)
1994 spin_unlock(&parent->core->cst.spin);
1997 * If no free blockref could be found we must create an indirect
1998 * block and move a number of blockrefs into it. With the parent
1999 * locked we can safely lock each child in order to move it without
2000 * causing a deadlock.
2002 * This may return the new indirect block or the old parent depending
2003 * on where the key falls. NULL is returned on error.
2006 hammer2_chain_t *nparent;
2008 nparent = hammer2_chain_create_indirect(trans, parent,
2011 if (nparent == NULL) {
/* Indirect-block creation failed: free the chain only if we
 * allocated it here (not a caller-passed chain). */
2013 hammer2_chain_free(chain);
2017 if (parent != nparent) {
2018 hammer2_chain_unlock(parent);
2019 parent = *parentp = nparent;
2025 * Link the chain into its parent. Later on we will have to set
2026 * the MOVED bit in situations where we don't mark the new chain
2027 * as being modified.
2029 if (chain->parent != NULL)
2030 panic("hammer2: hammer2_chain_create: chain already connected");
2031 KKASSERT(chain->parent == NULL);
2032 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2034 chain->parent = parent;
2036 KKASSERT(parent->refs > 0);
2037 spin_lock(&parent->core->cst.spin);
2038 if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, chain))
2039 panic("hammer2_chain_link: collision");
2040 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2041 hammer2_chain_ref(parent); /* chain->parent ref */
2042 spin_unlock(&parent->core->cst.spin);
2045 * (allocated) indicates that this is a newly-created chain element
2046 * rather than a renamed chain element.
2048 * In this situation we want to place the chain element in
2049 * the MODIFIED state. The caller expects it to NOT be in the
2052 * The data area will be set up as follows:
2054 * VOLUME not allowed here.
2056 * INODE embedded data are will be set-up.
2058 * INDIRECT not allowed here.
2060 * DATA no data area will be set-up (caller is expected
2061 * to have logical buffers, we don't want to alias
2062 * the data onto device buffers!).
2065 switch(chain->bref.type) {
2066 case HAMMER2_BREF_TYPE_DATA:
2067 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2068 hammer2_chain_modify(trans, chain,
2069 HAMMER2_MODIFY_OPTDATA);
2071 case HAMMER2_BREF_TYPE_INDIRECT:
2072 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2073 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2074 /* not supported in this function */
2075 panic("hammer2_chain_create: bad type");
2076 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2077 hammer2_chain_modify(trans, chain,
2078 HAMMER2_MODIFY_OPTDATA);
2081 hammer2_chain_modify(trans, chain, 0);
2086 * When reconnecting a chain we must set MOVED and setsubmod
2087 * so the flush recognizes that it must update the bref in
2090 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
/* MOVED carries its own ref so the flusher can rely on the chain
 * staying alive until the bit is cleared. */
2091 hammer2_chain_ref(chain);
2092 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2094 hammer2_chain_parent_setsubmod(chain);
/*
 * NOTE(review): lossy extraction -- braces, 'break;'s and comment
 * delimiters are missing.  Code kept byte-identical; comments only.
 * Visible flow: clone the chain structure onto the SAME core (same
 * rbtree/media block), inherit INITIAL and MODIFIED state (allocating a
 * new bref except for TYPE_DATA), copy or share the data by type, splice
 * the clone into the duplink list, then optionally insert it under
 * (parent, i) and flag MOVED/SUBMODIFIED for the flusher.
 */
2104 * Replace (*chainp) with a duplicate. The original *chainp is unlocked
2105 * and the replacement will be returned locked. Both the original and the
2106 * new chain will share the same RBTREE (have the same chain->core), with
2107 * the new chain becoming the 'current' chain (meaning it is the first in
2108 * the linked list at core->chain_first).
2110 * If (parent, i) then the new duplicated chain is inserted under the parent
2111 * at the specified index (the parent must not have a ref at that index).
2113 * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2114 * similar to if it had just been chain_alloc()'d (suitable for passing into
2115 * hammer2_chain_create() after this function returns).
2117 * NOTE! Duplication is used in order to retain the original topology to
2118 * support flush synchronization points. Both the original and the
2119 * new chain will have the same transaction id and thus the operation
2120 * appears atomic on the media.
2123 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2124 hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2126 hammer2_mount_t *hmp = trans->hmp;
2127 hammer2_blockref_t *base;
2128 hammer2_chain_t *ochain;
2129 hammer2_chain_t *nchain;
2130 hammer2_chain_t *scan;
2135 * First create a duplicate of the chain structure, associating
2136 * it with the same core, making it the same size, pointing it
2137 * to the same bref (the same media block), and copying any inline
/* NOTE(review): 'ochain = *chainp;' and the 'if (bref == NULL)' guard
 * around this default are presumably on lines dropped by the extraction. */
2142 bref = &ochain->bref;
2143 nchain = hammer2_chain_alloc(hmp, bref);
2144 hammer2_chain_core_alloc(nchain, ochain->core);
2146 bytes = (hammer2_off_t)1 <<
2147 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2148 nchain->bytes = bytes;
2151 * Be sure to copy the INITIAL flag as well or we could end up
2152 * loading garbage from the bref.
2154 if (ochain->flags & HAMMER2_CHAIN_INITIAL)
2155 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2158 * If the old chain is modified the new one must be too,
2159 * but we only want to allocate a new bref.
2161 if (ochain->flags & HAMMER2_CHAIN_MODIFIED) {
2163 * When duplicating chains the MODIFIED state is inherited.
2164 * A new bref typically must be allocated. However, file
2165 * data chains may already have the data offset assigned
2166 * to a logical buffer cache buffer so we absolutely cannot
2167 * allocate a new bref here for TYPE_DATA.
2169 * Basically the flusher core only dumps media topology
2170 * and meta-data, not file data. The VOP_FSYNC code deals
2171 * with the file data. XXX need back-pointer to inode.
2173 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2174 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MODIFIED);
2175 hammer2_chain_ref(nchain);
2177 hammer2_chain_modify(trans, nchain,
2178 HAMMER2_MODIFY_OPTDATA);
2180 } else if (nchain->flags & HAMMER2_CHAIN_INITIAL) {
2182 * When duplicating chains in the INITITAL state we need
2183 * to ensure that the chain is marked modified so a
2184 * block is properly assigned to it, otherwise the MOVED
2185 * bit won't do the right thing.
2187 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2188 hammer2_chain_modify(trans, nchain, HAMMER2_MODIFY_OPTDATA);
2190 if (parent || (ochain->flags & HAMMER2_CHAIN_MOVED)) {
2191 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2192 hammer2_chain_ref(nchain);
2194 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2196 switch(nchain->bref.type) {
2197 case HAMMER2_BREF_TYPE_VOLUME:
2198 panic("hammer2_chain_duplicate: cannot be called w/volhdr");
2200 case HAMMER2_BREF_TYPE_INODE:
2201 KKASSERT(bytes == HAMMER2_INODE_BYTES);
/* Inode data is embedded: deep-copy the ipdata so old and new chains
 * do not share the kmalloc'd block. */
2203 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2204 hmp->minode, M_WAITOK | M_ZERO);
2205 nchain->data->ipdata = ochain->data->ipdata;
2208 case HAMMER2_BREF_TYPE_INDIRECT:
2209 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2211 bcopy(ochain->data, nchain->data,
2215 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2216 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2217 panic("hammer2_chain_duplicate: cannot be used to"
2218 "create a freemap root or node");
2220 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2221 case HAMMER2_BREF_TYPE_DATA:
2223 if ((nchain->flags & HAMMER2_CHAIN_MODIFIED) &&
2225 bcopy(ochain->data, nchain->data,
2228 /* leave chain->data NULL */
2229 KKASSERT(nchain->data == NULL);
2234 * Both chains must be locked for us to be able to set the
2235 * duplink. The caller may expect valid data.
2237 * Unmodified duplicated blocks may have the same bref, we
2238 * must be careful to avoid buffer cache deadlocks so we
2239 * unlock the old chain before resolving the new one.
2241 * Insert nchain at the end of the duplication list.
2243 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2244 /* extra ref still present from original allocation */
2246 spin_lock(&ochain->core->cst.spin);
2247 KKASSERT(nchain->duplink == NULL);
2248 nchain->duplink = ochain->duplink;
2249 ochain->duplink = nchain; /* inherits excess ref from alloc */
2250 spin_unlock(&ochain->core->cst.spin);
/* Unlock old before resolving new: same bref may map the same buffer
 * cache block, so holding both locked across the read could deadlock. */
2252 hammer2_chain_unlock(ochain);
2254 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
2255 hammer2_chain_unlock(nchain);
2258 * If parent is not NULL, insert into the parent at the requested
2259 * index. The newly duplicated chain must be marked MOVED and
2260 * SUBMODIFIED set in its parent(s).
2264 * Locate a free blockref in the parent's array
2266 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2267 switch(parent->bref.type) {
2268 case HAMMER2_BREF_TYPE_INODE:
2269 KKASSERT((parent->data->ipdata.op_flags &
2270 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2271 KKASSERT(parent->data != NULL);
2272 base = &parent->data->ipdata.u.blockset.blockref[0];
2273 count = HAMMER2_SET_COUNT;
2275 case HAMMER2_BREF_TYPE_INDIRECT:
2276 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2277 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2278 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2281 KKASSERT(parent->data != NULL);
2282 base = &parent->data->npdata.blockref[0];
2284 count = parent->bytes / sizeof(hammer2_blockref_t);
2286 case HAMMER2_BREF_TYPE_VOLUME:
2287 KKASSERT(parent->data != NULL);
2288 base = &hmp->voldata.sroot_blockset.blockref[0];
2289 count = HAMMER2_SET_COUNT;
2292 panic("hammer2_chain_create: unrecognized "
2293 "blockref type: %d",
2298 KKASSERT(i >= 0 && i < count);
2300 nchain->parent = parent;
2302 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2303 KKASSERT(parent->refs > 0);
/* The target slot must be empty on media or occupied only by a
 * deleted in-memory child before we insert the duplicate. */
2305 spin_lock(&parent->core->cst.spin);
2306 scan = hammer2_chain_find_locked(parent, i);
2307 KKASSERT(base == NULL || base[i].type == 0 ||
2309 (scan->flags & HAMMER2_CHAIN_DELETED));
2310 if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree,
2312 panic("hammer2_chain_link: collision");
2314 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2315 hammer2_chain_ref(parent); /* nchain->parent ref */
2316 spin_unlock(&parent->core->cst.spin);
2318 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2319 hammer2_chain_ref(nchain);
2320 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2322 hammer2_chain_parent_setsubmod(nchain);
2327 * Create an indirect block that covers one or more of the elements in the
2328 * current parent. Either returns the existing parent with no locking or
2329 * ref changes or returns the new indirect block locked and referenced
2330 * and leaving the original parent lock/ref intact as well.
2332 * If an error occurs, NULL is returned and *errorp is set to the error.
2334 * The returned chain depends on where the specified key falls.
2336 * The key/keybits for the indirect mode only needs to follow three rules:
2338 * (1) That all elements underneath it fit within its key space and
2340 * (2) That all elements outside it are outside its key space.
2342 * (3) When creating the new indirect block any elements in the current
2343 * parent that fit within the new indirect block's keyspace must be
2344 * moved into the new indirect block.
2346 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2347 * keyspace than the current parent, but lookup/iteration rules will
2348 * ensure (and must ensure) that rule (2) for all parents leading up
2349 * to the nearest inode or the root volume header is adhered to. This
2350 * is accomplished by always recursing through matching keyspaces in
2351 * the hammer2_chain_lookup() and hammer2_chain_next() API.
2353 * The current implementation calculates the current worst-case keyspace by
2354 * iterating the current parent and then divides it into two halves, choosing
2355 * whichever half has the most elements (not necessarily the half containing
2356 * the requested key).
2358 * We can also opt to use the half with the least number of elements. This
2359 * causes lower-numbered keys (aka logical file offsets) to recurse through
2360 * fewer indirect blocks and higher-numbered keys to recurse through more.
2361 * This also has the risk of not moving enough elements to the new indirect
2362 * block and being forced to create several indirect blocks before the element
2365 * Must be called with an exclusively locked parent.
2369 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2370 hammer2_key_t create_key, int create_bits,
/*
 * NOTE(review): this view of the file is missing lines (blanks, break
 * statements, closing braces); code tokens below are preserved verbatim.
 */
2373 hammer2_mount_t *hmp = trans->hmp;
2374 hammer2_blockref_t *base;
2375 hammer2_blockref_t *bref;
2376 hammer2_chain_t *chain;
2377 hammer2_chain_t *child;
2378 hammer2_chain_t *ichain;
2379 hammer2_chain_t dummy;
2380 hammer2_key_t key = create_key;
2381 int keybits = create_bits;
2389 * Calculate the base blockref pointer or NULL if the chain
2390 * is known to be empty. We need to calculate the array count
2391 * for RB lookups either way.
2393 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2396 /*hammer2_chain_modify(trans, parent, HAMMER2_MODIFY_OPTDATA);*/
/*
 * INITIAL parent: no media data has been instantiated yet, so only the
 * blockref array count is computed in this branch (base remains unset
 * here in the visible code).
 */
2397 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2400 switch(parent->bref.type) {
2401 case HAMMER2_BREF_TYPE_INODE:
2402 count = HAMMER2_SET_COUNT;
2404 case HAMMER2_BREF_TYPE_INDIRECT:
2405 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2406 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2407 count = parent->bytes / sizeof(hammer2_blockref_t);
2409 case HAMMER2_BREF_TYPE_VOLUME:
2410 count = HAMMER2_SET_COUNT;
2413 panic("hammer2_chain_create_indirect: "
2414 "unrecognized blockref type: %d",
/*
 * Non-INITIAL parent: point base at the media blockref array for the
 * parent's type (inode embedded blockset, indirect/freemap node data,
 * or the volume header's sroot blockset).
 */
2420 switch(parent->bref.type) {
2421 case HAMMER2_BREF_TYPE_INODE:
2422 base = &parent->data->ipdata.u.blockset.blockref[0];
2423 count = HAMMER2_SET_COUNT;
2425 case HAMMER2_BREF_TYPE_INDIRECT:
2426 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2427 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2428 base = &parent->data->npdata.blockref[0];
2429 count = parent->bytes / sizeof(hammer2_blockref_t);
2431 case HAMMER2_BREF_TYPE_VOLUME:
2432 base = &hmp->voldata.sroot_blockset.blockref[0];
2433 count = HAMMER2_SET_COUNT;
2436 panic("hammer2_chain_create_indirect: "
2437 "unrecognized blockref type: %d",
2445 * Scan for an unallocated bref, also skipping any slots occupied
2446 * by in-memory chain elements which may not yet have been updated
2447 * in the parent's bref array.
2449 * Deleted elements are ignored.
2451 bzero(&dummy, sizeof(dummy));
2452 dummy.delete_tid = HAMMER2_MAX_TID;
/*
 * The scan below runs under the parent core's spinlock; it widens
 * (key, keybits) to cover every live key found and tallies how many
 * keys fall in each half of that range (locount/hicount).
 */
2454 spin_lock(&parent->core->cst.spin);
2455 for (i = 0; i < count; ++i) {
2458 child = hammer2_chain_find_locked(parent, i);
2460 if (child->flags & HAMMER2_CHAIN_DELETED)
2462 bref = &child->bref;
2463 } else if (base && base[i].type) {
2470 * Expand our calculated key range (key, keybits) to fit
2471 * the scanned key. nkeybits represents the full range
2472 * that we will later cut in half (two halves @ nkeybits - 1).
2475 if (nkeybits < bref->keybits) {
/* Sanity: a radix > 64 indicates a corrupt blockref. */
2476 if (bref->keybits > 64) {
2477 kprintf("bad bref index %d chain %p bref %p\n", i, chain, bref);
2480 nkeybits = bref->keybits;
/* Grow nkeybits until (key ^ bref->key) has no bits above the mask. */
2482 while (nkeybits < 64 &&
2483 (~(((hammer2_key_t)1 << nkeybits) - 1) &
2484 (key ^ bref->key)) != 0) {
2489 * If the new key range is larger we have to determine
2490 * which side of the new key range the existing keys fall
2491 * under by checking the high bit, then collapsing the
2492 * locount into the hicount or vice-versa.
2494 if (keybits != nkeybits) {
2495 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
2506 * The newly scanned key will be in the lower half or the
2507 * higher half of the (new) key range.
2509 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
2514 spin_unlock(&parent->core->cst.spin);
2515 bref = NULL; /* now invalid (safety) */
2518 * Adjust keybits to represent half of the full range calculated
2519 * above (radix 63 max)
2524 * Select whichever half contains the most elements. Theoretically
2525 * we can select either side as long as it contains at least one
2526 * element (in order to ensure that a free slot is present to hold
2527 * the indirect block).
2529 key &= ~(((hammer2_key_t)1 << keybits) - 1);
2530 if (hammer2_indirect_optimize) {
2532 * Insert node for least number of keys, this will arrange
2533 * the first few blocks of a large file or the first few
2534 * inodes in a directory with fewer indirect blocks when
2537 if (hicount < locount && hicount != 0)
2538 key |= (hammer2_key_t)1 << keybits;
2540 key &= ~(hammer2_key_t)1 << keybits;
2543 * Insert node for most number of keys, best for heavily
2546 if (hicount > locount)
2547 key |= (hammer2_key_t)1 << keybits;
2549 key &= ~(hammer2_key_t)1 << keybits;
2553 * How big should our new indirect block be? It has to be at least
2554 * as large as its parent.
2556 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2557 nbytes = HAMMER2_IND_BYTES_MIN;
2559 nbytes = HAMMER2_IND_BYTES_MAX;
2560 if (nbytes < count * sizeof(hammer2_blockref_t))
2561 nbytes = count * sizeof(hammer2_blockref_t);
2564 * Ok, create our new indirect block
2566 switch(parent->bref.type) {
2567 case HAMMER2_BREF_TYPE_FREEMAP_ROOT:
2568 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2569 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2572 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2575 dummy.bref.key = key;
2576 dummy.bref.keybits = keybits;
2577 dummy.bref.data_off = hammer2_allocsize(nbytes);
2578 dummy.bref.methods = parent->bref.methods;
2580 ichain = hammer2_chain_alloc(hmp, &dummy.bref);
2581 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2582 hammer2_chain_core_alloc(ichain, NULL);
2583 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2584 hammer2_chain_drop(ichain); /* excess ref from alloc */
2587 * We have to mark it modified to allocate its block, but use
2588 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
2589 * it won't be acted upon by the flush code.
2591 hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);
2594 * Iterate the original parent and move the matching brefs into
2595 * the new indirect block.
2597 * XXX handle flushes.
2599 spin_lock(&parent->core->cst.spin);
2600 for (i = 0; i < count; ++i) {
2602 * For keying purposes access the bref from the media or
2603 * from our in-memory cache. In cases where the in-memory
2604 * cache overrides the media the keyrefs will be the same
2605 * anyway so we can avoid checking the cache when the media
2608 child = hammer2_chain_find_locked(parent, i);
/* Empty/deleted slots are candidates for ichain's insertion index. */
2610 if (child->flags & HAMMER2_CHAIN_DELETED) {
2611 if (ichain->index < 0)
2615 bref = &child->bref;
2616 } else if (base && base[i].type) {
2619 if (ichain->index < 0)
2625 * Skip keys not in the chosen half (low or high), only bit
2626 * (keybits - 1) needs to be compared but for safety we
2627 * will compare all msb bits plus that bit again.
2629 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2630 (key ^ bref->key)) != 0) {
2635 * This element is being moved from the parent, its slot
2636 * is available for our new indirect block.
2638 if (ichain->index < 0)
2642 * Load the new indirect block by acquiring or allocating
2643 * the related chain entries, then move them to the new
2644 * parent (ichain) by deleting them from their old location
2645 * and inserting a duplicate of the chain and any modified
2646 * sub-chain in the new location.
2648 * We must set MOVED in the chain being duplicated and
2649 * SUBMODIFIED in the parent(s) so the flush code knows
2650 * what is going on. The latter is done after the loop.
2652 * WARNING! chain->cst.spin must be held when chain->parent is
2653 * modified, even though we own the full blown lock,
2654 * to deal with setsubmod and rename races.
2655 * (XXX remove this req).
/*
 * The spinlock is dropped across the blocking get/delete/duplicate
 * sequence and reacquired for the next loop iteration.
 */
2657 spin_unlock(&parent->core->cst.spin);
2658 chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
2659 hammer2_chain_delete(trans, parent, chain);
2660 hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
2662 hammer2_chain_unlock(chain);
2663 KKASSERT(parent->refs > 0);
2665 spin_lock(&parent->core->cst.spin);
2667 spin_unlock(&parent->core->cst.spin);
2670 * Insert the new indirect block into the parent now that we've
2671 * cleared out some entries in the parent. We calculated a good
2672 * insertion index in the loop above (ichain->index).
2674 * We don't have to set MOVED here because we mark ichain modified
2675 * down below (so the normal modified -> flush -> set-moved sequence
2678 * The insertion shouldn't race as this is a completely new block
2679 * and the parent is locked.
2681 if (ichain->index < 0)
2682 kprintf("indirect parent %p count %d key %016jx/%d\n",
2683 parent, count, (intmax_t)key, keybits);
2684 KKASSERT(ichain->index >= 0);
2685 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
2686 spin_lock(&parent->core->cst.spin);
2687 if (RB_INSERT(hammer2_chain_tree, &parent->core->rbtree, ichain))
2688 panic("hammer2_chain_create_indirect: ichain insertion");
2689 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
2690 ichain->parent = parent;
2691 hammer2_chain_ref(parent); /* ichain->parent ref */
2692 spin_unlock(&parent->core->cst.spin);
2693 KKASSERT(parent->duplink == NULL); /* XXX must be inside spin */
2696 * Mark the new indirect block modified after insertion, which
2697 * will propagate up through parent all the way to the root and
2698 * also allocate the physical block in ichain for our caller,
2699 * and assign ichain->data to a pre-zero'd space (because there
2700 * is not prior data to copy into it).
2702 * We have to set SUBMODIFIED in ichain's flags manually so the
2703 * flusher knows it has to recurse through it to get to all of
2704 * our moved blocks, then call setsubmod() to set the bit
2707 /*hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);*/
2708 hammer2_chain_parent_setsubmod(ichain);
2709 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2712 * Figure out what to return.
2714 if (create_bits > keybits) {
2716 * Key being created is way outside the key range,
2717 * return the original parent.
2719 hammer2_chain_unlock(ichain);
2720 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
2721 (create_key ^ key)) {
2723 * Key being created is outside the key range,
2724 * return the original parent.
2726 hammer2_chain_unlock(ichain);
2729 * Otherwise it's in the range, return the new parent.
2730 * (leave both the new and old parent locked).
2739 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
2740 * set chain->delete_tid.
2742 * This function does NOT generate a modification to the parent. It
2743 * would be nearly impossible to figure out which parent to modify anyway.
2744 * Such modifications are handled by the flush code and are properly merged
2745 * using the flush synchronization point.
2747 * The find/get code will properly overload the RBTREE check on top of
2748 * the bref check to detect deleted entries.
2750 * This function is NOT recursive. Any entity already pushed into the
2751 * chain (such as an inode) may still need visibility into its contents,
2752 * as well as the ability to read and modify the contents. For example,
2753 * for an unlinked file which is still open.
2755 * NOTE: This function does NOT set chain->modify_tid, allowing future
2756 * code to distinguish between live and deleted chains by testing
2759 * NOTE: Deletions normally do not occur in the middle of a duplication
2760 * chain but we use a trick for hardlink migration that refactors
2761 * the originating inode without deleting it, so we make no assumptions
2765 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
2766 hammer2_chain_t *chain)
/* Caller must hold the parent core's thread lock exclusively. */
2768 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2771 * Nothing to do if already marked.
2773 if (chain->flags & HAMMER2_CHAIN_DELETED)
2777 * We must set MOVED along with DELETED for the flush code to
2778 * recognize the operation and properly disconnect the chain
2781 * The setting of DELETED causes finds, lookups, and _next iterations
2782 * to no longer recognize the chain. RB_SCAN()s will still have
2783 * visibility (needed for flush serialization points).
2785 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
/* MOVED carries its own chain ref; take it only on the 0->1 edge. */
2786 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2787 hammer2_chain_ref(chain);
2788 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
/* Stamp the transaction's sync_tid so flushes can order the deletion. */
2790 chain->delete_tid = trans->sync_tid;
2791 hammer2_chain_parent_setsubmod(chain);
2795 hammer2_chain_wait(hammer2_chain_t *chain)
2797 tsleep(chain, 0, "chnflw", 1);