2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain and hammer2_chain_core structures.
39 * Chains represent the filesystem media topology in-memory. Any given
40 * chain can represent an inode, indirect block, data, or other types
43 * This module provides APIs for direct and indirect block searches,
44 * iterations, recursions, creation, deletion, replication, and snapshot
45 * views (used by the flush and snapshot code).
47 * Generally speaking any modification made to a chain must propagate all
48 * the way back to the volume header, issuing copy-on-write updates to the
49 * blockref tables all the way up. Any chain except the volume header itself
50 * can be flushed to disk at any time, in any order. None of it matters
51 * until we get to the point where we want to synchronize the volume header
52 * (see the flush code).
54 * The chain structure supports snapshot views in time, which are primarily
55 * used until the related data and meta-data is flushed to allow the
56 * filesystem to make snapshots without requiring it to first flush,
57 * and to allow the filesystem to flush and modify the filesystem concurrently
58 * with minimal or no stalls.
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
65 #include <sys/kern_syscall.h>
/*
 * File-scope tunables and forward declarations.
 *
 * NOTE(review): this listing is a partial extraction (embedded original
 * line numbers, dropped lines); comments describe only what is visible.
 */
70 static int hammer2_indirect_optimize; /* XXX SYSCTL */
/* Indirect-block creation helper, defined later in the original file. */
72 static hammer2_chain_t *hammer2_chain_create_indirect(
73 hammer2_trans_t *trans, hammer2_chain_t *parent,
74 hammer2_key_t key, int keybits, int for_type, int *errorp);
/* Bumps per-bref-type read statistics; used by the I/O paths below. */
75 static void adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
78 * We use a red-black tree to guarantee safe lookups under shared locks.
80 * Chains can be overloaded onto the same index, creating a different
81 * view of a blockref table based on a transaction id. The RBTREE
82 * deconflicts the view by sub-sorting on delete_tid.
84 * NOTE: Any 'current' chain which is not yet deleted will have a
85 * delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
/* Generate the red-black tree operations keyed by hammer2_chain_cmp. */
87 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
/*
 * RB-tree comparator for chains: primary sort on ->index, secondary sort
 * on ->delete_tid so multiple (deleted) versions of the same blockref slot
 * can coexist in one tree (see the delete_tid NOTE above).
 *
 * NOTE(review): the return statements and braces were dropped by the
 * extraction; presumably the usual -1/1/0 comparator returns — confirm
 * against the original source.
 */
90 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
92 if (chain1->index < chain2->index)
94 if (chain1->index > chain2->index)
96 if (chain1->delete_tid < chain2->delete_tid)
98 if (chain1->delete_tid > chain2->delete_tid)
/*
 * Return whether clustered (read-ahead) I/O may be used for this chain:
 * only when the global hammer2_cluster_enable tunable is set and the
 * chain is an INDIRECT, INODE, or DATA bref type.
 *
 * NOTE(review): the return statements/closing braces were dropped by the
 * extraction; confirm against the original source.
 */
105 hammer2_isclusterable(hammer2_chain_t *chain)
107 if (hammer2_cluster_enable) {
108 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
109 chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
110 chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
118 * Recursively set the SUBMODIFIED flag up to the root starting at chain's
119 * parent. SUBMODIFIED is not set in chain itself.
121 * This function only operates on current-time transactions and is not
122 * used during flushes. Instead, the flush code manages the flag itself.
/*
 * Walk upward from chain's parent to the root, setting SUBMODIFIED on a
 * live parent at each level (skipping parents for which
 * hammer2_chain_refactor_test() says to move on via next_parent).
 * SUBMODIFIED is not set on chain itself; flush transactions bail early.
 *
 * NOTE(review): the early-return body after the ISFLUSH test and the
 * loop's closing braces were dropped by the extraction.
 */
125 hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
127 hammer2_chain_core_t *above;
/* Flushes manage SUBMODIFIED themselves (see function comment above). */
129 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
131 while ((above = chain->above) != NULL) {
132 spin_lock(&above->cst.spin);
/* Pick the first parent that passes the refactor test. */
133 chain = above->first_parent;
134 while (hammer2_chain_refactor_test(chain, 1))
135 chain = chain->next_parent;
136 atomic_set_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
137 spin_unlock(&above->cst.spin);
142 * Allocate a new disconnected chain element representing the specified
143 * bref. chain->refs is set to 1 and the passed bref is copied to
144 * chain->bref. chain->bytes is derived from the bref.
146 * chain->core is NOT allocated and the media data and bp pointers are left
147 * NULL. The caller must call chain_core_alloc() to allocate or associate
148 * a core with the chain.
150 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
/*
 * Allocate a new disconnected chain for the given bref (see the block
 * comment above): refs start at 1, bytes derived from the radix encoded
 * in bref->data_off, core/data/bp left unset for the caller.
 *
 * NOTE(review): the switch statement line, break statements, bref copy,
 * and return were dropped by the extraction.
 */
153 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_trans_t *trans,
154 hammer2_blockref_t *bref)
156 hammer2_chain_t *chain;
/* Low bits of data_off encode the size radix (1 << radix bytes). */
157 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
160 * Construct the appropriate system structure.
163 case HAMMER2_BREF_TYPE_INODE:
164 case HAMMER2_BREF_TYPE_INDIRECT:
165 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
166 case HAMMER2_BREF_TYPE_DATA:
167 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
168 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
/* Volume/freemap roots are embedded in the mount, never allocated here. */
170 case HAMMER2_BREF_TYPE_VOLUME:
171 case HAMMER2_BREF_TYPE_FREEMAP:
173 panic("hammer2_chain_alloc volume type illegal for op");
176 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
182 chain->index = -1; /* not yet assigned */
183 chain->bytes = bytes;
184 chain->flags = HAMMER2_CHAIN_ALLOCATED;
185 chain->flags = HAMMER2_CHAIN_ALLOCATED;
186 chain->delete_tid = HAMMER2_MAX_TID;
188 chain->modify_tid = trans->sync_tid;
194 * Associate an existing core with the chain or allocate a new core.
196 * The core is not locked. No additional refs on the chain are made.
/*
 * Attach an existing core to chain, or allocate a fresh one when core is
 * NULL (see block comment above). A fresh core gets an initialized
 * rbtree/cst with chain as first_parent; a shared core gains a sharecnt
 * ref and chain is appended to the core's parent linked list under the
 * cst spinlock.
 *
 * NOTE(review): the if/else structure, kmalloc flags argument, and
 * closing braces were dropped by the extraction.
 */
199 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
201 hammer2_chain_t **scanp;
203 KKASSERT(chain->core == NULL);
204 KKASSERT(chain->next_parent == NULL);
207 core = kmalloc(sizeof(*core), chain->hmp->mchain,
209 RB_INIT(&core->rbtree);
212 ccms_cst_init(&core->cst, chain);
213 core->first_parent = chain;
/* Sharing an existing core: account for the extra parent. */
215 atomic_add_int(&core->sharecnt, 1);
217 spin_lock(&core->cst.spin);
218 if (core->first_parent == NULL) {
219 core->first_parent = chain;
/* Walk to the tail of the next_parent list and link chain in. */
221 scanp = &core->first_parent;
223 scanp = &(*scanp)->next_parent;
225 hammer2_chain_ref(chain); /* next_parent link */
227 spin_unlock(&core->cst.spin);
232 * Add a reference to a chain element, preventing its destruction.
/*
 * Add one reference to a chain, preventing its destruction. Atomic; no
 * lock required.
 */
235 hammer2_chain_ref(hammer2_chain_t *chain)
237 atomic_add_int(&chain->refs, 1);
241 * Drop the caller's reference to the chain. When the ref count drops to
242 * zero this function will disassociate the chain from its parent and
243 * deallocate it, then recursively drop the parent using the implied ref
244 * from the chain's chain->parent.
246 * WARNING! Just because we are able to deallocate a chain doesn't mean
247 * that chain->core->rbtree is empty. There can still be a sharecnt
248 * on chain->core and RBTREE entries that refer to different parents.
/* 1->0 transition handler, defined below. */
250 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
/*
 * Drop the caller's reference (see block comment above). MOVED and
 * MODIFIED each imply a ref of their own, so refs must stay above that
 * floor ('need'). The 1->0 case is delegated to hammer2_chain_lastdrop();
 * otherwise a simple cmpset decrement, retried on failure.
 *
 * NOTE(review): the refs-load, 'need' accumulation statements, and loop
 * structure were dropped by the extraction.
 */
253 hammer2_chain_drop(hammer2_chain_t *chain)
259 if (chain->flags & HAMMER2_CHAIN_MOVED)
261 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
263 KKASSERT(chain->refs > need);
272 chain = hammer2_chain_lastdrop(chain);
274 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
276 /* retry the same chain */
282 * Safe handling of the 1->0 transition on chain. Returns a chain for
283 * recursive drop or NULL, possibly returning the same chain if the atomic
286 * The cst spinlock is allowed nest child-to-parent (not parent-to-child).
/*
 * Handle the 1->0 refs transition (see block comment above). Under the
 * core/above spinlocks: attempt the cmpset to 0, remove chain from its
 * parent's rbtree, pick up to two chains (rdrop1/rdrop2) to drop
 * recursively, release the core sharecnt (freeing the core on its own
 * 1->0), free embedded data, and finally free the chain itself.
 *
 * NOTE(review): many lines (returns, braces, else-arms, hmp assignment)
 * were dropped by the extraction; lock pairing below cannot be fully
 * verified from this listing.
 */
290 hammer2_chain_lastdrop(hammer2_chain_t *chain)
292 hammer2_mount_t *hmp;
293 hammer2_chain_core_t *above;
294 hammer2_chain_core_t *core;
295 hammer2_chain_t *rdrop1;
296 hammer2_chain_t *rdrop2;
299 * Spinlock the core and check to see if it is empty. If it is
300 * not empty we leave chain intact with refs == 0.
302 if ((core = chain->core) != NULL) {
303 spin_lock(&core->cst.spin);
304 if (RB_ROOT(&core->rbtree)) {
305 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
306 /* 1->0 transition successful */
307 spin_unlock(&core->cst.spin);
310 /* 1->0 transition failed, retry */
311 spin_unlock(&core->cst.spin);
322 * Spinlock the parent and try to drop the last ref. On success
323 * remove chain from its parent.
325 if ((above = chain->above) != NULL) {
326 spin_lock(&above->cst.spin);
327 if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
328 /* 1->0 transition failed */
329 spin_unlock(&above->cst.spin);
331 spin_unlock(&core->cst.spin);
337 * 1->0 transition successful
339 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
340 RB_REMOVE(hammer2_chain_tree, &above->rbtree, chain);
341 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
345 * Calculate a chain to return for a recursive drop.
347 * XXX this needs help, we have a potential deep-recursion
348 * problem which we try to address but sometimes we wind up
349 * with two elements that have to be dropped.
351 * If the chain has an associated core with refs at 0
352 * the chain must be the first in the core's linked list
353 * by definition, and we will recursively drop the ref
354 * implied by the chain->next_parent field.
356 * Otherwise if the rbtree containing chain is empty we try
357 * to recursively drop our parent (only the first one could
358 * possibly have refs == 0 since the rest are linked via
361 * Otherwise we try to recursively drop a sibling.
363 if (chain->next_parent) {
364 KKASSERT(core != NULL);
365 rdrop1 = chain->next_parent;
367 if (RB_EMPTY(&above->rbtree)) {
368 rdrop2 = above->first_parent;
/* Only take the parent if we can win the 0->1 ref race on it. */
369 if (rdrop2 == NULL || rdrop2->refs ||
370 atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0) {
374 rdrop2 = RB_ROOT(&above->rbtree);
375 if (atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0)
378 spin_unlock(&above->cst.spin);
379 above = NULL; /* safety */
381 if (chain->next_parent) {
382 KKASSERT(core != NULL);
383 rdrop1 = chain->next_parent;
388 * We still have the core spinlock (if core is non-NULL). The
389 * above spinlock is gone.
392 KKASSERT(core->first_parent == chain);
393 if (chain->next_parent) {
394 /* parent should already be set */
395 KKASSERT(rdrop1 == chain->next_parent);
/* Unlink chain from the core's parent list. */
397 core->first_parent = chain->next_parent;
398 chain->next_parent = NULL;
401 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
403 * On the 1->0 transition of core we can destroy
406 spin_unlock(&core->cst.spin);
407 KKASSERT(core->cst.count == 0);
408 KKASSERT(core->cst.upgrade == 0);
409 kfree(core, hmp->mchain);
411 spin_unlock(&core->cst.spin);
413 core = NULL; /* safety */
417 * All spin locks are gone, finish freeing stuff.
419 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
420 HAMMER2_CHAIN_MODIFIED)) == 0);
/* Embedded data (inode copy, freemap bitmap) is separately kmalloc'd. */
422 switch(chain->bref.type) {
423 case HAMMER2_BREF_TYPE_VOLUME:
424 case HAMMER2_BREF_TYPE_FREEMAP:
427 case HAMMER2_BREF_TYPE_INODE:
429 kfree(chain->data, hmp->mchain);
433 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
435 kfree(chain->data, hmp->mchain);
440 KKASSERT(chain->data == NULL);
444 KKASSERT(chain->bp == NULL);
447 if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
448 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
449 kfree(chain, hmp->mchain);
/* Two candidates: drop one here, return the other to the caller. */
451 if (rdrop1 && rdrop2) {
452 hammer2_chain_drop(rdrop1);
461 * Ref and lock a chain element, acquiring its data with I/O if necessary,
462 * and specify how you would like the data to be resolved.
464 * Returns 0 on success or an error code if the data could not be acquired.
465 * The chain element is locked on return regardless of whether an error
468 * The lock is allowed to recurse, multiple locking ops will aggregate
469 * the requested resolve types. Once data is assigned it will not be
470 * removed until the last unlock.
472 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
473 * (typically used to avoid device/logical buffer
476 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
477 * the INITIAL-create state (indirect blocks only).
479 * Do not resolve data elements for DATA chains.
480 * (typically used to avoid device/logical buffer
483 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
485 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
486 * it will be locked exclusive.
488 * NOTE: Embedded elements (volume header, inodes) are always resolved
491 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
492 * element will instantiate and zero its buffer, and flush it on
495 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
496 * so as not to instantiate a device buffer, which could alias against
497 * a logical file buffer. However, if ALWAYS is specified the
498 * device buffer will be instantiated anyway.
500 * WARNING! If data must be fetched a shared lock will temporarily be
501 * upgraded to exclusive. However, a deadlock can occur if
502 * the caller owns more than one shared lock.
/*
 * Ref and lock a chain and resolve its media data per the RESOLVE_*
 * policy in 'how' (see the block comment above for the full contract).
 * Shared locks are temporarily upgraded to exclusive when a device
 * buffer must be instantiated.
 *
 * NOTE(review): numerous lines (returns, 'break's, else-arms, local
 * declarations such as psize/pmask/pbase/boff/peof/error/ostate/bdata,
 * hmp/core/bref assignments) were dropped by the extraction.
 */
505 hammer2_chain_lock(hammer2_chain_t *chain, int how)
507 hammer2_mount_t *hmp;
508 hammer2_chain_core_t *core;
509 hammer2_blockref_t *bref;
520 * Ref and lock the element. Recursive locks are allowed.
522 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
523 hammer2_chain_ref(chain);
524 atomic_add_int(&chain->lockcnt, 1);
527 KKASSERT(hmp != NULL);
530 * Get the appropriate lock.
533 if (how & HAMMER2_RESOLVE_SHARED)
534 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
536 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
539 * If we already have a valid data pointer no further action is
546 * Do we have to resolve the data?
548 switch(how & HAMMER2_RESOLVE_MASK) {
549 case HAMMER2_RESOLVE_NEVER:
551 case HAMMER2_RESOLVE_MAYBE:
/* MAYBE skips resolution for INITIAL, DATA, and freemap brefs. */
552 if (chain->flags & HAMMER2_CHAIN_INITIAL)
554 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
557 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
560 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
563 case HAMMER2_RESOLVE_ALWAYS:
568 * Upgrade to an exclusive lock so we can safely manipulate the
569 * buffer cache. If another thread got to it before us we
572 ostate = ccms_thread_lock_upgrade(&core->cst);
574 ccms_thread_lock_downgrade(&core->cst, ostate);
579 * We must resolve to a device buffer, either by issuing I/O or
580 * by creating a zero-fill element. We do not mark the buffer
581 * dirty when creating a zero-fill element (the hammer2_chain_modify()
582 * API must still be used to do that).
584 * The device buffer is variable-sized in powers of 2 down
585 * to HAMMER2_MIN_ALLOC (typically 1K). A 64K physical storage
586 * chunk always contains buffers of the same size. (XXX)
588 * The minimum physical IO size may be larger than the variable
/* Compute device block base, offset within it, and segment end. */
593 psize = hammer2_devblksize(chain->bytes);
594 pmask = (hammer2_off_t)psize - 1;
595 pbase = bref->data_off & ~pmask;
596 boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
597 KKASSERT(pbase != 0);
598 peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
601 * The getblk() optimization can only be used on newly created
602 * elements if the physical block size matches the request.
604 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
605 chain->bytes == psize) {
606 chain->bp = getblk(hmp->devvp, pbase, psize, 0, 0);
608 } else if (hammer2_isclusterable(chain)) {
609 error = cluster_read(hmp->devvp, peof, pbase, psize,
610 psize, HAMMER2_PBUFSIZE*4,
612 adjreadcounter(&chain->bref, chain->bytes);
614 error = bread(hmp->devvp, pbase, psize, &chain->bp);
615 adjreadcounter(&chain->bref, chain->bytes);
619 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
620 (intmax_t)pbase, error);
623 ccms_thread_lock_downgrade(&core->cst, ostate);
628 * Zero the data area if the chain is in the INITIAL-create state.
629 * Mark the buffer for bdwrite(). This clears the INITIAL state
630 * but does not mark the chain modified.
632 bdata = (char *)chain->bp->b_data + boff;
633 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
634 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
635 bzero(bdata, chain->bytes);
636 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
640 * Setup the data pointer, either pointing it to an embedded data
641 * structure and copying the data from the buffer, or pointing it
644 * The buffer is not retained when copying to an embedded data
645 * structure in order to avoid potential deadlocks or recursions
646 * on the same physical buffer.
648 switch (bref->type) {
649 case HAMMER2_BREF_TYPE_VOLUME:
650 case HAMMER2_BREF_TYPE_FREEMAP:
652 * Copy data from bp to embedded buffer
654 panic("hammer2_chain_lock: called on unresolved volume header");
657 KKASSERT(pbase == 0);
658 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
659 bcopy(bdata, &hmp->voldata, chain->bytes);
660 chain->data = (void *)&hmp->voldata;
665 case HAMMER2_BREF_TYPE_INODE:
667 * Copy data from bp to embedded buffer, do not retain the
670 KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
671 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
672 chain->data = kmalloc(sizeof(chain->data->ipdata),
673 hmp->mchain, M_WAITOK | M_ZERO);
674 bcopy(bdata, &chain->data->ipdata, chain->bytes);
678 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
679 KKASSERT(chain->bytes == sizeof(chain->data->bmdata));
680 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
681 chain->data = kmalloc(sizeof(chain->data->bmdata),
682 hmp->mchain, M_WAITOK | M_ZERO);
683 bcopy(bdata, &chain->data->bmdata, chain->bytes);
687 case HAMMER2_BREF_TYPE_INDIRECT:
688 case HAMMER2_BREF_TYPE_DATA:
689 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
692 * Point data at the device buffer and leave bp intact.
694 chain->data = (void *)bdata;
699 * Make sure the bp is not specifically owned by this thread before
700 * restoring to a possibly shared lock, so another hammer2 thread
704 BUF_KERNPROC(chain->bp);
705 ccms_thread_lock_downgrade(&core->cst, ostate);
710 * Asynchronously read the device buffer (dbp) and execute the specified
711 * callback. The caller should pass-in a locked chain (shared lock is ok).
712 * The function is responsible for unlocking the chain and for disposing
715 * NOTE! A NULL dbp (but non-NULL data) will be passed to the function
716 * if the dbp is integrated into the chain, because we do not want
717 * the caller to dispose of dbp in that situation.
719 static void hammer2_chain_load_async_callback(struct bio *bio);
/*
 * Asynchronously resolve chain's device buffer and invoke func (see the
 * block comment above). Fast paths: data already resolved (NULL dbp
 * passed to func), or INITIAL chain whose size matches the device block
 * (zero-filled getblk buffer). Otherwise a cbinfo is allocated and the
 * read is issued via cluster_readcb() with the completion callback below.
 *
 * NOTE(review): the 'arg' parameter line, returns, locals (dbp, psize,
 * pmask, pbase, boff, peof, bdata), and cbinfo field setup besides
 * ->chain were dropped by the extraction.
 */
722 hammer2_chain_load_async(hammer2_chain_t *chain,
723 void (*func)(hammer2_chain_t *, struct buf *, char *, void *),
726 hammer2_cbinfo_t *cbinfo;
727 hammer2_mount_t *hmp;
728 hammer2_blockref_t *bref;
/* Data already resolved: call back immediately with no dbp. */
738 func(chain, NULL, (char *)chain->data, arg);
743 * We must resolve to a device buffer, either by issuing I/O or
744 * by creating a zero-fill element. We do not mark the buffer
745 * dirty when creating a zero-fill element (the hammer2_chain_modify()
746 * API must still be used to do that).
748 * The device buffer is variable-sized in powers of 2 down
749 * to HAMMER2_MIN_ALLOC (typically 1K). A 64K physical storage
750 * chunk always contains buffers of the same size. (XXX)
752 * The minimum physical IO size may be larger than the variable
757 psize = hammer2_devblksize(chain->bytes);
758 pmask = (hammer2_off_t)psize - 1;
759 pbase = bref->data_off & ~pmask;
760 boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
761 KKASSERT(pbase != 0);
762 peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
767 * The getblk() optimization can only be used on newly created
768 * elements if the physical block size matches the request.
770 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
771 chain->bytes == psize) {
772 dbp = getblk(hmp->devvp, pbase, psize, 0, 0);
773 /*atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);*/
774 bdata = (char *)dbp->b_data + boff;
775 bzero(bdata, chain->bytes);
776 /*atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);*/
777 func(chain, dbp, bdata, arg);
/* Slow path: queue the async read with a callback context. */
782 adjreadcounter(&chain->bref, chain->bytes);
783 cbinfo = kmalloc(sizeof(*cbinfo), hmp->mchain, M_INTWAIT | M_ZERO);
784 cbinfo->chain = chain;
789 cluster_readcb(hmp->devvp, peof, pbase, psize,
790 HAMMER2_PBUFSIZE*4, HAMMER2_PBUFSIZE*4,
791 hammer2_chain_load_async_callback, cbinfo);
/*
 * I/O completion callback for hammer2_chain_load_async(): finish the
 * bio (BIO_DONE/BIO_SYNC handling), recover the cbinfo context, zero the
 * data if the chain is still INITIAL, invoke the user callback, and free
 * the cbinfo.
 *
 * NOTE(review): the dbp/data local declarations and the bpdone() call
 * referenced by the comment were dropped by the extraction. Also note
 * cbinfo is assigned from bio_caller_info1.ptr twice below (lines 819
 * and 824) — looks redundant; confirm against the original source.
 */
795 hammer2_chain_load_async_callback(struct bio *bio)
797 hammer2_cbinfo_t *cbinfo;
798 hammer2_mount_t *hmp;
803 * Nobody is waiting for bio/dbp to complete, we are
804 * responsible for handling the biowait() equivalent
805 * on dbp which means clearing BIO_DONE and BIO_SYNC
806 * and calling bpdone() if it hasn't already been called
807 * to restore any covered holes in the buffer's backing
811 if ((bio->bio_flags & BIO_DONE) == 0)
813 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
816 * Extract the auxiliary info and issue the callback.
817 * Finish up with the dbp after it returns.
819 cbinfo = bio->bio_caller_info1.ptr;
820 /*ccms_thread_lock_setown(cbinfo->chain->core);*/
821 data = dbp->b_data + cbinfo->boff;
822 hmp = cbinfo->chain->hmp;
824 cbinfo = bio->bio_caller_info1.ptr;
825 if (cbinfo->chain->flags & HAMMER2_CHAIN_INITIAL)
826 bzero(data, cbinfo->chain->bytes);
827 cbinfo->func(cbinfo->chain, dbp, data, cbinfo->arg);
828 /* cbinfo->chain is stale now */
830 kfree(cbinfo, hmp->mchain);
834 * Unlock and deref a chain element.
836 * On the last lock release any non-embedded data (chain->bp) will be
/*
 * Unlock and deref a chain (see block comment above). Non-final unlocks
 * just decrement lockcnt and return; the 1->0 lockcnt transition upgrades
 * the core lock and retires chain->bp: accounting counters are bumped for
 * dirty buffers, B_RELBUF is set for DATA buffers to avoid aliasing the
 * logical file buffer, and DIRTYBP buffers are written (awrite on
 * IOFLUSH, otherwise presumably a delayed write — the bdwrite path was
 * dropped by the extraction).
 *
 * NOTE(review): locals (lockcnt, ostate, counterp), 'return's, 'break's,
 * else-arms, and default labels were dropped by the extraction.
 */
840 hammer2_chain_unlock(hammer2_chain_t *chain)
842 hammer2_chain_core_t *core = chain->core;
848 * The core->cst lock can be shared across several chains so we
849 * need to track the per-chain lockcnt separately.
851 * If multiple locks are present (or being attempted) on this
852 * particular chain we can just unlock, drop refs, and return.
854 * Otherwise fall-through on the 1->0 transition.
857 lockcnt = chain->lockcnt;
858 KKASSERT(lockcnt > 0);
861 if (atomic_cmpset_int(&chain->lockcnt,
862 lockcnt, lockcnt - 1)) {
863 ccms_thread_unlock(&core->cst);
864 hammer2_chain_drop(chain);
868 if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
875 * On the 1->0 transition we upgrade the core lock (if necessary)
876 * to exclusive for terminal processing. If after upgrading we find
877 * that lockcnt is non-zero, another thread is racing us and will
878 * handle the unload for us later on, so just cleanup and return
879 * leaving the data/bp intact
881 * Otherwise if lockcnt is still 0 it is possible for it to become
882 * non-zero and race, but since we hold the core->cst lock
883 * exclusively all that will happen is that the chain will be
884 * reloaded after we unload it.
886 ostate = ccms_thread_lock_upgrade(&core->cst);
887 if (chain->lockcnt) {
888 ccms_thread_unlock_upgraded(&core->cst, ostate);
889 hammer2_chain_drop(chain);
894 * Shortcut the case if the data is embedded or not resolved.
896 * Do NOT NULL out chain->data (e.g. inode data), it might be
899 * The DIRTYBP flag is non-applicable in this situation and can
900 * be cleared to keep the flags state clean.
902 if (chain->bp == NULL) {
903 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
904 ccms_thread_unlock_upgraded(&core->cst, ostate);
905 hammer2_chain_drop(chain);
/* Statistics: pick the write counter by bref type (ioa = IOFLUSH path). */
912 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
914 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
915 switch(chain->bref.type) {
916 case HAMMER2_BREF_TYPE_DATA:
917 counterp = &hammer2_ioa_file_write;
919 case HAMMER2_BREF_TYPE_INODE:
920 counterp = &hammer2_ioa_meta_write;
922 case HAMMER2_BREF_TYPE_INDIRECT:
923 counterp = &hammer2_ioa_indr_write;
925 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
926 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
927 counterp = &hammer2_ioa_fmap_write;
930 counterp = &hammer2_ioa_volu_write;
933 *counterp += chain->bytes;
935 switch(chain->bref.type) {
936 case HAMMER2_BREF_TYPE_DATA:
937 counterp = &hammer2_iod_file_write;
939 case HAMMER2_BREF_TYPE_INODE:
940 counterp = &hammer2_iod_meta_write;
942 case HAMMER2_BREF_TYPE_INDIRECT:
943 counterp = &hammer2_iod_indr_write;
945 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
946 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
947 counterp = &hammer2_iod_fmap_write;
950 counterp = &hammer2_iod_volu_write;
953 *counterp += chain->bytes;
959 * If a device buffer was used for data be sure to destroy the
960 * buffer when we are done to avoid aliases (XXX what about the
961 * underlying VM pages?).
963 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
968 * XXX our primary cache is now the block device, not
969 * the logical file. don't release the buffer.
971 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
972 chain->bp->b_flags |= B_RELBUF;
976 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
977 * or not. The flag will get re-set when chain_modify() is called,
978 * even if MODIFIED is already set, allowing the OS to retire the
979 * buffer independent of a hammer2 flush.
982 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
983 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
984 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
985 atomic_clear_int(&chain->flags,
986 HAMMER2_CHAIN_IOFLUSH);
987 chain->bp->b_flags |= B_RELBUF;
988 cluster_awrite(chain->bp);
990 chain->bp->b_flags |= B_CLUSTEROK;
994 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
995 atomic_clear_int(&chain->flags,
996 HAMMER2_CHAIN_IOFLUSH);
997 chain->bp->b_flags |= B_RELBUF;
1000 /* bp might still be dirty */
1005 ccms_thread_unlock_upgraded(&core->cst, ostate);
1006 hammer2_chain_drop(chain);
1010 * Resize the chain's physical storage allocation in-place. This may
1011 * replace the passed-in chain with a new chain.
1013 * Chains can be resized smaller without reallocating the storage.
1014 * Resizing larger will reallocate the storage.
1016 * Must be passed an exclusively locked parent and chain, returns a new
1017 * exclusively locked chain at the same index and unlocks the old chain.
1018 * Flushes the buffer if necessary.
1020 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
1021 * to avoid instantiating a device buffer that conflicts with the vnode
1022 * data buffer. That is, the passed-in bp is a logical buffer, whereas
1023 * any chain-oriented bp would be a device buffer.
1025 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
1026 * XXX return error if cannot resize.
/*
 * Resize a DATA or INDIRECT chain's physical allocation to 1 << nradix
 * bytes (see block comment above). Performs a delete-duplicate so the
 * flush code can keep using the old chain, marks the new chain MODIFIED
 * and MOVED, allocates fresh freemap storage, and propagates SUBMODIFIED
 * to the parents.
 *
 * NOTE(review): locals (obytes, nbytes, bbytes, boff), hmp/chain
 * initialization from the parameters, returns, and closing braces were
 * dropped by the extraction.
 */
1029 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1031 hammer2_chain_t *parent, hammer2_chain_t **chainp,
1032 int nradix, int flags)
1034 hammer2_mount_t *hmp;
1035 hammer2_chain_t *chain;
1036 hammer2_off_t pbase;
1046 * Only data and indirect blocks can be resized for now.
1047 * (The volu root, inodes, and freemap elements use a fixed size).
1049 KKASSERT(chain != &hmp->vchain);
1050 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
1051 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
1054 * Nothing to do if the element is already the proper size
1056 obytes = chain->bytes;
1057 nbytes = 1U << nradix;
1058 if (obytes == nbytes)
1062 * Delete the old chain and duplicate it at the same (parent, index),
1063 * returning a new chain. This allows the old chain to still be
1064 * used by the flush code. Duplication occurs in-place.
1066 * The parent does not have to be locked for the delete/duplicate call,
1067 * but is in this particular code path.
1069 * NOTE: If we are not crossing a synchronization point the
1070 * duplication code will simply reuse the existing chain
1073 hammer2_chain_delete_duplicate(trans, &chain, 0);
1076 * Set MODIFIED and add a chain ref to prevent destruction. Both
1077 * modified flags share the same ref. (duplicated chains do not
1078 * start out MODIFIED unless possibly if the duplication code
1079 * decided to reuse the existing chain as-is).
1081 * If the chain is already marked MODIFIED then we can safely
1082 * return the previous allocation to the pool without having to
1083 * worry about snapshots. XXX check flush synchronization.
1085 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1086 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1087 hammer2_chain_ref(chain);
1091 * Relocate the block, even if making it smaller (because different
1092 * block sizes may be in different regions).
1094 hammer2_freemap_alloc(trans, chain->hmp, &chain->bref, nbytes);
1095 chain->bytes = nbytes;
1096 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
1099 * The device buffer may be larger than the allocation size.
1101 bbytes = hammer2_devblksize(chain->bytes);
1102 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1103 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1106 * For now just support it on DATA chains (and not on indirect
1109 KKASSERT(chain->bp == NULL);
1112 * Make sure the chain is marked MOVED and SUBMOD is set in the
1113 * parent(s) so the adjustments are picked up by flush.
1115 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1116 hammer2_chain_ref(chain);
1117 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1119 hammer2_chain_setsubmod(trans, chain);
1124 * Set a chain modified, making it read-write and duplicating it if necessary.
1125 * This function will assign a new physical block to the chain if necessary
1127 * Duplication of already-modified chains is possible when the modification
1128 * crosses a flush synchronization boundary.
1130 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
1131 * level or the COW operation will not work.
1133 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
1134 * run the data through the device buffers.
1136 * This function may return a different chain than was passed, in which case
1137 * the old chain will be unlocked and the new chain will be locked.
1139 * ip->chain may be adjusted by hammer2_chain_modify_ip().
/*
 * Inode-aware wrapper around hammer2_chain_modify(): marks the inode
 * MODIFIED, modifies the chain (which may swap *chainp for a duplicate),
 * repoints ip->chain at the new chain if it changed, and returns a
 * pointer to the (now writable) embedded inode data.
 *
 * NOTE(review): the function's braces were dropped by the extraction.
 */
1141 hammer2_inode_data_t *
1142 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1143 hammer2_chain_t **chainp, int flags)
1145 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1146 hammer2_chain_modify(trans, chainp, flags);
1147 if (ip->chain != *chainp)
1148 hammer2_inode_repoint(ip, NULL, *chainp);
1149 return(&ip->chain->data->ipdata);
/*
 * Mark a chain modified within the transaction, assigning new media storage
 * and performing the copy-on-write as required.
 *
 * Summary of visible behavior:
 *  - Resolves chain->data if unresolved (unless MODIFY_OPTDATA allows
 *    skipping it and the chain is not a freemap node/leaf).
 *  - If already MODIFIED, either takes a quick-return path (set DIRTYBP,
 *    update modify_tid) or issues a delete-duplicate when the prior
 *    modification crosses a flush synchronization point (*chainp is then
 *    replaced; caller must use the returned chain).
 *  - Otherwise sets MODIFIED (+1 chain ref), updates modify_tid, allocates
 *    new media storage via hammer2_freemap_alloc() (skipped for vchain/
 *    fchain, or when storage exists and MODIFY_NOREALLOC is given), and
 *    executes the COW: embedded types assert no device buffer, while
 *    DATA/INDIRECT/FREEMAP_NODE instantiate a device buffer (getblk/
 *    cluster_read/bread) and bcopy/bzero into it.
 *  - Finishes with hammer2_chain_setsubmod() to flag the sub-tree modified.
 */
1153 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1156 hammer2_mount_t *hmp;
1157 hammer2_chain_t *chain;
1158 hammer2_off_t pbase;
1159 hammer2_off_t pmask;
1161 hammer2_tid_t flush_tid;
1170 * Data must be resolved if already assigned unless explicitly
1171 * flagged otherwise.
1175 if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1176 (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
/* lock/unlock cycle forces the data load via RESOLVE_ALWAYS */
1177 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
1178 hammer2_chain_unlock(chain);
1182 * data is not optional for freemap chains (we must always be sure
1183 * to copy the data on COW storage allocations).
1185 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1186 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1187 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
1188 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
1192 * If the chain is already marked MODIFIED we can usually just
1193 * return. However, if a modified chain is modified again in
1194 * a synchronization-point-crossing manner we have to issue a
1195 * delete/duplicate on the chain to avoid flush interference.
1197 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1199 * Which flush_tid do we need to check? If the chain is
1200 * related to the freemap we have to use the freemap flush
1201 * tid (free_flush_tid), otherwise we use the normal filesystem
1202 * flush tid (topo_flush_tid). The two flush domains are
1203 * almost completely independent of each other.
/*
 * NOTE(review): both branches below assign topo_flush_tid despite the
 * comment above mentioning free_flush_tid; the original author marked
 * this XXX.
 */
1205 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1206 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1207 flush_tid = hmp->topo_flush_tid; /* XXX */
1208 goto skipxx; /* XXX */
1210 flush_tid = hmp->topo_flush_tid;
1216 if (chain->modify_tid <= flush_tid &&
1217 trans->sync_tid > flush_tid) {
1219 * Modifications cross synchronization point,
1220 * requires delete-duplicate.
1222 KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
1223 hammer2_chain_delete_duplicate(trans, chainp, 0);
1225 /* fall through using duplicate */
1229 * Quick return path, set DIRTYBP to ensure that
1230 * the later retirement of bp will write it out.
1232 * quick return path also needs the modify_tid
1236 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1237 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1238 chain->bref.modify_tid = trans->sync_tid;
1239 chain->modify_tid = trans->sync_tid;
1244 * modify_tid is only updated for primary modifications, not for
1245 * propagated brefs. mirror_tid will be updated regardless during
1246 * the flush, no need to set it here.
1248 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1249 chain->bref.modify_tid = trans->sync_tid;
1252 * Set MODIFIED and add a chain ref to prevent destruction. Both
1253 * modified flags share the same ref.
1255 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1256 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1257 hammer2_chain_ref(chain);
1261 * Adjust chain->modify_tid so the flusher knows when the
1262 * modification occurred.
1264 chain->modify_tid = trans->sync_tid;
1267 * The modification or re-modification requires an allocation and
1270 * We normally always allocate new storage here. If storage exists
1271 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
1273 if (chain != &hmp->vchain &&
1274 chain != &hmp->fchain &&
1275 ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
1276 (flags & HAMMER2_MODIFY_NOREALLOC) == 0)
1278 hammer2_freemap_alloc(trans, chain->hmp,
1279 &chain->bref, chain->bytes);
1280 /* XXX failed allocation */
1284 * Do not COW if OPTDATA is set. INITIAL flag remains unchanged.
1285 * (OPTDATA does not prevent [re]allocation of storage, only the
1286 * related copy-on-write op).
1288 if (flags & HAMMER2_MODIFY_OPTDATA)
1292 * Clearing the INITIAL flag (for indirect blocks) indicates that
1293 * we've processed the uninitialized storage allocation.
1295 * If this flag is already clear we are likely in a copy-on-write
1296 * situation but we have to be sure NOT to bzero the storage if
1297 * no data is present.
1299 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
1300 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1308 * We currently should never instantiate a device buffer for a
1309 * file data chain. (We definitely can for a freemap chain).
1311 * XXX we can now do this
1313 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1317 * Instantiate data buffer and possibly execute COW operation
1319 switch(chain->bref.type) {
1320 case HAMMER2_BREF_TYPE_VOLUME:
1321 case HAMMER2_BREF_TYPE_FREEMAP:
1322 case HAMMER2_BREF_TYPE_INODE:
1323 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1325 * The data is embedded, no copy-on-write operation is
1328 KKASSERT(chain->bp == NULL);
1330 case HAMMER2_BREF_TYPE_DATA:
1331 case HAMMER2_BREF_TYPE_INDIRECT:
1332 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1334 * Perform the copy-on-write operation
1336 KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);
/* compute device-block-aligned base/offset for the buffer cache ops */
1338 psize = hammer2_devblksize(chain->bytes);
1339 pmask = (hammer2_off_t)psize - 1;
1340 pbase = chain->bref.data_off & ~pmask;
1341 boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
1342 KKASSERT(pbase != 0);
1343 peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
1346 * The getblk() optimization can only be used if the
1347 * chain element size matches the physical block size.
1349 if (chain->bp && chain->bp->b_loffset == pbase) {
1352 } else if (chain->bytes == psize) {
1353 nbp = getblk(hmp->devvp, pbase, psize, 0, 0);
1355 } else if (hammer2_isclusterable(chain)) {
1356 error = cluster_read(hmp->devvp, peof, pbase, psize,
1357 psize, HAMMER2_PBUFSIZE*4,
1359 adjreadcounter(&chain->bref, chain->bytes);
1361 error = bread(hmp->devvp, pbase, psize, &nbp);
1362 adjreadcounter(&chain->bref, chain->bytes);
1364 KKASSERT(error == 0);
1365 bdata = (char *)nbp->b_data + boff;
1368 * Copy or zero-fill on write depending on whether
1369 * chain->data exists or not. Retire the existing bp
1370 * based on the DIRTYBP flag. Set the DIRTYBP flag to
1371 * indicate that retirement of nbp should use bdwrite().
1374 KKASSERT(chain->bp != NULL);
1375 if (chain->data != bdata) {
1376 bcopy(chain->data, bdata, chain->bytes);
1378 } else if (wasinitial) {
1379 bzero(bdata, chain->bytes);
1382 * We have a problem. We were asked to COW but
1383 * we don't have any data to COW with!
1385 panic("hammer2_chain_modify: having a COW %p\n",
1388 if (chain->bp != nbp) {
1390 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
1391 chain->bp->b_flags |= B_CLUSTEROK;
1394 chain->bp->b_flags |= B_RELBUF;
1399 BUF_KERNPROC(chain->bp);
/* switch the chain's data pointer into the new buffer and dirty it */
1401 chain->data = bdata;
1402 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1405 panic("hammer2_chain_modify: illegal non-embedded type %d",
1411 hammer2_chain_setsubmod(trans, chain);
1415 * Mark the volume as having been modified. This short-cut version
1416 * does not have to lock the volume's chain, which allows the ioctl
1417 * code to make adjustments to connections without deadlocking. XXX
1419 * No ref is made on vchain when flagging it MODIFIED.
/*
 * Flag the volume data as modified via a voldata lock/unlock cycle; the
 * unlock is passed 1 as its second argument (per the comment above, this
 * shortcut avoids locking the volume's chain so the ioctl path cannot
 * deadlock, and takes no ref on vchain when flagging it MODIFIED).
 */
1422 hammer2_modify_volume(hammer2_mount_t *hmp)
1424 hammer2_voldata_lock(hmp);
1425 hammer2_voldata_unlock(hmp, 1);
1429 * Locate an in-memory chain. The parent must be locked. The in-memory
1430 * chain is returned with a reference and without a lock, or NULL
1433 * This function returns the chain at the specified index with the highest
1434 * delete_tid. The caller must check whether the chain is flagged
1435 * CHAIN_DELETED or not. However, because chain iterations can be removed
1436 * from memory we must ALSO check that DELETED chains are not flushed. A
1437 * DELETED chain which has been flushed must be ignored (the caller must
1438 * check the parent's blockref array).
1440 * NOTE: If no chain is found the caller usually must check the on-media
1441 * array to determine if a blockref exists at the index.
/*
 * Scan state shared between hammer2_chain_find_cmp() and
 * hammer2_chain_find_callback() during the RB_SCAN in
 * hammer2_chain_find_locked().
 *
 * NOTE(review): the callers also reference an info->index field which is
 * not visible in this extract — confirm against the full source.
 */
1443 struct hammer2_chain_find_info {
1444 hammer2_chain_t *best; /* candidate with the highest delete_tid */
1445 hammer2_tid_t delete_tid; /* highest delete_tid seen so far */
/*
 * RB_SCAN comparison function: orders children against the target
 * info->index so the scan narrows to the requested index.
 */
1451 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1453 struct hammer2_chain_find_info *info = data;
1455 if (child->index < info->index)
1457 if (child->index > info->index)
/*
 * RB_SCAN callback: track the child with the highest delete_tid seen,
 * implementing the "highest delete_tid wins" rule documented above
 * hammer2_chain_find_locked().
 */
1464 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1466 struct hammer2_chain_find_info *info = data;
1468 if (info->delete_tid < child->delete_tid) {
1469 info->delete_tid = child->delete_tid;
/*
 * Locate the in-memory chain under (parent, index) with the highest
 * delete_tid by scanning the parent core's RB tree.  Caller must hold
 * the appropriate lock (see hammer2_chain_find() for the spinlocked
 * wrapper).  Returns the best match without adding a ref.
 */
1477 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1479 struct hammer2_chain_find_info info;
1480 hammer2_chain_t *child;
1483 info.delete_tid = 0;
1486 RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1487 hammer2_chain_find_cmp, hammer2_chain_find_callback,
/*
 * Spinlocked wrapper around hammer2_chain_find_locked(): looks up the
 * in-memory chain at (parent, index) and refs it (if found) while holding
 * the parent core's spinlock.  Returned chain is referenced, not locked;
 * caller must check the DELETED flag per the comment block above.
 */
1495 hammer2_chain_find(hammer2_chain_t *parent, int index)
1497 hammer2_chain_t *child;
1499 spin_lock(&parent->core->cst.spin);
1500 child = hammer2_chain_find_locked(parent, index);
1502 hammer2_chain_ref(child);
1503 spin_unlock(&parent->core->cst.spin);
1509 * Return a locked chain structure with all associated data acquired.
1510 * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1512 * Caller must hold the parent locked shared or exclusive since we may
1513 * need the parent's bref array to find our block.
1515 * The returned child is locked as requested. If NOLOCK, the returned
1516 * child is still at least referenced.
/*
 * Return the chain at (parent, index), either from the parent core's RB
 * tree cache or by allocating a fresh chain from the parent's media bref
 * array and inserting it.  The returned chain is locked per LOOKUP flags
 * (only referenced when LOOKUP_NOLOCK).  Caller must hold the parent
 * locked shared or exclusive (see comment block above).
 */
1519 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1521 hammer2_blockref_t *bref;
1522 hammer2_mount_t *hmp = parent->hmp;
1523 hammer2_chain_core_t *above = parent->core;
1524 hammer2_chain_t *chain;
1525 hammer2_chain_t dummy;
1529 * Figure out how to lock. MAYBE can be used to optimize
1530 * the initial-create state for indirect blocks.
1532 if (flags & HAMMER2_LOOKUP_ALWAYS)
1533 how = HAMMER2_RESOLVE_ALWAYS;
1534 else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1535 how = HAMMER2_RESOLVE_NEVER;
1537 how = HAMMER2_RESOLVE_MAYBE;
1538 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1539 how |= HAMMER2_RESOLVE_SHARED;
1543 * First see if we have a (possibly modified) chain element cached
1544 * for this (parent, index). Acquire the data if necessary.
1546 * If chain->data is non-NULL the chain should already be marked
/* dummy key: MAX_TID selects the highest-delete_tid entry at the index */
1550 dummy.index = index;
1551 dummy.delete_tid = HAMMER2_MAX_TID;
1552 spin_lock(&above->cst.spin);
1553 chain = RB_FIND(hammer2_chain_tree, &above->rbtree, &dummy);
1555 hammer2_chain_ref(chain);
1556 spin_unlock(&above->cst.spin);
1557 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1558 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1561 spin_unlock(&above->cst.spin);
1564 * The parent chain must not be in the INITIAL state.
1566 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1567 panic("hammer2_chain_get: Missing bref(1)");
1572 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1573 * the parent's bref to determine where and how big the array is).
1575 switch(parent->bref.type) {
1576 case HAMMER2_BREF_TYPE_INODE:
1577 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1578 bref = &parent->data->ipdata.u.blockset.blockref[index];
1580 case HAMMER2_BREF_TYPE_INDIRECT:
1581 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1582 KKASSERT(parent->data != NULL);
1583 KKASSERT(index >= 0 &&
1584 index < parent->bytes / sizeof(hammer2_blockref_t));
1585 bref = &parent->data->npdata[index];
1587 case HAMMER2_BREF_TYPE_VOLUME:
1588 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1589 bref = &hmp->voldata.sroot_blockset.blockref[index];
1591 case HAMMER2_BREF_TYPE_FREEMAP:
1592 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1593 bref = &hmp->voldata.freemap_blockset.blockref[index];
1597 panic("hammer2_chain_get: unrecognized blockref type: %d",
1600 if (bref->type == 0) {
1601 panic("hammer2_chain_get: Missing bref(2)");
1606 * Allocate a chain structure representing the existing media
1607 * entry. Resulting chain has one ref and is not locked.
1609 * The locking operation we do later will issue I/O to read it.
1611 chain = hammer2_chain_alloc(hmp, NULL, bref);
1612 hammer2_chain_core_alloc(chain, NULL); /* ref'd chain returned */
1615 * Link the chain into its parent. A spinlock is required to safely
1616 * access the RBTREE, and it is possible to collide with another
1617 * hammer2_chain_get() operation because the caller might only hold
1618 * a shared lock on the parent.
1620 KKASSERT(parent->refs > 0);
1621 spin_lock(&above->cst.spin);
1622 chain->above = above;
1623 chain->index = index;
/* lost the race: another thread inserted first; drop our chain */
1624 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain)) {
1625 chain->above = NULL;
1627 spin_unlock(&above->cst.spin);
1628 hammer2_chain_drop(chain);
1631 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1632 spin_unlock(&above->cst.spin);
1635 * Our new chain is referenced but NOT locked. Lock the chain
1636 * below. The locking operation also resolves its data.
1638 * If NOLOCK is set the release will release the one-and-only lock.
1640 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1641 hammer2_chain_lock(chain, how); /* recursive lock */
1642 hammer2_chain_drop(chain); /* excess ref */
1648 * Lookup initialization/completion API
/*
 * Prepare a parent for a lookup iteration: lock it RESOLVE_ALWAYS,
 * adding RESOLVE_SHARED when the caller requested a shared lookup.
 */
1651 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1653 if (flags & HAMMER2_LOOKUP_SHARED) {
1654 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1655 HAMMER2_RESOLVE_SHARED);
1657 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
/*
 * Terminate a lookup iteration, releasing the lock taken by
 * hammer2_chain_lookup_init() (or held across the iteration).
 */
1663 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1666 hammer2_chain_unlock(parent);
/*
 * Replace *parentp with its own parent: pick the first non-refactoring
 * entry on the core's parent list under the spinlock, ref it, then swap
 * locks (old parent unlocked before the new parent is locked with
 * RESOLVE_NOREF, which consumes the ref we took).
 */
1671 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1673 hammer2_chain_t *oparent;
1674 hammer2_chain_t *nparent;
1675 hammer2_chain_core_t *above;
1678 above = oparent->above;
1680 spin_lock(&above->cst.spin);
1681 nparent = above->first_parent;
/* skip parents currently being refactored (delete-duplicated) */
1682 while (hammer2_chain_refactor_test(nparent, 1))
1683 nparent = nparent->next_parent;
1684 hammer2_chain_ref(nparent); /* protect nparent, use in lock */
1685 spin_unlock(&above->cst.spin);
1687 hammer2_chain_unlock(oparent);
1688 hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1695 * Locate any key between key_beg and key_end inclusive. (*parentp)
1696 * typically points to an inode but can also point to a related indirect
1697 * block and this function will recurse upwards and find the inode again.
1699 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1700 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1701 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN
1702 * AND ALL IN-RANGE KEYS WILL EVENTUALLY BE RETURNED (NOT
1703 * NECESSARILY IN ORDER).
1705 * (*parentp) must be exclusively locked and referenced and can be an inode
1706 * or an existing indirect block within the inode.
1708 * On return (*parentp) will be modified to point at the deepest parent chain
1709 * element encountered during the search, as a helper for an insertion or
1710 * deletion. The new (*parentp) will be locked and referenced and the old
1711 * will be unlocked and dereferenced (no change if they are both the same).
1713 * The matching chain will be returned exclusively locked. If NOLOCK is
1714 * requested the chain will be returned only referenced.
1716 * NULL is returned if no match was found, but (*parentp) will still
1717 * potentially be adjusted.
1719 * This function will also recurse up the chain if the key is not within the
1720 * current parent's range. (*parentp) can never be set to NULL. An iteration
1721 * can simply allow (*parentp) to float inside the loop.
1723 * NOTE! chain->data is not always resolved. By default it will not be
1724 * resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF. Use
1725 * HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1726 * BREF_TYPE_DATA as the device buffer can alias the logical file
/*
 * Locate any chain keyed within [key_beg, key_end] under (*parentp),
 * recursing upward first until the parent encloses the range, then
 * scanning the parent's blockref array / in-memory children.  Indirect
 * blocks found become the new (*parentp) and the search loops into them
 * (subject to LOOKUP_MATCHIND — see comment block above for the full
 * contract, lock-state rules, and the NO-KEY-ORDER warning).
 */
1730 hammer2_chain_lookup(hammer2_chain_t **parentp,
1731 hammer2_key_t key_beg, hammer2_key_t key_end,
1734 hammer2_mount_t *hmp;
1735 hammer2_chain_t *parent;
1736 hammer2_chain_t *chain;
1737 hammer2_chain_t *tmp;
1738 hammer2_blockref_t *base;
1739 hammer2_blockref_t *bref;
1740 hammer2_key_t scan_beg;
1741 hammer2_key_t scan_end;
1744 int how_always = HAMMER2_RESOLVE_ALWAYS;
1745 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1747 if (flags & HAMMER2_LOOKUP_ALWAYS)
1748 how_maybe = how_always;
1750 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1751 how_maybe |= HAMMER2_RESOLVE_SHARED;
1752 how_always |= HAMMER2_RESOLVE_SHARED;
1756 * Recurse (*parentp) upward if necessary until the parent completely
1757 * encloses the key range or we hit the inode.
1762 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1763 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1764 scan_beg = parent->bref.key;
1765 scan_end = scan_beg +
1766 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1767 if (key_beg >= scan_beg && key_end <= scan_end)
1769 parent = hammer2_chain_getparent(parentp, how_maybe);
1774 * Locate the blockref array. Currently we do a fully associative
1775 * search through the array.
1777 switch(parent->bref.type) {
1778 case HAMMER2_BREF_TYPE_INODE:
1780 * Special shortcut for embedded data returns the inode
1781 * itself. Callers must detect this condition and access
1782 * the embedded data (the strategy code does this for us).
1784 * This is only applicable to regular files and softlinks.
1786 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1787 if (flags & HAMMER2_LOOKUP_NOLOCK)
1788 hammer2_chain_ref(parent);
1790 hammer2_chain_lock(parent, how_always);
1793 base = &parent->data->ipdata.u.blockset.blockref[0];
1794 count = HAMMER2_SET_COUNT;
1796 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1797 case HAMMER2_BREF_TYPE_INDIRECT:
1799 * Handle MATCHIND on the parent
1801 if (flags & HAMMER2_LOOKUP_MATCHIND) {
1802 scan_beg = parent->bref.key;
1803 scan_end = scan_beg +
1804 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
/* exact range match: return the indirect block itself */
1805 if (key_beg == scan_beg && key_end == scan_end) {
1807 hammer2_chain_lock(chain, how_maybe);
1812 * Optimize indirect blocks in the INITIAL state to avoid
1815 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1818 if (parent->data == NULL)
1819 panic("parent->data is NULL");
1820 base = &parent->data->npdata[0];
1822 count = parent->bytes / sizeof(hammer2_blockref_t);
1824 case HAMMER2_BREF_TYPE_VOLUME:
1825 base = &hmp->voldata.sroot_blockset.blockref[0];
1826 count = HAMMER2_SET_COUNT;
1828 case HAMMER2_BREF_TYPE_FREEMAP:
1829 base = &hmp->voldata.freemap_blockset.blockref[0];
1830 count = HAMMER2_SET_COUNT;
1833 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1835 base = NULL; /* safety */
1836 count = 0; /* safety */
1840 * If the element and key overlap we use the element.
1842 * NOTE! Deleted elements are effectively invisible. Deletions
1843 * proactively clear the parent bref to the deleted child
1844 * so we do not try to shadow here to avoid parent updates
1845 * (which would be difficult since multiple deleted elements
1846 * might represent different flush synchronization points).
1849 scan_beg = 0; /* avoid compiler warning */
1850 scan_end = 0; /* avoid compiler warning */
/* in-memory children overload the media bref at the same index */
1852 for (i = 0; i < count; ++i) {
1853 tmp = hammer2_chain_find(parent, i);
1855 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1856 hammer2_chain_drop(tmp);
1860 KKASSERT(bref->type != 0);
1861 } else if (base == NULL || base[i].type == 0) {
1866 scan_beg = bref->key;
1867 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1869 hammer2_chain_drop(tmp);
1870 if (key_beg <= scan_end && key_end >= scan_beg)
1874 if (key_beg == key_end)
/* range lookup exhausted this parent; continue via chain_next */
1876 return (hammer2_chain_next(parentp, NULL,
1877 key_beg, key_end, flags));
1881 * Acquire the new chain element. If the chain element is an
1882 * indirect block we must search recursively.
1884 * It is possible for the tmp chain above to be removed from
1885 * the RBTREE but the parent lock ensures it would not have been
1886 * destroyed from the media, so the chain_get() code will simply
1887 * reload it from the media in that case.
1889 chain = hammer2_chain_get(parent, i, flags);
1894 * If the chain element is an indirect block it becomes the new
1895 * parent and we loop on it.
1897 * The parent always has to be locked with at least RESOLVE_MAYBE
1898 * so we can access its data. It might need a fixup if the caller
1899 * passed incompatible flags. Be careful not to cause a deadlock
1900 * as a data-load requires an exclusive lock.
1902 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
1903 * range is within the requested key range we return the indirect
1904 * block and do NOT loop. This is usually only used to acquire
1907 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1908 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1909 hammer2_chain_unlock(parent);
1910 *parentp = parent = chain;
1911 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1912 hammer2_chain_lock(chain,
1914 HAMMER2_RESOLVE_NOREF);
1915 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1916 chain->data == NULL) {
/* relock to resolve data: extra ref survives the unlock */
1917 hammer2_chain_ref(chain);
1918 hammer2_chain_unlock(chain);
1919 hammer2_chain_lock(chain,
1921 HAMMER2_RESOLVE_NOREF);
1927 * All done, return the chain
1933 * After having issued a lookup we can iterate all matching keys.
1935 * If chain is non-NULL we continue the iteration from just after its index.
1937 * If chain is NULL we assume the parent was exhausted and continue the
1938 * iteration at the next parent.
1940 * parent must be locked on entry and remains locked throughout. chain's
1941 * lock status must match flags. Chain is always at least referenced.
1943 * WARNING! The MATCHIND flag does not apply to this function.
/*
 * Continue an iteration started by hammer2_chain_lookup(): advance past
 * the passed-in chain's index (or past the exhausted parent when chain is
 * NULL), recompute the blockref array for the current parent, and return
 * the next matching chain in [key_beg, key_end].  See the comment block
 * above for lock-state requirements; MATCHIND does not apply here the
 * same way it does in lookup.
 */
1946 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1947 hammer2_key_t key_beg, hammer2_key_t key_end,
1950 hammer2_mount_t *hmp;
1951 hammer2_chain_t *parent;
1952 hammer2_chain_t *tmp;
1953 hammer2_blockref_t *base;
1954 hammer2_blockref_t *bref;
1955 hammer2_key_t scan_beg;
1956 hammer2_key_t scan_end;
1958 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1961 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1962 how_maybe |= HAMMER2_RESOLVE_SHARED;
1969 * Calculate the next index and recalculate the parent if necessary.
1973 * Continue iteration within current parent. If not NULL
1974 * the passed-in chain may or may not be locked, based on
1975 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1978 i = chain->index + 1;
/* release the previous chain per its NOLOCK-dependent lock state */
1979 if (flags & HAMMER2_LOOKUP_NOLOCK)
1980 hammer2_chain_drop(chain);
1982 hammer2_chain_unlock(chain);
1985 * Any scan where the lookup returned degenerate data embedded
1986 * in the inode has an invalid index and must terminate.
1988 if (chain == parent)
1991 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1992 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1994 * We reached the end of the iteration.
1999 * Continue iteration with next parent unless the current
2000 * parent covers the range.
2002 scan_beg = parent->bref.key;
2003 scan_end = scan_beg +
2004 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
2005 if (key_beg >= scan_beg && key_end <= scan_end)
2008 i = parent->index + 1;
2009 parent = hammer2_chain_getparent(parentp, how_maybe);
2014 * Locate the blockref array. Currently we do a fully associative
2015 * search through the array.
2017 switch(parent->bref.type) {
2018 case HAMMER2_BREF_TYPE_INODE:
2019 base = &parent->data->ipdata.u.blockset.blockref[0];
2020 count = HAMMER2_SET_COUNT;
2022 case HAMMER2_BREF_TYPE_INDIRECT:
2023 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2024 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2027 KKASSERT(parent->data != NULL);
2028 base = &parent->data->npdata[0];
2030 count = parent->bytes / sizeof(hammer2_blockref_t);
2032 case HAMMER2_BREF_TYPE_VOLUME:
2033 base = &hmp->voldata.sroot_blockset.blockref[0];
2034 count = HAMMER2_SET_COUNT;
2036 case HAMMER2_BREF_TYPE_FREEMAP:
2037 base = &hmp->voldata.freemap_blockset.blockref[0];
2038 count = HAMMER2_SET_COUNT;
2041 panic("hammer2_chain_next: unrecognized blockref type: %d",
2043 base = NULL; /* safety */
2044 count = 0; /* safety */
2047 KKASSERT(i <= count);
2050 * Look for the key. If we are unable to find a match and an exact
2051 * match was requested we return NULL. If a range was requested we
2052 * run hammer2_chain_next() to iterate.
2054 * NOTE! Deleted elements are effectively invisible. Deletions
2055 * proactively clear the parent bref to the deleted child
2056 * so we do not try to shadow here to avoid parent updates
2057 * (which would be difficult since multiple deleted elements
2058 * might represent different flush synchronization points).
2061 scan_beg = 0; /* avoid compiler warning */
2062 scan_end = 0; /* avoid compiler warning */
/* in-memory children overload the media bref at the same index */
2065 tmp = hammer2_chain_find(parent, i);
2067 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
2068 hammer2_chain_drop(tmp);
2073 } else if (base == NULL || base[i].type == 0) {
2079 scan_beg = bref->key;
2080 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
2082 hammer2_chain_drop(tmp);
2083 if (key_beg <= scan_end && key_end >= scan_beg)
2089 * If we couldn't find a match recurse up a parent to continue the
2096 * Acquire the new chain element. If the chain element is an
2097 * indirect block we must search recursively.
2099 chain = hammer2_chain_get(parent, i, flags);
2104 * If the chain element is an indirect block it becomes the new
2105 * parent and we loop on it.
2107 * The parent always has to be locked with at least RESOLVE_MAYBE
2108 * so we can access its data. It might need a fixup if the caller
2109 * passed incompatible flags. Be careful not to cause a deadlock
2110 * as a data-load requires an exclusive lock.
2112 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
2113 * range is within the requested key range we return the indirect
2114 * block and do NOT loop. This is usually only used to acquire
2117 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
2118 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
2119 if ((flags & HAMMER2_LOOKUP_MATCHIND) == 0 ||
2120 key_beg > scan_beg || key_end < scan_end) {
2121 hammer2_chain_unlock(parent);
2122 *parentp = parent = chain;
2124 if (flags & HAMMER2_LOOKUP_NOLOCK) {
2125 hammer2_chain_lock(parent,
2127 HAMMER2_RESOLVE_NOREF);
2128 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
2129 parent->data == NULL) {
/* relock to resolve data: extra ref survives the unlock */
2130 hammer2_chain_ref(parent);
2131 hammer2_chain_unlock(parent);
2132 hammer2_chain_lock(parent,
2134 HAMMER2_RESOLVE_NOREF);
2142 * All done, return chain
2148 * Loop on parent's children, issuing the callback for each child.
2150 * Uses LOOKUP flags.
/*
 * Iterate parent's children, invoking callback(parent, &chain, arg) for
 * each one.  The blockref array is recomputed on every loop so the
 * callback may temporarily unlock/relock the parent.  In-memory children
 * (via hammer2_chain_find()) overload media brefs at the same index, and
 * DELETED children hide the underlying slot.  LOOKUP flags govern how
 * each child is locked/released around the callback.
 */
2153 hammer2_chain_iterate(hammer2_chain_t *parent,
2154 int (*callback)(hammer2_chain_t *parent,
2155 hammer2_chain_t **chainp,
2157 void *arg, int flags)
2159 hammer2_chain_t *chain;
2160 hammer2_blockref_t *base;
2166 * Scan the children (if any)
2172 * Calculate the blockref array on each loop in order
2173 * to allow the callback to temporarily unlock/relock
2176 switch(parent->bref.type) {
2177 case HAMMER2_BREF_TYPE_INODE:
2178 base = &parent->data->ipdata.u.blockset.blockref[0];
2179 count = HAMMER2_SET_COUNT;
2181 case HAMMER2_BREF_TYPE_INDIRECT:
2182 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2183 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2186 KKASSERT(parent->data != NULL);
2187 base = &parent->data->npdata[0];
2189 count = parent->bytes / sizeof(hammer2_blockref_t);
2191 case HAMMER2_BREF_TYPE_VOLUME:
2192 base = &parent->hmp->voldata.sroot_blockset.blockref[0];
2193 count = HAMMER2_SET_COUNT;
2195 case HAMMER2_BREF_TYPE_FREEMAP:
2196 base = &parent->hmp->voldata.freemap_blockset.blockref[0];
2197 count = HAMMER2_SET_COUNT;
2201 * The function allows calls on non-recursive
2202 * chains and will effectively be a nop() in that
2217 * Lookup the child, properly overloading any elements
2220 * NOTE: Deleted elements cover any underlying base[] entry
2221 * (which might not have been zero'd out yet).
2223 * NOTE: The fact that there can be multiple stacked
2224 * deleted elements at the same index is hidden
2225 * by hammer2_chain_find().
2227 chain = hammer2_chain_find(parent, i);
2229 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2230 hammer2_chain_drop(chain);
2234 } else if (base == NULL || base[i].type == 0) {
/* swap the bare ref for a properly locked chain_get() result */
2239 hammer2_chain_drop(chain);
2240 chain = hammer2_chain_get(parent, i, flags);
2242 res = callback(parent, &chain, arg);
2244 if (flags & HAMMER2_LOOKUP_NOLOCK)
2245 hammer2_chain_drop(chain);
2247 hammer2_chain_unlock(chain);
2258 * Create and return a new hammer2 system memory structure of the specified
2259 * key, type and size and insert it under (*parentp). This is a full
2260 * insertion, based on the supplied key/keybits, and may involve creating
2261 * indirect blocks and moving other chains around via delete/duplicate.
2263 * (*parentp) must be exclusive locked and may be replaced on return
2264 * depending on how much work the function had to do.
2266 * (*chainp) usually starts out NULL and returns the newly created chain,
2267 * but if the caller desires the caller may allocate a disconnected chain
2268 * and pass it in instead. (It is also possible for the caller to use
2269 * chain_duplicate() to create a disconnected chain, manipulate it, then
2270 * pass it into this function to insert it).
2272 * This function should NOT be used to insert INDIRECT blocks. It is
2273 * typically used to create/insert inodes and data blocks.
2275 * Caller must pass-in an exclusively locked parent the new chain is to
2276 * be inserted under, and optionally pass-in a disconnected, exclusively
2277 * locked chain to insert (else we create a new chain). The function will
2278 * adjust (*parentp) as necessary, create or connect the chain, and
2279 * return an exclusively locked chain in *chainp.
2282 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2283 hammer2_chain_t **chainp,
2284 hammer2_key_t key, int keybits, int type, size_t bytes)
2286 hammer2_mount_t *hmp;
2287 hammer2_chain_t *chain;
2288 hammer2_chain_t *child;
2289 hammer2_chain_t *parent = *parentp;
2290 hammer2_chain_core_t *above;
2291 hammer2_blockref_t dummy;
2292 hammer2_blockref_t *base;
2298 above = parent->core;
2299 KKASSERT(ccms_thread_lock_owned(&above->cst));
2303 if (chain == NULL) {
2305 * First allocate media space and construct the dummy bref,
2306 * then allocate the in-memory chain structure. Set the
2307 * INITIAL flag for fresh chains.
2309 bzero(&dummy, sizeof(dummy));
2312 dummy.keybits = keybits;
2313 dummy.data_off = hammer2_getradix(bytes);
2314 dummy.methods = parent->bref.methods;
2315 chain = hammer2_chain_alloc(hmp, trans, &dummy);
2316 hammer2_chain_core_alloc(chain, NULL);
2318 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2321 * Lock the chain manually, chain_lock will load the chain
2322 * which we do NOT want to do. (note: chain->refs is set
2323 * to 1 by chain_alloc() for us, but lockcnt is not).
2326 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
2330 * We do NOT set INITIAL here (yet). INITIAL is only
2331 * used for indirect blocks.
2333 * Recalculate bytes to reflect the actual media block
2336 bytes = (hammer2_off_t)1 <<
2337 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2338 chain->bytes = bytes;
2341 case HAMMER2_BREF_TYPE_VOLUME:
2342 case HAMMER2_BREF_TYPE_FREEMAP:
2343 panic("hammer2_chain_create: called with volume type");
2345 case HAMMER2_BREF_TYPE_INODE:
2346 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2347 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2348 chain->data = kmalloc(sizeof(chain->data->ipdata),
2349 hmp->mchain, M_WAITOK | M_ZERO);
2351 case HAMMER2_BREF_TYPE_INDIRECT:
2352 panic("hammer2_chain_create: cannot be used to"
2353 "create indirect block");
2355 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2356 panic("hammer2_chain_create: cannot be used to"
2357 "create freemap root or node");
2359 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2360 KKASSERT(bytes == sizeof(chain->data->bmdata));
2361 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2362 chain->data = kmalloc(sizeof(chain->data->bmdata),
2363 hmp->mchain, M_WAITOK | M_ZERO);
2365 case HAMMER2_BREF_TYPE_DATA:
2367 /* leave chain->data NULL */
2368 KKASSERT(chain->data == NULL);
2373 * Potentially update the existing chain's key/keybits.
2375 * Do NOT mess with the current state of the INITIAL flag.
2377 chain->bref.key = key;
2378 chain->bref.keybits = keybits;
2379 KKASSERT(chain->above == NULL);
2383 above = parent->core;
2386 * Locate a free blockref in the parent's array
2388 switch(parent->bref.type) {
2389 case HAMMER2_BREF_TYPE_INODE:
2390 KKASSERT((parent->data->ipdata.op_flags &
2391 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2392 KKASSERT(parent->data != NULL);
2393 base = &parent->data->ipdata.u.blockset.blockref[0];
2394 count = HAMMER2_SET_COUNT;
2396 case HAMMER2_BREF_TYPE_INDIRECT:
2397 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2398 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2401 KKASSERT(parent->data != NULL);
2402 base = &parent->data->npdata[0];
2404 count = parent->bytes / sizeof(hammer2_blockref_t);
2406 case HAMMER2_BREF_TYPE_VOLUME:
2407 KKASSERT(parent->data != NULL);
2408 base = &hmp->voldata.sroot_blockset.blockref[0];
2409 count = HAMMER2_SET_COUNT;
2411 case HAMMER2_BREF_TYPE_FREEMAP:
2412 KKASSERT(parent->data != NULL);
2413 base = &hmp->voldata.freemap_blockset.blockref[0];
2414 count = HAMMER2_SET_COUNT;
2417 panic("hammer2_chain_create: unrecognized blockref type: %d",
2424 * Scan for an unallocated bref, also skipping any slots occupied
2425 * by in-memory chain elements that may not yet have been updated
2426 * in the parent's bref array.
2428 * We don't have to hold the spinlock to save an empty slot as
2429 * new slots can only transition from empty if the parent is
2430 * locked exclusively.
2432 spin_lock(&above->cst.spin);
2433 for (i = 0; i < count; ++i) {
2434 child = hammer2_chain_find_locked(parent, i);
2436 if (child->flags & HAMMER2_CHAIN_DELETED)
2442 if (base[i].type == 0)
2445 spin_unlock(&above->cst.spin);
2448 * If no free blockref could be found we must create an indirect
2449 * block and move a number of blockrefs into it. With the parent
2450 * locked we can safely lock each child in order to move it without
2451 * causing a deadlock.
2453 * This may return the new indirect block or the old parent depending
2454 * on where the key falls. NULL is returned on error.
2457 hammer2_chain_t *nparent;
2459 nparent = hammer2_chain_create_indirect(trans, parent,
2462 if (nparent == NULL) {
2464 hammer2_chain_drop(chain);
2468 if (parent != nparent) {
2469 hammer2_chain_unlock(parent);
2470 parent = *parentp = nparent;
2476 * Link the chain into its parent. Later on we will have to set
2477 * the MOVED bit in situations where we don't mark the new chain
2478 * as being modified.
2480 if (chain->above != NULL)
2481 panic("hammer2: hammer2_chain_create: chain already connected");
2482 KKASSERT(chain->above == NULL);
2483 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2485 chain->above = above;
2487 spin_lock(&above->cst.spin);
2488 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain))
2489 panic("hammer2_chain_create: collision");
2490 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2491 spin_unlock(&above->cst.spin);
2495 * Mark the newly created chain modified.
2497 * Device buffers are not instantiated for DATA elements
2498 * as these are handled by logical buffers.
2500 * Indirect and freemap node indirect blocks are handled
2501 * by hammer2_chain_create_indirect() and not by this
2504 * Data for all other bref types is expected to be
2505 * instantiated (INODE, LEAF).
2507 switch(chain->bref.type) {
2508 case HAMMER2_BREF_TYPE_DATA:
2509 hammer2_chain_modify(trans, &chain,
2510 HAMMER2_MODIFY_OPTDATA |
2511 HAMMER2_MODIFY_ASSERTNOCOPY);
2513 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2514 case HAMMER2_BREF_TYPE_INODE:
2515 hammer2_chain_modify(trans, &chain,
2516 HAMMER2_MODIFY_ASSERTNOCOPY);
2520 * Remaining types are not supported by this function.
2521 * In particular, INDIRECT and LEAF_NODE types are
2522 * handled by create_indirect().
2524 panic("hammer2_chain_create: bad type: %d",
2531 * When reconnecting a chain we must set MOVED and setsubmod
2532 * so the flush recognizes that it must update the bref in
2535 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2536 hammer2_chain_ref(chain);
2537 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2539 hammer2_chain_setsubmod(trans, chain);
2549 * Replace (*chainp) with a duplicate. The original *chainp is unlocked
2550 * and the replacement will be returned locked. Both the original and the
2551 * new chain will share the same RBTREE (have the same chain->core), with
2552 * the new chain becoming the 'current' chain (meaning it is the first in
2553 * the linked list at core->chain_first).
2555 * If (parent, i) then the new duplicated chain is inserted under the parent
2556 * at the specified index (the parent must not have a ref at that index).
2558 * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2559 * similar to if it had just been chain_alloc()'d (suitable for passing into
2560 * hammer2_chain_create() after this function returns).
2562 * NOTE! Duplication is used in order to retain the original topology to
2563 * support flush synchronization points. Both the original and the
2564 * new chain will have the same transaction id and thus the operation
2565 * appears atomic w/regards to media flushes.
2567 static void hammer2_chain_dup_fixup(hammer2_chain_t *ochain,
2568 hammer2_chain_t *nchain);
/*
 * hammer2_chain_duplicate() - duplicate *chainp, returning the new chain
 * locked in *chainp.  A NULL bref means "reuse ochain's bref" (assigned
 * below).  Caller must hold the parent exclusively locked when a
 * (parent, i) insertion point is supplied.
 */
2571 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2572 hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2574 hammer2_mount_t *hmp;
2575 hammer2_blockref_t *base;
2576 hammer2_chain_t *ochain;
2577 hammer2_chain_t *nchain;
2578 hammer2_chain_t *scan;
2579 hammer2_chain_core_t *above;
2586 * First create a duplicate of the chain structure, associating
2587 * it with the same core, making it the same size, pointing it
2588 * to the same bref (the same media block).
2593 bref = &ochain->bref;
2594 nchain = hammer2_chain_alloc(hmp, trans, bref);
2595 hammer2_chain_core_alloc(nchain, ochain->core);
/* size is encoded as a radix in the low bits of data_off */
2596 bytes = (hammer2_off_t)1 <<
2597 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2598 nchain->bytes = bytes;
2599 nchain->modify_tid = ochain->modify_tid;
/* RESOLVE_NEVER: do not instantiate media data for the new chain here */
2601 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2602 hammer2_chain_dup_fixup(ochain, nchain);
2605 * If parent is not NULL, insert into the parent at the requested
2606 * index. The newly duplicated chain must be marked MOVED and
2607 * SUBMODIFIED set in its parent(s).
2609 * Having both chains locked is extremely important for atomicity.
2613 * Locate a free blockref in the parent's array
2615 above = parent->core;
2616 KKASSERT(ccms_thread_lock_owned(&above->cst));
2618 switch(parent->bref.type) {
2619 case HAMMER2_BREF_TYPE_INODE:
/*
 * NOTE(review): ipdata is dereferenced here before the
 * parent->data != NULL assertion below; the assert order looks
 * inverted — confirm and swap if so.
 */
2620 KKASSERT((parent->data->ipdata.op_flags &
2621 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2622 KKASSERT(parent->data != NULL);
2623 base = &parent->data->ipdata.u.blockset.blockref[0];
2624 count = HAMMER2_SET_COUNT;
2626 case HAMMER2_BREF_TYPE_INDIRECT:
2627 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
/* INITIAL parents have no media data yet; base stays NULL */
2628 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2631 KKASSERT(parent->data != NULL);
2632 base = &parent->data->npdata[0];
2634 count = parent->bytes / sizeof(hammer2_blockref_t);
2636 case HAMMER2_BREF_TYPE_VOLUME:
2637 KKASSERT(parent->data != NULL);
2638 base = &hmp->voldata.sroot_blockset.blockref[0];
2639 count = HAMMER2_SET_COUNT;
2641 case HAMMER2_BREF_TYPE_FREEMAP:
2642 KKASSERT(parent->data != NULL);
2643 base = &hmp->voldata.freemap_blockset.blockref[0];
2644 count = HAMMER2_SET_COUNT;
/*
 * NOTE(review): panic message says "hammer2_chain_create";
 * presumably a copy/paste — should name hammer2_chain_duplicate.
 */
2647 panic("hammer2_chain_create: unrecognized "
2648 "blockref type: %d",
2653 KKASSERT(i >= 0 && i < count);
2655 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2656 KKASSERT(parent->refs > 0);
/*
 * Insert nchain under the parent's core.  The slot must be empty
 * on-media or occupied only by a DELETED in-memory chain.
 */
2658 spin_lock(&above->cst.spin);
2659 nchain->above = above;
2661 scan = hammer2_chain_find_locked(parent, i);
2662 KKASSERT(base == NULL || base[i].type == 0 ||
2664 (scan->flags & HAMMER2_CHAIN_DELETED));
2665 if (RB_INSERT(hammer2_chain_tree, &above->rbtree,
2667 panic("hammer2_chain_duplicate: collision");
2669 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2670 spin_unlock(&above->cst.spin);
/* MOVED carries an extra ref so the flush can find the chain */
2672 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2673 hammer2_chain_ref(nchain);
2674 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2676 hammer2_chain_setsubmod(trans, nchain);
2680 * We have to unlock ochain to flush any dirty data, asserting the
2681 * case (data == NULL) to catch any extra locks that might have been
2682 * present, then transfer state to nchain.
2684 oflags = ochain->flags;
2685 odata = ochain->data;
2686 hammer2_chain_unlock(ochain);
2687 KKASSERT((ochain->flags & HAMMER2_CHAIN_EMBEDDED) ||
2688 ochain->data == NULL);
2690 if (oflags & HAMMER2_CHAIN_INITIAL)
2691 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2694 * WARNING! We should never resolve DATA to device buffers
2695 * (XXX allow it if the caller did?), and since
2696 * we currently do not have the logical buffer cache
2697 * buffer in-hand to fix its cached physical offset
2698 * we also force the modify code to not COW it. XXX
/*
 * Re-mark nchain modified if ochain was modified; each branch then
 * drops the extra allocation ref.  Otherwise the excess ref is eaten
 * via a lock/NOREF+unlock cycle below.
 */
2700 if (oflags & HAMMER2_CHAIN_MODIFIED) {
2701 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2702 hammer2_chain_modify(trans, &nchain,
2703 HAMMER2_MODIFY_OPTDATA |
2704 HAMMER2_MODIFY_NOREALLOC |
2705 HAMMER2_MODIFY_ASSERTNOCOPY);
2706 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2707 hammer2_chain_modify(trans, &nchain,
2708 HAMMER2_MODIFY_OPTDATA |
2709 HAMMER2_MODIFY_ASSERTNOCOPY);
2711 hammer2_chain_modify(trans, &nchain,
2712 HAMMER2_MODIFY_ASSERTNOCOPY);
2714 hammer2_chain_drop(nchain);
2716 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2717 hammer2_chain_drop(nchain);
2718 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2719 hammer2_chain_drop(nchain);
2721 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
2722 HAMMER2_RESOLVE_NOREF);
2723 hammer2_chain_unlock(nchain);
2726 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2732 * When the chain is in the INITIAL state we must still
2733 * ensure that a block has been assigned so MOVED processing
2734 * works as expected.
2736 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2737 hammer2_chain_modify(trans, &nchain,
2738 HAMMER2_MODIFY_OPTDATA |
2739 HAMMER2_MODIFY_ASSERTNOCOPY);
2742 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2743 HAMMER2_RESOLVE_NOREF); /* eat excess ref */
2744 hammer2_chain_unlock(nchain);
2748 * Special in-place delete-duplicate sequence which does not require a
2749 * locked parent. (*chainp) is marked DELETED and atomically replaced
2750 * with a duplicate. Atomicity is at the very-fine spin-lock level in
2751 * order to ensure that lookups do not race us.
/*
 * On return *chainp points at the new (live) chain; the old chain is
 * left DELETED with delete_tid = trans->sync_tid so the flush can
 * retire it.  HAMMER2_DELDUP_RECORE requests a fresh core instead of
 * sharing ochain's core.
 */
2754 hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp,
2757 hammer2_mount_t *hmp;
2758 hammer2_chain_t *ochain;
2759 hammer2_chain_t *nchain;
2760 hammer2_chain_core_t *above;
2766 * First create a duplicate of the chain structure
2770 nchain = hammer2_chain_alloc(hmp, trans, &ochain->bref); /* 1 ref */
2771 if (flags & HAMMER2_DELDUP_RECORE)
2772 hammer2_chain_core_alloc(nchain, NULL);
2774 hammer2_chain_core_alloc(nchain, ochain->core);
2775 above = ochain->above;
/* size is encoded as a radix in the low bits of data_off */
2777 bytes = (hammer2_off_t)1 <<
2778 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2779 nchain->bytes = bytes;
2780 nchain->modify_tid = ochain->modify_tid;
/* accumulate quota-style counters from the chain being replaced */
2781 nchain->data_count += ochain->data_count;
2782 nchain->inode_count += ochain->inode_count;
2785 * Lock nchain and insert into ochain's core hierarchy, marking
2786 * ochain DELETED at the same time. Having both chains locked
2787 * is extremely important for atomicity.
2789 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2790 hammer2_chain_dup_fixup(ochain, nchain);
2791 /* extra ref still present from original allocation */
2793 nchain->index = ochain->index;
/*
 * All state transitions below happen under the core spinlock so
 * concurrent lookups see either the old or the new chain, never a
 * half-updated mix.  NOTE(review): ONRBTREE is set before the
 * RB_INSERT a few lines down — presumably safe under the spinlock,
 * but confirm against other insertion sites which set it after.
 */
2795 spin_lock(&above->cst.spin);
2796 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2797 ochain->delete_tid = trans->sync_tid;
2798 nchain->above = above;
2799 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
2800 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2801 hammer2_chain_ref(ochain);
2802 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
2804 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, nchain)) {
2805 panic("hammer2_chain_delete_duplicate: collision");
2807 spin_unlock(&above->cst.spin);
2810 * We have to unlock ochain to flush any dirty data, asserting the
2811 * case (data == NULL) to catch any extra locks that might have been
2812 * present, then transfer state to nchain.
2814 oflags = ochain->flags;
2815 odata = ochain->data;
2816 hammer2_chain_unlock(ochain); /* replacing ochain */
2817 KKASSERT(ochain->bref.type == HAMMER2_BREF_TYPE_INODE ||
2818 ochain->data == NULL);
2820 if (oflags & HAMMER2_CHAIN_INITIAL)
2821 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2824 * WARNING! We should never resolve DATA to device buffers
2825 * (XXX allow it if the caller did?), and since
2826 * we currently do not have the logical buffer cache
2827 * buffer in-hand to fix its cached physical offset
2828 * we also force the modify code to not COW it. XXX
/*
 * Re-mark nchain modified if ochain was modified; each branch then
 * drops the extra allocation ref (same pattern as chain_duplicate).
 */
2830 if (oflags & HAMMER2_CHAIN_MODIFIED) {
2831 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2832 hammer2_chain_modify(trans, &nchain,
2833 HAMMER2_MODIFY_OPTDATA |
2834 HAMMER2_MODIFY_NOREALLOC |
2835 HAMMER2_MODIFY_ASSERTNOCOPY);
2836 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2837 hammer2_chain_modify(trans, &nchain,
2838 HAMMER2_MODIFY_OPTDATA |
2839 HAMMER2_MODIFY_ASSERTNOCOPY);
2841 hammer2_chain_modify(trans, &nchain,
2842 HAMMER2_MODIFY_ASSERTNOCOPY);
2844 hammer2_chain_drop(nchain);
2846 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2847 hammer2_chain_drop(nchain);
2848 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2849 hammer2_chain_drop(nchain);
2851 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
2852 HAMMER2_RESOLVE_NOREF);
2853 hammer2_chain_unlock(nchain);
2858 * Unconditionally set the MOVED and SUBMODIFIED bit to force
2859 * update of parent bref and indirect blockrefs during flush.
/*
 * NOTE(review): the flag is set before the ref is taken here,
 * opposite order from the other MOVED sites above — confirm this
 * ordering is intentional (or race-free) for this path.
 */
2861 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2862 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2863 hammer2_chain_ref(nchain);
2865 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2866 hammer2_chain_setsubmod(trans, nchain);
2871 * Helper function to fixup inodes. The caller procedure stack may hold
2872 * multiple locks on ochain if it represents an inode, preventing our
2873 * unlock from retiring its state to the buffer cache.
2875 * In this situation any attempt to access the buffer cache could result
2876 * either in stale data or a deadlock. Work around the problem by copying
2877 * the embedded data directly.
/*
 * Only chains with embedded (non-buffer-cache) data need fixing up:
 * INODE and FREEMAP_LEAF.  The copy is allocated from hmp->mchain and
 * nchain is flagged EMBEDDED so teardown knows to kfree() it.
 */
2881 hammer2_chain_dup_fixup(hammer2_chain_t *ochain, hammer2_chain_t *nchain)
/* nothing to copy if the source has no resident data */
2883 if (ochain->data == NULL)
2885 switch(ochain->bref.type) {
2886 case HAMMER2_BREF_TYPE_INODE:
2887 KKASSERT(nchain->data == NULL);
2888 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2889 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2890 ochain->hmp->mchain, M_WAITOK | M_ZERO);
/* struct assignment copies the whole embedded inode data */
2891 nchain->data->ipdata = ochain->data->ipdata;
2893 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2894 KKASSERT(nchain->data == NULL);
2895 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2896 nchain->data = kmalloc(sizeof(nchain->data->bmdata),
2897 ochain->hmp->mchain, M_WAITOK | M_ZERO);
2898 bcopy(ochain->data->bmdata,
2899 nchain->data->bmdata,
2900 sizeof(nchain->data->bmdata));
2908 * Create a snapshot of the specified {parent, chain} with the specified
2911 * (a) We create a duplicate connected to the super-root as the specified
2914 * (b) We issue a restricted flush using the current transaction on the
2917 * (c) We disconnect and reallocate the duplicate's core.
/*
 * ip is the PFS inode being snapshotted; pfs carries the user-supplied
 * snapshot name.  Returns 0 on success or an errno-style error.
 */
2920 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_inode_t *ip,
2921 hammer2_ioc_pfs_t *pfs)
2923 hammer2_cluster_t *cluster;
2924 hammer2_mount_t *hmp;
2925 hammer2_chain_t *chain;
2926 hammer2_chain_t *nchain;
2927 hammer2_chain_t *parent;
2928 hammer2_inode_data_t *ipdata;
2933 name_len = strlen(pfs->name);
2934 lhc = hammer2_dirhash(pfs->name, name_len);
2935 cluster = ip->pmp->mount_cluster;
2936 hmp = ip->chain->hmp;
2937 KKASSERT(hmp == cluster->hmp); /* XXX */
2940 * Create disconnected duplicate
2942 KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
2944 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
/* (NULL, -1): duplicate is left unattached for later chain_create */
2945 hammer2_chain_duplicate(trans, NULL, -1, &nchain, NULL);
2946 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_RECYCLE |
2947 HAMMER2_CHAIN_SNAPSHOT);
2950 * Create named entry in the super-root.
/*
 * Iterate dirhash collisions until a free key is found (bounded by
 * DIRHASH_LOMASK); any hit at the current lhc is unlocked and retried.
 */
2952 parent = hammer2_chain_lookup_init(hmp->schain, 0);
2954 while (error == 0) {
2955 chain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
2958 if ((lhc & HAMMER2_DIRHASH_LOMASK) == HAMMER2_DIRHASH_LOMASK)
2960 hammer2_chain_unlock(chain);
2964 hammer2_chain_create(trans, &parent, &nchain, lhc, 0,
2965 HAMMER2_BREF_TYPE_INODE,
2966 HAMMER2_INODE_BYTES);
2967 hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
2968 hammer2_chain_lookup_done(parent);
2969 parent = NULL; /* safety */
/* fill in the snapshot inode's directory-entry fields */
2974 ipdata = &nchain->data->ipdata;
2975 ipdata->name_key = lhc;
2976 ipdata->name_len = name_len;
2977 ksnprintf(ipdata->filename, sizeof(ipdata->filename), "%s", pfs->name);
2980 * Set PFS type, generate a unique filesystem id, and generate
2981 * a cluster id. Use the same clid when snapshotting a PFS root,
2982 * which theoretically allows the snapshot to be used as part of
2983 * the same cluster (perhaps as a cache).
2985 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
2986 kern_uuidgen(&ipdata->pfs_fsid, 1);
2987 if (ip->chain == cluster->rchain)
2988 ipdata->pfs_clid = ip->chain->data->ipdata.pfs_clid;
2990 kern_uuidgen(&ipdata->pfs_clid, 1);
2993 * Issue a restricted flush of the snapshot. This is a synchronous
/*
 * NOTE(review): the kprintf()s and the 4-second tsleep() below look
 * like leftover debugging instrumentation — confirm whether the delay
 * is required or should be removed.
 */
2996 trans->flags |= HAMMER2_TRANS_RESTRICTED;
2997 kprintf("SNAPSHOTA\n");
2998 tsleep(trans, 0, "snapslp", hz*4);
2999 kprintf("SNAPSHOTB\n");
3000 hammer2_chain_flush(trans, nchain);
3001 trans->flags &= ~HAMMER2_TRANS_RESTRICTED;
3005 * Remove the link b/c nchain is a snapshot and snapshots don't
3006 * follow CHAIN_DELETED semantics ?
3011 KKASSERT(chain->duplink == nchain);
3012 KKASSERT(chain->core == nchain->core);
3013 KKASSERT(nchain->refs >= 2);
3014 chain->duplink = nchain->duplink;
3015 atomic_clear_int(&nchain->flags, HAMMER2_CHAIN_DUPTARGET);
3016 hammer2_chain_drop(nchain);
3019 kprintf("snapshot %s nchain->refs %d nchain->flags %08x\n",
3020 pfs->name, nchain->refs, nchain->flags);
3021 hammer2_chain_unlock(nchain);
3027 * Create an indirect block that covers one or more of the elements in the
3028 * current parent. Either returns the existing parent with no locking or
3029 * ref changes or returns the new indirect block locked and referenced
3030 * and leaving the original parent lock/ref intact as well.
3032 * If an error occurs, NULL is returned and *errorp is set to the error.
3034 * The returned chain depends on where the specified key falls.
3036 * The key/keybits for the indirect mode only needs to follow three rules:
3038 * (1) That all elements underneath it fit within its key space and
3040 * (2) That all elements outside it are outside its key space.
3042 * (3) When creating the new indirect block any elements in the current
3043 * parent that fit within the new indirect block's keyspace must be
3044 * moved into the new indirect block.
3046 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
3047 * keyspace than the current parent, but lookup/iteration rules will
3048 * ensure (and must ensure) that rule (2) for all parents leading up
3049 * to the nearest inode or the root volume header is adhered to. This
3050 * is accomplished by always recursing through matching keyspaces in
3051 * the hammer2_chain_lookup() and hammer2_chain_next() API.
3053 * The current implementation calculates the current worst-case keyspace by
3054 * iterating the current parent and then divides it into two halves, choosing
3055 * whichever half has the most elements (not necessarily the half containing
3056 * the requested key).
3058 * We can also opt to use the half with the least number of elements. This
3059 * causes lower-numbered keys (aka logical file offsets) to recurse through
3060 * fewer indirect blocks and higher-numbered keys to recurse through more.
3061 * This also has the risk of not moving enough elements to the new indirect
3062 * block and being forced to create several indirect blocks before the element
3065 * Must be called with an exclusively locked parent.
3067 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
3068 hammer2_key_t *keyp, int keybits,
3069 hammer2_blockref_t *base, int count);
3070 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
3071 hammer2_key_t *keyp, int keybits,
3072 hammer2_blockref_t *base, int count);
/*
 * Build a new indirect (or freemap-node) block under 'parent' covering
 * (create_key, create_bits), migrating matching children into it.
 * Returns the chain to insert under (parent or the new indirect block);
 * NULL with *errorp set on failure.  Parent must be exclusively locked.
 */
3075 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
3076 hammer2_key_t create_key, int create_bits,
3077 int for_type, int *errorp)
3079 hammer2_mount_t *hmp;
3080 hammer2_chain_core_t *above;
3081 hammer2_chain_core_t *icore;
3082 hammer2_blockref_t *base;
3083 hammer2_blockref_t *bref;
3084 hammer2_chain_t *chain;
3085 hammer2_chain_t *child;
3086 hammer2_chain_t *ichain;
3087 hammer2_chain_t dummy;
3088 hammer2_key_t key = create_key;
3089 int keybits = create_bits;
3095 * Calculate the base blockref pointer or NULL if the chain
3096 * is known to be empty. We need to calculate the array count
3097 * for RB lookups either way.
3101 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
3102 above = parent->core;
3104 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
/* INITIAL parent: no media data, so base stays NULL (count only) */
3105 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
3108 switch(parent->bref.type) {
3109 case HAMMER2_BREF_TYPE_INODE:
3110 count = HAMMER2_SET_COUNT;
3112 case HAMMER2_BREF_TYPE_INDIRECT:
3113 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3114 count = parent->bytes / sizeof(hammer2_blockref_t);
3116 case HAMMER2_BREF_TYPE_VOLUME:
3117 count = HAMMER2_SET_COUNT;
3119 case HAMMER2_BREF_TYPE_FREEMAP:
3120 count = HAMMER2_SET_COUNT;
3123 panic("hammer2_chain_create_indirect: "
3124 "unrecognized blockref type: %d",
/* non-INITIAL parent: resolve both base pointer and count */
3130 switch(parent->bref.type) {
3131 case HAMMER2_BREF_TYPE_INODE:
3132 base = &parent->data->ipdata.u.blockset.blockref[0];
3133 count = HAMMER2_SET_COUNT;
3135 case HAMMER2_BREF_TYPE_INDIRECT:
3136 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3137 base = &parent->data->npdata[0];
3138 count = parent->bytes / sizeof(hammer2_blockref_t);
3140 case HAMMER2_BREF_TYPE_VOLUME:
3141 base = &hmp->voldata.sroot_blockset.blockref[0];
3142 count = HAMMER2_SET_COUNT;
3144 case HAMMER2_BREF_TYPE_FREEMAP:
3145 base = &hmp->voldata.freemap_blockset.blockref[0];
3146 count = HAMMER2_SET_COUNT;
3149 panic("hammer2_chain_create_indirect: "
3150 "unrecognized blockref type: %d",
3158 * dummy used in later chain allocation (no longer used for lookups).
3160 bzero(&dummy, sizeof(dummy));
3161 dummy.delete_tid = HAMMER2_MAX_TID;
3164 * When creating an indirect block for a freemap node or leaf
3165 * the key/keybits must be fitted to static radix levels because
3166 * particular radix levels use particular reserved blocks in the
3169 * This routine calculates the key/radix of the indirect block
3170 * we need to create, and whether it is on the high-side or the
3173 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3174 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3175 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
3178 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
3183 * Normalize the key for the radix being represented, keeping the
3184 * high bits and throwing away the low bits.
3186 key &= ~(((hammer2_key_t)1 << keybits) - 1);
3189 * How big should our new indirect block be? It has to be at least
3190 * as large as its parent.
3192 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
3193 nbytes = HAMMER2_IND_BYTES_MIN;
3195 nbytes = HAMMER2_IND_BYTES_MAX;
3196 if (nbytes < count * sizeof(hammer2_blockref_t))
3197 nbytes = count * sizeof(hammer2_blockref_t);
3200 * Ok, create our new indirect block
3202 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
3203 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
3204 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
3206 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
3208 dummy.bref.key = key;
3209 dummy.bref.keybits = keybits;
3210 dummy.bref.data_off = hammer2_getradix(nbytes);
3211 dummy.bref.methods = parent->bref.methods;
3213 ichain = hammer2_chain_alloc(hmp, trans, &dummy.bref);
3214 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
3215 hammer2_chain_core_alloc(ichain, NULL);
3216 icore = ichain->core;
3217 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
3218 hammer2_chain_drop(ichain); /* excess ref from alloc */
3221 * We have to mark it modified to allocate its block, but use
3222 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
3223 * it won't be acted upon by the flush code.
3225 * XXX leave the node unmodified, depend on the SUBMODIFIED
3226 * flush to assign and modify parent blocks.
3228 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
3231 * Iterate the original parent and move the matching brefs into
3232 * the new indirect block.
3234 * At the same time locate an empty slot (or what will become an
3235 * empty slot) and assign the new indirect block to that slot.
3237 * XXX handle flushes.
3239 spin_lock(&above->cst.spin);
3240 for (i = 0; i < count; ++i) {
3242 * For keying purposes access the bref from the media or
3243 * from our in-memory cache. In cases where the in-memory
3244 * cache overrides the media the keyrefs will be the same
3245 * anyway so we can avoid checking the cache when the media
/* ichain->index < 0 means no insertion slot chosen yet */
3248 child = hammer2_chain_find_locked(parent, i);
3250 if (child->flags & HAMMER2_CHAIN_DELETED) {
3251 if (ichain->index < 0)
3255 bref = &child->bref;
3256 } else if (base && base[i].type) {
3259 if (ichain->index < 0)
3265 * Skip keys that are not within the key/radix of the new
3266 * indirect block. They stay in the parent.
3268 if ((~(((hammer2_key_t)1 << keybits) - 1) &
3269 (key ^ bref->key)) != 0) {
3274 * This element is being moved from the parent, its slot
3275 * is available for our new indirect block.
3277 if (ichain->index < 0)
3281 * Load the new indirect block by acquiring or allocating
3282 * the related chain entries, then move them to the new
3283 * parent (ichain) by deleting them from their old location
3284 * and inserting a duplicate of the chain and any modified
3285 * sub-chain in the new location.
3287 * We must set MOVED in the chain being duplicated and
3288 * SUBMODIFIED in the parent(s) so the flush code knows
3289 * what is going on. The latter is done after the loop.
3291 * WARNING! above->cst.spin must be held when parent is
3292 * modified, even though we own the full blown lock,
3293 * to deal with setsubmod and rename races.
3294 * (XXX remove this req).
3296 spin_unlock(&above->cst.spin);
3297 chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
3298 hammer2_chain_delete(trans, chain, HAMMER2_DELETE_WILLDUP);
3299 hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
3300 hammer2_chain_unlock(chain);
3301 KKASSERT(parent->refs > 0);
3303 spin_lock(&above->cst.spin);
3305 spin_unlock(&above->cst.spin);
3308 * Insert the new indirect block into the parent now that we've
3309 * cleared out some entries in the parent. We calculated a good
3310 * insertion index in the loop above (ichain->index).
3312 * We don't have to set MOVED here because we mark ichain modified
3313 * down below (so the normal modified -> flush -> set-moved sequence
3316 * The insertion shouldn't race as this is a completely new block
3317 * and the parent is locked.
3319 if (ichain->index < 0)
3320 kprintf("indirect parent %p count %d key %016jx/%d\n",
3321 parent, count, (intmax_t)key, keybits);
3322 KKASSERT(ichain->index >= 0);
3323 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3324 spin_lock(&above->cst.spin);
3325 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, ichain))
3326 panic("hammer2_chain_create_indirect: ichain insertion");
3327 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
3328 ichain->above = above;
3329 spin_unlock(&above->cst.spin);
3332 * Mark the new indirect block modified after insertion, which
3333 * will propagate up through parent all the way to the root and
3334 * also allocate the physical block in ichain for our caller,
3335 * and assign ichain->data to a pre-zero'd space (because there
3336 * is not prior data to copy into it).
3338 * We have to set SUBMODIFIED in ichain's flags manually so the
3339 * flusher knows it has to recurse through it to get to all of
3340 * our moved blocks, then call setsubmod() to set the bit
3343 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3344 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
3345 hammer2_chain_setsubmod(trans, ichain);
3348 * Figure out what to return.
/* outside the new keyspace -> original parent stays the target */
3350 if (~(((hammer2_key_t)1 << keybits) - 1) &
3351 (create_key ^ key)) {
3353 * Key being created is outside the key range,
3354 * return the original parent.
3356 hammer2_chain_unlock(ichain);
3359 * Otherwise its in the range, return the new parent.
3360 * (leave both the new and old parent locked).
3369 * Calculate the keybits and highside/lowside of the freemap node the
3370 * caller is creating.
3372 * This routine will specify the next higher-level freemap key/radix
3373 * representing the lowest-ordered set. By doing so, eventually all
3374 * low-ordered sets will be moved one level down.
3376 * We have to be careful here because the freemap reserves a limited
3377 * number of blocks for a limited number of levels. So we can't just
3378 * push indiscriminately.
/*
 * Returns the chosen keybits and writes the chosen key through *keyp
 * (final store is outside this excerpt).  base/count describe the
 * parent's blockref array; in-memory children override media brefs.
 */
3381 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
3382 int keybits, hammer2_blockref_t *base, int count)
3384 hammer2_chain_core_t *above;
3385 hammer2_chain_t *child;
3386 hammer2_blockref_t *bref;
3393 above = parent->core;
3399 * Calculate the range of keys in the array being careful to skip
3400 * slots which are overridden with a deletion.
3402 spin_lock(&above->cst.spin);
3403 for (i = 0; i < count; ++i) {
3404 child = hammer2_chain_find_locked(parent, i);
3406 if (child->flags & HAMMER2_CHAIN_DELETED)
3408 bref = &child->bref;
3409 } else if (base && base[i].type) {
/* track the smallest keybits / lowest key seen so far */
3415 if (keybits > bref->keybits) {
3417 keybits = bref->keybits;
3418 } else if (keybits == bref->keybits && bref->key < key) {
3422 spin_unlock(&above->cst.spin);
3425 * Return the keybits for a higher-level FREEMAP_NODE covering
/* promote one fixed freemap radix level; LEVEL4 cannot be exceeded */
3429 case HAMMER2_FREEMAP_LEVEL0_RADIX:
3430 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3432 case HAMMER2_FREEMAP_LEVEL1_RADIX:
3433 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3435 case HAMMER2_FREEMAP_LEVEL2_RADIX:
3436 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3438 case HAMMER2_FREEMAP_LEVEL3_RADIX:
3439 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3441 case HAMMER2_FREEMAP_LEVEL4_RADIX:
3442 panic("hammer2_chain_indkey_freemap: level too high");
3445 panic("hammer2_chain_indkey_freemap: bad radix");
3454 * Calculate the keybits and highside/lowside of the indirect block the
3455 * caller is creating.
/*
 * Scans the parent's children to find the smallest keyspace covering
 * them, then halves it and picks the half per hammer2_indirect_optimize
 * (fewest vs. most elements).  Writes the chosen key through *keyp
 * (final store is outside this excerpt) and returns the halved keybits.
 */
3458 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3459 int keybits, hammer2_blockref_t *base, int count)
3461 hammer2_chain_core_t *above;
3462 hammer2_chain_t *child;
3463 hammer2_blockref_t *bref;
3471 above = parent->core;
3476 * Calculate the range of keys in the array being careful to skip
3477 * slots which are overridden with a deletion. Once the scan
3478 * completes we will cut the key range in half and shift half the
3479 * range into the new indirect block.
3481 spin_lock(&above->cst.spin);
3482 for (i = 0; i < count; ++i) {
3483 child = hammer2_chain_find_locked(parent, i);
3485 if (child->flags & HAMMER2_CHAIN_DELETED)
3487 bref = &child->bref;
3488 } else if (base && base[i].type) {
3495 * Expand our calculated key range (key, keybits) to fit
3496 * the scanned key. nkeybits represents the full range
3497 * that we will later cut in half (two halves @ nkeybits - 1).
3500 if (nkeybits < bref->keybits) {
/* defensive check: a bref radix > 64 indicates corruption */
3501 if (bref->keybits > 64) {
3502 kprintf("bad bref index %d chain %p bref %p\n",
3506 nkeybits = bref->keybits;
/* widen until (key ^ bref->key) fits under the nkeybits mask */
3508 while (nkeybits < 64 &&
3509 (~(((hammer2_key_t)1 << nkeybits) - 1) &
3510 (key ^ bref->key)) != 0) {
3515 * If the new key range is larger we have to determine
3516 * which side of the new key range the existing keys fall
3517 * under by checking the high bit, then collapsing the
3518 * locount into the hicount or vise-versa.
3520 if (keybits != nkeybits) {
3521 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3532 * The newly scanned key will be in the lower half or the
3533 * higher half of the (new) key range.
3535 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3540 spin_unlock(&above->cst.spin);
3541 bref = NULL; /* now invalid (safety) */
3544 * Adjust keybits to represent half of the full range calculated
3545 * above (radix 63 max)
3550 * Select whichever half contains the most elements. Theoretically
3551 * we can select either side as long as it contains at least one
3552 * element (in order to ensure that a free slot is present to hold
3553 * the indirect block).
3555 if (hammer2_indirect_optimize) {
3557 * Insert node for least number of keys, this will arrange
3558 * the first few blocks of a large file or the first few
3559 * inodes in a directory with fewer indirect blocks when
3562 if (hicount < locount && hicount != 0)
3563 key |= (hammer2_key_t)1 << keybits;
3565 key &= ~(hammer2_key_t)1 << keybits;
3568 * Insert node for most number of keys, best for heavily
3571 if (hicount > locount)
3572 key |= (hammer2_key_t)1 << keybits;
3574 key &= ~(hammer2_key_t)1 << keybits;
3582 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3583 * set chain->delete_tid.
3585 * This function does NOT generate a modification to the parent. It
3586 * would be nearly impossible to figure out which parent to modify anyway.
3587 * Such modifications are handled by the flush code and are properly merged
3588 * using the flush synchronization point.
3590 * The find/get code will properly overload the RBTREE check on top of
3591 * the bref check to detect deleted entries.
3593 * This function is NOT recursive. Any entity already pushed into the
3594 * chain (such as an inode) may still need visibility into its contents,
3595 * as well as the ability to read and modify the contents. For example,
3596 * for an unlinked file which is still open.
3598 * NOTE: This function does NOT set chain->modify_tid, allowing future
3599 * code to distinguish between live and deleted chains by testing
3602 * NOTE: Deletions normally do not occur in the middle of a duplication
3603 * chain but we use a trick for hardlink migration that refactors
3604 * the originating inode without deleting it, so we make no assumptions
/*
 * NOTE(review): gapped excerpt -- the return type, braces, and the early
 * return after the DELETED test are not visible here.
 */
3608 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
/* Caller must hold the chain's CCMS thread lock */
3610 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3613 * Nothing to do if already marked.
3615 if (chain->flags & HAMMER2_CHAIN_DELETED)
3619 * We must set MOVED along with DELETED for the flush code to
3620 * recognize the operation and properly disconnect the chain
3623 * The setting of DELETED causes finds, lookups, and _next iterations
3624 * to no longer recognize the chain. RB_SCAN()s will still have
3625 * visibility (needed for flush serialization points).
3627 * We need the spinlock on the core whos RBTREE contains chain
3628 * to protect against races.
3630 spin_lock(&chain->above->cst.spin)
3631 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
/*
 * First transition to MOVED takes a ref so the flush code holds a
 * reference to the deleted chain until it processes it.
 */
3632 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3633 hammer2_chain_ref(chain);
3634 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
/* Stamp the deletion with the transaction's synchronization point */
3636 chain->delete_tid = trans->sync_tid;
3637 spin_unlock(&chain->above->cst.spin);
3640 * Mark the underlying block as possibly being free unless WILLDUP
3641 * is set. Duplication can occur in many situations, particularly
3642 * when chains are moved to indirect blocks.
3644 if ((flags & HAMMER2_DELETE_WILLDUP) == 0)
3645 hammer2_freemap_free(trans, chain->hmp, &chain->bref, 0);
/* Propagate sub-modification state upward so the flush sees this chain */
3646 hammer2_chain_setsubmod(trans, chain);
/*
 * Wait briefly for activity on the chain: sleep on the chain address
 * with wmesg "chnflw" for at most 1 tick (no priority flags).
 * NOTE(review): return type and braces are not visible in this excerpt;
 * presumably callers poll in a loop around this -- confirm in full source.
 */
3650 hammer2_chain_wait(hammer2_chain_t *chain)
3652 tsleep(chain, 0, "chnflw", 1);
3657 adjreadcounter(hammer2_blockref_t *bref, size_t bytes)
3661 switch(bref->type) {
3662 case HAMMER2_BREF_TYPE_DATA:
3663 counterp = &hammer2_iod_file_read;
3665 case HAMMER2_BREF_TYPE_INODE:
3666 counterp = &hammer2_iod_meta_read;
3668 case HAMMER2_BREF_TYPE_INDIRECT:
3669 counterp = &hammer2_iod_indr_read;
3671 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3672 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
3673 counterp = &hammer2_iod_fmap_read;
3676 counterp = &hammer2_iod_volu_read;