2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem handles direct and indirect block searches, recursions,
37 * creation, and deletion. Chains of blockrefs are tracked and modifications
38 * are flagged for propagation... eventually all the way back to the volume
42 #include <sys/cdefs.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
/*
 * File-scope state and forward declarations for static helpers defined
 * later in this file.
 *
 * NOTE(review): this chunk carries fused original line numbers and many
 * source lines are elided; code is left byte-identical, comments only.
 */
51 static int hammer2_indirect_optimize;	/* XXX SYSCTL */
/* Helper that builds an indirect block under @parent for @key/@keybits. */
53 static hammer2_chain_t *hammer2_chain_create_indirect(
54 hammer2_mount_t *hmp, hammer2_chain_t *parent,
55 hammer2_key_t key, int keybits);
/* movelock/moveunlock/movewait: deletion-interlock helpers (see bodies below). */
56 static void hammer2_chain_movelock(hammer2_mount_t *hmp,
57 hammer2_chain_t *chain);
58 static void hammer2_chain_moveunlock(hammer2_mount_t *hmp,
59 hammer2_chain_t *chain);
60 static void hammer2_chain_movewait(hammer2_mount_t *hmp,
61 hammer2_chain_t *chain);
64 * We use a red-black tree to guarantee safe lookups under shared locks.
66 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
/*
 * RB-tree comparator: orders sibling chains by their parent-relative
 * blockref index.
 *
 * NOTE(review): as written the operands are (chain2 - chain1), which
 * reverses the usual ascending comparator — confirm intended ordering.
 * Also, raw int subtraction can overflow for extreme index values;
 * harmless only if indices are known small — TODO confirm.
 * (Function header/braces appear elided from this fragment.)
 */
69 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
71 return(chain2->index - chain1->index);
75 * Recursively mark the parent chain elements so flushes can find
76 * modified elements. Stop when we hit a chain already flagged
77 * SUBMODIFIED, but ignore the SUBMODIFIED bit that might be set
80 * SUBMODIFIED is not set on the chain passed in.
82 * The chain->cst.spin lock can be held to stabilize the chain->parent
83 * pointer. The first parent is stabilized by virtue of chain being
/*
 * Walks upward via chain->parent, setting HAMMER2_CHAIN_SUBMODIFIED on
 * each ancestor under its cst.spin lock; the spin locks are taken in a
 * hand-over-hand ("upward interlock") fashion so the parent pointer
 * stays stable across each step.
 *
 * NOTE(review): loop structure/braces are elided from this fragment;
 * code left byte-identical, comments only.
 */
87 hammer2_chain_parent_setsubmod(hammer2_mount_t *hmp, hammer2_chain_t *chain)
89 hammer2_chain_t *parent;
91 parent = chain->parent;
92 if (parent && (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) == 0) {
93 spin_lock(&parent->cst.spin);
95 atomic_set_int(&parent->flags,
96 HAMMER2_CHAIN_SUBMODIFIED);
97 if ((chain = parent->parent) == NULL)
99 spin_lock(&chain->cst.spin);	/* upward interlock */
100 spin_unlock(&parent->cst.spin);
103 spin_unlock(&parent->cst.spin);
108 * Allocate a new disconnected chain element representing the specified
109 * bref. The chain element is locked exclusively and refs is set to 1.
111 * This essentially allocates a system memory structure representing one
112 * of the media structure types, including inodes.
/*
 * Switch on bref->type to kmalloc() the matching in-memory structure
 * (inode from hmp->minode, indirect/data from hmp->mchain); the chain's
 * byte size is decoded from the radix stored in bref->data_off.
 *
 * NOTE(review): switch scaffolding, break/return lines and some case
 * bodies are elided from this fragment; code left byte-identical.
 */
115 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
117 hammer2_chain_t *chain;
119 hammer2_indblock_t *np;
/* size = 2^radix; the low bits of data_off encode the allocation radix */
121 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
124 * Construct the appropriate system structure.
127 case HAMMER2_BREF_TYPE_INODE:
128 ip = kmalloc(sizeof(*ip), hmp->minode, M_WAITOK | M_ZERO);
133 case HAMMER2_BREF_TYPE_INDIRECT:
134 np = kmalloc(sizeof(*np), hmp->mchain, M_WAITOK | M_ZERO);
138 case HAMMER2_BREF_TYPE_DATA:
139 dp = kmalloc(sizeof(*dp), hmp->mchain, M_WAITOK | M_ZERO);
143 case HAMMER2_BREF_TYPE_VOLUME:
145 panic("hammer2_chain_alloc volume type illegal for op");
148 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
153 * Only set bref_flush if the bref has a real media offset, otherwise
154 * the caller has to wait for the chain to be modified/block-allocated
155 * before a blockref can be synchronized with its (future) parent.
158 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
159 chain->bref_flush = *bref;
160 chain->index = -1;		/* not yet assigned */
162 chain->bytes = bytes;
/* Returned chain is exclusively locked; caller links it into a parent. */
163 ccms_cst_init(&chain->cst, chain);
164 ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
170 * Deallocate a chain (the step before freeing it). Remove the chain from
173 * Caller must hold the parent and the chain exclusively locked, and
174 * chain->refs must be 0.
176 * This function unlocks, removes, and destroys chain, and will recursively
177 * destroy any sub-chains under chain (whose refs must also be 0 at this
180 * parent can be NULL.
/*
 * NOTE(review): several lines are elided from this fragment; code left
 * byte-identical, comments only.
 */
183 hammer2_chain_dealloc(hammer2_mount_t *hmp, hammer2_chain_t *chain)
186 hammer2_chain_t *parent;
187 hammer2_chain_t *child;
190 * NOTE: All movelock holders hold a ref so it shouldn't be possible
191 * for movelock to be non-zero here.
193 KKASSERT(chain->refs == 0);
194 KKASSERT((chain->flags &
195 (HAMMER2_CHAIN_MOVED | HAMMER2_CHAIN_MODIFIED)) == 0);
196 KKASSERT(chain->movelock == 0);
198 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
204 * If the sub-tree is not empty all the elements on it must have
205 * 0 refs and be deallocatable.
/* Recursively tear down children first (each locked exclusively). */
207 while ((child = RB_ROOT(&chain->rbhead)) != NULL) {
208 ccms_thread_lock(&child->cst, CCMS_STATE_EXCLUSIVE);
209 hammer2_chain_dealloc(hmp, child);
213 * If the DELETED flag is not set the chain must be removed from
216 * WARNING! chain->cst.spin must be held when chain->parent is
217 * modified, even though we own the full blown lock,
218 * to deal with setsubmod and rename races.
220 if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
221 spin_lock(&chain->cst.spin);	/* shouldn't be needed */
222 parent = chain->parent;
223 RB_REMOVE(hammer2_chain_tree, &parent->rbhead, chain);
224 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
227 chain->parent = NULL;
228 spin_unlock(&chain->cst.spin);
232 * When cleaning out a hammer2_inode we must
233 * also clean out the related ccms_inode.
236 ccms_cst_uninit(&ip->topo_cst);
237 hammer2_chain_free(hmp, chain);
241 * Free a disconnected chain element
/*
 * Final release of the chain's backing memory.  Asserts the chain has
 * no buffer/data attached and no CST lock holders, then kfree()s the
 * union'd structure back to the pool it came from (minode for inodes,
 * mchain for everything else).
 *
 * NOTE(review): fragment with elided lines; code left byte-identical.
 */
244 hammer2_chain_free(hammer2_mount_t *hmp, hammer2_chain_t *chain)
248 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
249 chain->bref.type == HAMMER2_BREF_TYPE_VOLUME) {
253 KKASSERT(chain->bp == NULL);
254 KKASSERT(chain->data == NULL);
255 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
256 chain->u.ip->vp == NULL);
257 ccms_thread_unlock(&chain->cst);
258 KKASSERT(chain->cst.count == 0);
259 KKASSERT(chain->cst.upgrade == 0);
261 if ((mem = chain->u.mem) != NULL) {
263 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
264 kfree(mem, hmp->minode);
266 kfree(mem, hmp->mchain);
271 * Add a reference to a chain element, preventing its destruction.
273 * The parent chain must be locked shared or exclusive or otherwise be
274 * stable and already have a reference.
/*
 * Lock-free CAS loop: the 0->1 transition additionally propagates a ref
 * up to the parent (the caller has stabilized it); higher transitions
 * just bump the count.
 *
 * NOTE(review): loop scaffolding is elided from this fragment.
 */
277 hammer2_chain_ref(hammer2_mount_t *hmp, hammer2_chain_t *chain)
283 KKASSERT(chain->refs >= 0);
287 * 0 -> 1 transition must bump the refs on the parent
288 * too. The caller has stabilized the parent.
290 if (atomic_cmpset_int(&chain->refs, 0, 1)) {
291 chain = chain->parent;
292 KKASSERT(chain == NULL || chain->refs > 0);
294 /* retry or continue along the parent chain */
299 if (atomic_cmpset_int(&chain->refs, refs, refs + 1))
307 * Drop the caller's reference to the chain element. If the ref count
308 * reaches zero we attempt to recursively drop the parent.
310 * MOVED and MODIFIED elements hold additional references so it should not
311 * be possible for the count on a modified element to drop to 0.
313 * The chain element must NOT be locked by the caller on the 1->0 transition.
315 * The parent might or might not be locked by the caller. If we are unable
316 * to lock the parent on the 1->0 transition the destruction of the chain
317 * will be deferred but we still recurse upward and drop the ref on the
318 * parent (see the lastdrop() function)
320 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_mount_t *hmp,
321 hammer2_chain_t *chain);
/*
 * NOTE(review): loop scaffolding elided; code left byte-identical.
 */
324 hammer2_chain_drop(hammer2_mount_t *hmp, hammer2_chain_t *chain)
334 * (1) lastdrop successfully drops the chain to 0
335 * refs and may or may not have destroyed it.
336 * lastdrop will return the parent so we can
337 * recursively drop the implied ref from the
340 * (2) lastdrop fails to transition refs from 1 to 0
341 * and returns the same chain, we retry.
343 chain = hammer2_chain_lastdrop(hmp, chain);
345 if (atomic_cmpset_int(&chain->refs, refs, refs - 1)) {
347 * Succeeded, count did not reach zero so
348 * cut out of the loop.
352 /* retry the same chain */
358 * Handle SMP races during the last drop. We must obtain a lock on
359 * chain->parent to stabilize the last pointer reference to chain
360 * (if applicable). This reference does not have a parallel ref count,
361 * that is idle chains in the topology can have a ref count of 0.
363 * The 1->0 transition implies a ref on the parent.
/*
 * NOTE(review): fragment with elided lines (returns, braces); code left
 * byte-identical, comments only.
 */
367 hammer2_chain_lastdrop(hammer2_mount_t *hmp, hammer2_chain_t *chain)
369 hammer2_chain_t *parent;
372 * Stabilize chain->parent with the chain cst's spinlock.
373 * (parent can be NULL here).
375 spin_lock(&chain->cst.spin);
376 parent = chain->parent;
379 * CST spin locks are allowed to be held recursively bottom-up
380 * (whereas full CST thread locks can only be held recursively
383 * This makes things fairly easy. We still must not block while
384 * obtaining the CST lock on the parent. If this fails we have to
/* Non-blocking attempt on the parent lock; fall back if it fails. */
388 if (ccms_thread_lock_nonblock(&parent->cst,
389 CCMS_STATE_EXCLUSIVE)) {
390 /* parent cst lock attempt failed */
391 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
392 spin_unlock(&chain->cst.spin);	/* success */
395 spin_unlock(&chain->cst.spin);	/* failure */
402 * With the parent now held we control the last pointer reference
403 * to chain ONLY IF this is the 1->0 drop. If we fail to transition
404 * from 1->0 we unwind and retry at chain.
406 spin_unlock(&chain->cst.spin);
407 if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
409 ccms_thread_unlock(&parent->cst);
414 * The flusher is allowed to set movelock on a child and then release
415 * the parent's lock, and ultimately also release the child's lock
416 * while still holding it referenced. The reference should prevent
417 * this case from being hit.
419 KKASSERT(chain->movelock == 0);
422 * Ok, we succeeded. We now own the implied ref on the parent
423 * associated with the 1->0 transition of the child. It should not
424 * be possible for ANYTHING to access the child now, as we own the
425 * lock on the parent, so we should be able to safely lock the
426 * child and destroy it.
428 ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
429 hammer2_chain_dealloc(hmp, chain);
432 * We want to return parent with its implied ref to the caller
433 * to recurse and drop the parent.
436 ccms_thread_unlock(&parent->cst);
441 * Ref and lock a chain element, acquiring its data with I/O if necessary,
442 * and specify how you would like the data to be resolved.
444 * Returns 0 on success or an error code if the data could not be acquired.
445 * The chain element is locked either way.
447 * The lock is allowed to recurse, multiple locking ops will aggregate
448 * the requested resolve types. Once data is assigned it will not be
449 * removed until the last unlock.
451 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
452 * (typically used to avoid device/logical buffer
455 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
456 * the INITIAL-create state (indirect blocks only).
458 * Do not resolve data elements for DATA chains.
459 * (typically used to avoid device/logical buffer
462 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
464 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
465 * it will be locked exclusive.
467 * HAMMER2_RESOLVE_MAYDELETE - The caller may attempt to delete the element
468 * being locked. We block as long as the
469 * parent's movelock is non-zero.
471 * NOTE: Embedded elements (volume header, inodes) are always resolved
474 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
475 * element will instantiate and zero its buffer, and flush it on
478 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
479 * so as not to instantiate a device buffer, which could alias against
480 * a logical file buffer. However, if ALWAYS is specified the
481 * device buffer will be instantiated anyway.
/*
 * NOTE(review): this fragment has many elided lines (returns, braces,
 * retry loops); code left byte-identical, comments only.
 */
484 hammer2_chain_lock(hammer2_mount_t *hmp, hammer2_chain_t *chain, int how)
486 hammer2_blockref_t *bref;
496 * Ref and lock the element. Recursive locks are allowed.
498 hammer2_chain_ref(hmp, chain);
500 if (how & HAMMER2_RESOLVE_SHARED)
501 ccms_thread_lock(&chain->cst, CCMS_STATE_SHARED);
503 ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
/* MAYDELETE: back off and wait while the parent's movelock is held. */
504 if ((how & HAMMER2_RESOLVE_MAYDELETE) == 0 ||
505 chain->parent == NULL ||
506 chain->parent->movelock == 0) {
509 ccms_thread_unlock(&chain->cst);
510 hammer2_chain_movewait(hmp, chain);
514 * If we already have a valid data pointer no further action is
521 * Do we have to resolve the data?
523 switch(how & HAMMER2_RESOLVE_MASK) {
524 case HAMMER2_RESOLVE_NEVER:
526 case HAMMER2_RESOLVE_MAYBE:
527 if (chain->flags & HAMMER2_CHAIN_INITIAL)
529 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
532 case HAMMER2_RESOLVE_ALWAYS:
537 * Upgrade to an exclusive lock so we can safely manipulate the
538 * buffer cache. If another thread got to it before us we
541 ostate = ccms_thread_lock_upgrade(&chain->cst);
543 ccms_thread_lock_restore(&chain->cst, ostate);
548 * We must resolve to a device buffer, either by issuing I/O or
549 * by creating a zero-fill element. We do not mark the buffer
550 * dirty when creating a zero-fill element (the hammer2_chain_modify()
551 * API must still be used to do that).
553 * The device buffer is variable-sized in powers of 2 down
554 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
555 * chunk always contains buffers of the same size. (XXX)
557 * The minimum physical IO size may be larger than the variable
/* Compute physical buffer base/offset from the media offset in the bref. */
562 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
563 bbytes = HAMMER2_MINIOSIZE;
564 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
565 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
566 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
567 KKASSERT(pbase != 0);
570 * The getblk() optimization can only be used on newly created
571 * elements if the physical block size matches the request.
573 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
574 chain->bytes == bbytes) {
575 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
577 } else if (hammer2_cluster_enable) {
578 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
579 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
582 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
586 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
587 (intmax_t)pbase, error);
590 ccms_thread_lock_restore(&chain->cst, ostate);
595 * Zero the data area if the chain is in the INITIAL-create state.
596 * Mark the buffer for bdwrite().
598 bdata = (char *)chain->bp->b_data + boff;
599 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
600 bzero(bdata, chain->bytes);
601 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
605 * Setup the data pointer, either pointing it to an embedded data
606 * structure and copying the data from the buffer, or pointing it
609 * The buffer is not retained when copying to an embedded data
610 * structure in order to avoid potential deadlocks or recursions
611 * on the same physical buffer.
613 switch (bref->type) {
614 case HAMMER2_BREF_TYPE_VOLUME:
616 * Copy data from bp to embedded buffer
618 panic("hammer2_chain_lock: called on unresolved volume header");
621 KKASSERT(pbase == 0);
622 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
623 bcopy(bdata, &hmp->voldata, chain->bytes);
624 chain->data = (void *)&hmp->voldata;
629 case HAMMER2_BREF_TYPE_INODE:
631 * Copy data from bp to embedded buffer, do not retain the
634 bcopy(bdata, &chain->u.ip->ip_data, chain->bytes);
635 chain->data = (void *)&chain->u.ip->ip_data;
639 case HAMMER2_BREF_TYPE_INDIRECT:
640 case HAMMER2_BREF_TYPE_DATA:
643 * Point data at the device buffer and leave bp intact.
645 chain->data = (void *)bdata;
650 * Make sure the bp is not specifically owned by this thread before
651 * restoring to a possibly shared lock, so another hammer2 thread
655 BUF_KERNPROC(chain->bp);
656 ccms_thread_lock_restore(&chain->cst, ostate);
661 * Similar to the normal chain_lock but handles HAMMER2_RESOLVE_MAYDELETE
662 * in a more intelligent fashion.
/*
 * Locks @parent without MAYDELETE, then @chain with the full @how flags;
 * on a child lock failure the parent lock is released before returning.
 *
 * NOTE(review): error-check/return lines are elided from this fragment.
 */
665 hammer2_chain_lock_pair(hammer2_mount_t *hmp, hammer2_chain_t *parent,
666 hammer2_chain_t *chain, int how)
670 error = hammer2_chain_lock(hmp, parent,
671 how & ~HAMMER2_RESOLVE_MAYDELETE);
673 error = hammer2_chain_lock(hmp, chain, how);
675 hammer2_chain_unlock(hmp, parent);
682 * Unlock and deref a chain element.
684 * On the last lock release any non-embedded data (chain->bp) will be
/*
 * NOTE(review): fragment with elided lines (returns, braces, the
 * bdwrite()/bqrelse() disposition calls); code left byte-identical.
 */
688 hammer2_chain_unlock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
693 * Release the CST lock but with a special 1->0 transition case.
695 * Returns non-zero if lock references remain. When zero is
696 * returned the last lock reference is retained and any shared
697 * lock is upgraded to an exclusive lock for final disposition.
699 if (ccms_thread_unlock_zero(&chain->cst)) {
700 KKASSERT(chain->refs > 1);
701 atomic_add_int(&chain->refs, -1);
706 * Shortcut the case if the data is embedded or not resolved.
708 * Do NOT null-out pointers to embedded data (e.g. inode).
710 * The DIRTYBP flag is non-applicable in this situation and can
711 * be cleared to keep the flags state clean.
713 if (chain->bp == NULL) {
714 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
715 ccms_thread_unlock(&chain->cst);
716 hammer2_chain_drop(hmp, chain);
/* Pick a write-statistics counter based on flush mode and bref type. */
723 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
725 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
726 switch(chain->bref.type) {
727 case HAMMER2_BREF_TYPE_DATA:
728 counterp = &hammer2_ioa_file_write;
730 case HAMMER2_BREF_TYPE_INODE:
731 counterp = &hammer2_ioa_meta_write;
733 case HAMMER2_BREF_TYPE_INDIRECT:
734 counterp = &hammer2_ioa_indr_write;
737 counterp = &hammer2_ioa_volu_write;
742 switch(chain->bref.type) {
743 case HAMMER2_BREF_TYPE_DATA:
744 counterp = &hammer2_iod_file_write;
746 case HAMMER2_BREF_TYPE_INODE:
747 counterp = &hammer2_iod_meta_write;
749 case HAMMER2_BREF_TYPE_INDIRECT:
750 counterp = &hammer2_iod_indr_write;
753 counterp = &hammer2_iod_volu_write;
762 * If a device buffer was used for data be sure to destroy the
763 * buffer when we are done to avoid aliases (XXX what about the
764 * underlying VM pages?).
766 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
767 chain->bp->b_flags |= B_RELBUF;
770 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
771 * or not. The flag will get re-set when chain_modify() is called,
772 * even if MODIFIED is already set, allowing the OS to retire the
773 * buffer independent of a hammer2 flush.
776 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
777 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
778 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
779 atomic_clear_int(&chain->flags,
780 HAMMER2_CHAIN_IOFLUSH);
781 chain->bp->b_flags |= B_RELBUF;
782 cluster_awrite(chain->bp);
784 chain->bp->b_flags |= B_CLUSTEROK;
788 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
789 atomic_clear_int(&chain->flags,
790 HAMMER2_CHAIN_IOFLUSH);
791 chain->bp->b_flags |= B_RELBUF;
794 /* bp might still be dirty */
799 ccms_thread_unlock(&chain->cst);
800 hammer2_chain_drop(hmp, chain);
804 * Called on a locked chain element. Allows the caller to temporarily
805 * unlock the element (caller must be sure to hold an extra ref on it
806 * to prevent destruction), thus allowing other accessors to lock it,
807 * but disallows deletions.
/*
 * Simple atomic increment of the movelock count; the matching decrement
 * (with waiter wakeup) is in hammer2_chain_moveunlock() below.
 */
810 hammer2_chain_movelock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
812 atomic_add_int(&chain->movelock, 1);
/*
 * Release a movelock.  Bit 31 (0x80000000) of movelock is a waiters flag:
 * the CAS loop decrements the count while stripping the flag, and wakes
 * any threads blocked in hammer2_chain_movewait() when the flag was set.
 *
 * NOTE(review): loop scaffolding elided from this fragment.
 */
816 hammer2_chain_moveunlock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
822 omovelock = chain->movelock;
824 nmovelock = (omovelock - 1) & 0x7FFFFFFF;
825 if (atomic_cmpset_int(&chain->movelock, omovelock, nmovelock)) {
826 if (omovelock & 0x80000000)
827 wakeup(&chain->movelock);
/*
 * Block until the chain's movelock count drains.  Sets the waiters flag
 * (bit 31) with a CAS under tsleep_interlock() so the wakeup from
 * hammer2_chain_moveunlock() cannot be missed.
 *
 * NOTE(review): loop/exit-condition lines elided from this fragment.
 */
834 hammer2_chain_movewait(hammer2_mount_t *hmp, hammer2_chain_t *chain)
840 omovelock = chain->movelock;
844 nmovelock = omovelock | 0x80000000;
845 tsleep_interlock(&chain->movelock, 0);
846 if (atomic_cmpset_int(&chain->movelock, omovelock, nmovelock)) {
847 tsleep(&chain->movelock, PINTERLOCKED, "movelk", 0);
853 * Resize the chain's physical storage allocation. Chains can be resized
854 * smaller without reallocating the storage. Resizing larger will reallocate
857 * Must be passed a locked chain.
859 * If you want the resize code to copy the data to the new block then the
860 * caller should lock the chain RESOLVE_MAYBE or RESOLVE_ALWAYS.
862 * If the caller already holds a logical buffer containing the data and
863 * intends to bdwrite() that buffer resolve with RESOLVE_NEVER. The resize
864 * operation will then not copy the data.
866 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
867 * to avoid instantiating a device buffer that conflicts with the vnode
870 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
/*
 * NOTE(review): fragment with elided lines (declarations, braces, early
 * returns); code left byte-identical, comments only.
 */
873 hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *chain,
874 int nradix, int flags)
876 hammer2_mount_t *hmp = ip->hmp;
887 * Only data and indirect blocks can be resized for now
889 KKASSERT(chain != &hmp->vchain);
890 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
891 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
894 * Nothing to do if the element is already the proper size
896 obytes = chain->bytes;
897 nbytes = 1U << nradix;
898 if (obytes == nbytes)
902 * Set MODIFIED and add a chain ref to prevent destruction. Both
903 * modified flags share the same ref.
905 * If the chain is already marked MODIFIED then we can safely
906 * return the previous allocation to the pool without having to
907 * worry about snapshots.
909 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
910 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
911 HAMMER2_CHAIN_MODIFY_TID);
912 hammer2_chain_ref(hmp, chain);
914 hammer2_freemap_free(hmp, chain->bref.data_off,
919 * Relocate the block, even if making it smaller (because different
920 * block sizes may be in different regions).
922 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
924 chain->bytes = nbytes;
925 ip->delta_dcount += (ssize_t)(nbytes - obytes);	/* XXX atomic */
928 * The device buffer may be larger than the allocation size.
930 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
931 bbytes = HAMMER2_MINIOSIZE;
932 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
933 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
936 * Only copy the data if resolved, otherwise the caller is
940 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
941 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
942 KKASSERT(chain != &hmp->vchain);	/* safety */
945 * The getblk() optimization can only be used if the
946 * physical block size matches the request.
948 if (nbytes == bbytes) {
949 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
952 error = bread(hmp->devvp, pbase, bbytes, &nbp);
953 KKASSERT(error == 0);
955 bdata = (char *)nbp->b_data + boff;
/* Copy into the new block; zero-fill the tail when growing. */
957 if (nbytes < obytes) {
958 bcopy(chain->data, bdata, nbytes);
960 bcopy(chain->data, bdata, obytes);
961 bzero(bdata + obytes, nbytes - obytes);
965 * NOTE: The INITIAL state of the chain is left intact.
966 * We depend on hammer2_chain_modify() to do the
969 * NOTE: We set B_NOCACHE to throw away the previous bp and
970 * any VM backing store, even if it was dirty.
971 * Otherwise we run the risk of a logical/device
972 * conflict on reallocation.
974 chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
977 chain->data = (void *)bdata;
978 hammer2_chain_modify(hmp, chain, 0);
982 * Make sure the chain is marked MOVED and SUBMOD is set in the
983 * parent(s) so the adjustments are picked up by flush.
985 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
986 hammer2_chain_ref(hmp, chain);
987 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
989 hammer2_chain_parent_setsubmod(hmp, chain);
993 * Convert a locked chain that was retrieved read-only to read-write.
995 * If not already marked modified a new physical block will be allocated
996 * and assigned to the bref.
998 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
999 * level or the COW operation will not work.
1001 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
1002 * run the data through the device buffers.
/*
 * NOTE(review): fragment with elided lines (declarations, braces, early
 * returns, break statements); code left byte-identical, comments only.
 */
1005 hammer2_chain_modify(hammer2_mount_t *hmp, hammer2_chain_t *chain, int flags)
1009 hammer2_off_t pbase;
1015 * Tells flush that modify_tid must be updated, otherwise only
1016 * mirror_tid is updated. This is the default.
1018 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1019 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFY_TID);
1022 * If the chain is already marked MODIFIED we can just return.
1024 * However, it is possible that a prior lock/modify sequence
1025 * retired the buffer. During this lock/modify sequence MODIFIED
1026 * may still be set but the buffer could wind up clean. Since
1027 * the caller is going to modify the buffer further we have to
1028 * be sure that DIRTYBP is set again.
1030 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1031 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1032 chain->bp == NULL) {
1035 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1040 * Set MODIFIED and add a chain ref to prevent destruction. Both
1041 * modified flags share the same ref.
1043 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1044 hammer2_chain_ref(hmp, chain);
1047 * We must allocate the copy-on-write block.
1049 * If the data is embedded no other action is required.
1051 * If the data is not embedded we acquire and clear the
1052 * new block. If chain->data is not NULL we then do the
1053 * copy-on-write. chain->data will then be repointed to the new
1054 * buffer and the old buffer will be released.
1056 * For newly created elements with no prior allocation we go
1057 * through the copy-on-write steps except without the copying part.
1059 if (chain != &hmp->vchain) {
1060 if ((hammer2_debug & 0x0001) &&
1061 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
1062 kprintf("Replace %d\n", chain->bytes);
1064 chain->bref.data_off =
1065 hammer2_freemap_alloc(hmp, chain->bref.type,
1067 /* XXX failed allocation */
1071 * If data instantiation is optional and the chain has no current
1072 * data association (typical for DATA and newly-created INDIRECT
1073 * elements), don't instantiate the buffer now.
1075 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
1080 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
1081 * written-out on unlock. This bit is independent of the MODIFIED
1082 * bit because the chain may still need meta-data adjustments done
1083 * by virtue of MODIFIED for its parent, and the buffer can be
1084 * flushed out (possibly multiple times) by the OS before that.
1086 * Clearing the INITIAL flag (for indirect blocks) indicates that
1087 * a zero-fill buffer has been instantiated.
1089 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1090 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1093 * We currently should never instantiate a device buffer for a
1096 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1099 * Execute COW operation
1101 switch(chain->bref.type) {
1102 case HAMMER2_BREF_TYPE_VOLUME:
1103 case HAMMER2_BREF_TYPE_INODE:
1105 * The data is embedded, no copy-on-write operation is
1108 KKASSERT(chain->bp == NULL);
1110 case HAMMER2_BREF_TYPE_DATA:
1111 case HAMMER2_BREF_TYPE_INDIRECT:
1113 * Perform the copy-on-write operation
1115 KKASSERT(chain != &hmp->vchain);	/* safety */
1117 * The device buffer may be larger than the allocation size.
1119 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
1120 bbytes = HAMMER2_MINIOSIZE;
1121 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
1122 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
1125 * The getblk() optimization can only be used if the
1126 * physical block size matches the request.
1128 if (chain->bytes == bbytes) {
1129 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
1132 error = bread(hmp->devvp, pbase, bbytes, &nbp);
1133 KKASSERT(error == 0);
1135 bdata = (char *)nbp->b_data + boff;
1138 * Copy or zero-fill on write depending on whether
1139 * chain->data exists or not.
1142 bcopy(chain->data, bdata, chain->bytes);
1143 KKASSERT(chain->bp != NULL);
1145 bzero(bdata, chain->bytes);
1148 chain->bp->b_flags |= B_RELBUF;
1152 chain->data = bdata;
1155 panic("hammer2_chain_modify: illegal non-embedded type %d",
/* Propagate SUBMODIFIED upward unless the caller suppressed it. */
1161 if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
1162 hammer2_chain_parent_setsubmod(hmp, chain);
1166 * Mark the volume as having been modified. This short-cut version
1167 * does not have to lock the volume's chain, which allows the ioctl
1168 * code to make adjustments to connections without deadlocking.
/*
 * Sets MODIFIED_AUX on the volume chain under the voldata lock only.
 */
1171 hammer2_modify_volume(hammer2_mount_t *hmp)
1173 hammer2_voldata_lock(hmp);
1174 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
1175 hammer2_voldata_unlock(hmp);
1179 * Locate an in-memory chain. The parent must be locked. The in-memory
1180 * chain is returned or NULL if no in-memory chain is present.
1182 * NOTE: A chain on-media might exist for this index when NULL is returned.
/*
 * Performs an RB-tree lookup on parent->rbhead keyed by @index, using a
 * stack dummy chain as the search key.  Read-only; takes no locks itself.
 *
 * NOTE(review): the return statement is elided from this fragment.
 */
1185 hammer2_chain_find(hammer2_mount_t *hmp, hammer2_chain_t *parent, int index)
1187 hammer2_chain_t dummy;
1188 hammer2_chain_t *chain;
1190 dummy.index = index;
1191 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
1196 * Return a locked chain structure with all associated data acquired.
1198 * Caller must lock the parent on call, the returned child will be locked.
/*
 * Return a locked chain for (parent, index), using the in-memory cache
 * when possible and otherwise instantiating a chain from the parent's
 * on-media blockref array.  The parent must be locked on call; the
 * returned chain is locked (or only referenced when LOOKUP_NOLOCK).
 */
1201 hammer2_chain_get(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1202 int index, int flags)
1204 hammer2_blockref_t *bref;
1205 hammer2_inode_t *ip;
1206 hammer2_chain_t *chain;
1207 hammer2_chain_t dummy;
1209 ccms_state_t ostate;
1212 * Figure out how to lock. MAYBE can be used to optimize
1213 * the initial-create state for indirect blocks.
1215 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1216 how = HAMMER2_RESOLVE_NEVER;
1218 how = HAMMER2_RESOLVE_MAYBE;
1219 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1220 how |= HAMMER2_RESOLVE_SHARED;
1221 if (flags & HAMMER2_LOOKUP_MAYDELETE)
1222 how |= HAMMER2_RESOLVE_MAYDELETE;
1225 * First see if we have a (possibly modified) chain element cached
1226 * for this (parent, index). Acquire the data if necessary.
1228 * If chain->data is non-NULL the chain should already be marked
1231 dummy.index = index;
1232 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
1234 if (flags & HAMMER2_LOOKUP_NOLOCK)
1235 hammer2_chain_ref(hmp, chain);
1237 hammer2_chain_lock(hmp, chain, how);
1242 * Upgrade our thread lock and handle any race that may have
1243 * occurred. Leave the lock upgraded for the rest of the get.
1244 * We have to do this because we will be modifying the chain
/* re-check the RB tree after the upgrade: another thread may have inserted */
1247 ostate = ccms_thread_lock_upgrade(&parent->cst);
1248 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
1250 if (flags & HAMMER2_LOOKUP_NOLOCK)
1251 hammer2_chain_ref(hmp, chain);
1253 hammer2_chain_lock(hmp, chain, how);
1254 ccms_thread_lock_restore(&parent->cst, ostate);
1259 * The get function must always succeed, panic if there's no
1262 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1263 ccms_thread_lock_restore(&parent->cst, ostate);
1264 panic("hammer2_chain_get: Missing bref(1)");
1269 * Otherwise lookup the bref and issue I/O (switch on the parent)
1271 switch(parent->bref.type) {
1272 case HAMMER2_BREF_TYPE_INODE:
1273 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1274 bref = &parent->data->ipdata.u.blockset.blockref[index];
1276 case HAMMER2_BREF_TYPE_INDIRECT:
1277 KKASSERT(parent->data != NULL);
1278 KKASSERT(index >= 0 &&
1279 index < parent->bytes / sizeof(hammer2_blockref_t));
1280 bref = &parent->data->npdata.blockref[index];
1282 case HAMMER2_BREF_TYPE_VOLUME:
1283 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1284 bref = &hmp->voldata.sroot_blockset.blockref[index];
1288 panic("hammer2_chain_get: unrecognized blockref type: %d",
1291 if (bref->type == 0) {
1292 panic("hammer2_chain_get: Missing bref(2)");
1297 * Allocate a chain structure representing the existing media
1300 * The locking operation we do later will issue I/O to read it.
1302 chain = hammer2_chain_alloc(hmp, bref);
1305 * Link the chain into its parent. Caller is expected to hold an
1306 * exclusive lock on the parent.
1308 chain->parent = parent;
1309 chain->index = index;
1310 if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, chain))
1311 panic("hammer2_chain_link: collision");
1312 KKASSERT(parent->refs > 0);
1313 atomic_add_int(&parent->refs, 1); /* for red-black entry */
1314 ccms_thread_lock_restore(&parent->cst, ostate);
1317 * Additional linkage for inodes. Reuse the parent pointer to
1318 * find the parent directory.
1320 * The ccms_inode is initialized from its parent directory. The
1321 * chain of ccms_inode's is seeded by the mount code.
1323 if (bref->type == HAMMER2_BREF_TYPE_INODE) {
/* walk past any indirect blocks to reach the real inode parent */
1325 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1326 parent = parent->parent;
1327 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
1328 ip->pip = parent->u.ip;
1329 ip->pmp = parent->u.ip->pmp;
1330 ccms_cst_init(&ip->topo_cst, &ip->chain);
1335 * Our new chain structure has already been referenced and locked
1336 * but the lock code handles the I/O so call it to resolve the data.
1337 * Then release one of our two exclusive locks.
1339 * If NOLOCK is set the release will release the one-and-only lock.
1341 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1342 hammer2_chain_lock(hmp, chain, how); /* recursive lock */
1343 hammer2_chain_drop(hmp, chain); /* excess ref */
1345 ccms_thread_unlock(&chain->cst); /* from alloc */
1351 * Locate any key between key_beg and key_end inclusive. (*parentp)
1352 * typically points to an inode but can also point to a related indirect
1353 * block and this function will recurse upwards and find the inode again.
1355 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1356 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1357 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1359 * (*parentp) must be exclusively locked and referenced and can be an inode
1360 * or an existing indirect block within the inode.
1362 * On return (*parentp) will be modified to point at the deepest parent chain
1363 * element encountered during the search, as a helper for an insertion or
1364 * deletion. The new (*parentp) will be locked and referenced and the old
1365 * will be unlocked and dereferenced (no change if they are both the same).
1367 * The matching chain will be returned exclusively locked and referenced.
1369 * NULL is returned if no match was found, but (*parentp) will still
1370 * potentially be adjusted.
1372 * This function will also recurse up the chain if the key is not within the
1373 * current parent's range. (*parentp) can never be set to NULL. An iteration
1374 * can simply allow (*parentp) to float inside the loop.
/*
 * Locate any chain whose key range overlaps [key_beg, key_end] under
 * (*parentp), recursing upward out of indirect blocks and downward
 * through matching indirect blocks.  (*parentp) floats to the deepest
 * parent visited; the match is returned locked+referenced, or NULL.
 */
1377 hammer2_chain_lookup(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1378 hammer2_key_t key_beg, hammer2_key_t key_end,
1381 hammer2_chain_t *parent;
1382 hammer2_chain_t *chain;
1383 hammer2_chain_t *tmp;
1384 hammer2_blockref_t *base;
1385 hammer2_blockref_t *bref;
1386 hammer2_key_t scan_beg;
1387 hammer2_key_t scan_end;
1390 int how_always = HAMMER2_RESOLVE_ALWAYS;
1391 int how_maybe = HAMMER2_RESOLVE_MAYBE;
/* fold caller flags into both lock-resolution modes up front */
1393 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1394 how_maybe |= HAMMER2_RESOLVE_SHARED;
1395 how_always |= HAMMER2_RESOLVE_SHARED;
1397 if (flags & HAMMER2_LOOKUP_MAYDELETE) {
1398 how_maybe |= HAMMER2_RESOLVE_MAYDELETE;
1399 how_always |= HAMMER2_RESOLVE_MAYDELETE;
1403 * Recurse (*parentp) upward if necessary until the parent completely
1404 * encloses the key range or we hit the inode.
1407 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1408 scan_beg = parent->bref.key;
1409 scan_end = scan_beg +
1410 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1411 if (key_beg >= scan_beg && key_end <= scan_end)
1413 hammer2_chain_ref(hmp, parent); /* ref old parent */
1414 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1415 parent = parent->parent;
1416 /* lock new parent */
1417 hammer2_chain_lock(hmp, parent, how_maybe);
1418 hammer2_chain_drop(hmp, *parentp); /* drop old parent */
1419 *parentp = parent; /* new parent */
1424 * Locate the blockref array. Currently we do a fully associative
1425 * search through the array.
1427 switch(parent->bref.type) {
1428 case HAMMER2_BREF_TYPE_INODE:
1430 * Special shortcut for embedded data returns the inode
1431 * itself. Callers must detect this condition and access
1432 * the embedded data (the strategy code does this for us).
1434 * This is only applicable to regular files and softlinks.
1436 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1437 if (flags & HAMMER2_LOOKUP_NOLOCK)
1438 hammer2_chain_ref(hmp, parent);
1440 hammer2_chain_lock(hmp, parent, how_always);
1443 base = &parent->data->ipdata.u.blockset.blockref[0];
1444 count = HAMMER2_SET_COUNT;
1446 case HAMMER2_BREF_TYPE_INDIRECT:
1448 * Optimize indirect blocks in the INITIAL state to avoid
1451 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1454 if (parent->data == NULL)
1455 panic("parent->data is NULL");
1456 base = &parent->data->npdata.blockref[0];
1458 count = parent->bytes / sizeof(hammer2_blockref_t);
1460 case HAMMER2_BREF_TYPE_VOLUME:
1461 base = &hmp->voldata.sroot_blockset.blockref[0];
1462 count = HAMMER2_SET_COUNT;
1465 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1467 base = NULL; /* safety */
1468 count = 0; /* safety */
1472 * If the element and key overlap we use the element.
/* prefer the cached in-memory chain's bref over the media bref per slot */
1475 for (i = 0; i < count; ++i) {
1476 tmp = hammer2_chain_find(hmp, parent, i);
1479 KKASSERT(bref->type != 0);
1480 } else if (base == NULL || base[i].type == 0) {
1485 scan_beg = bref->key;
1486 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1487 if (key_beg <= scan_end && key_end >= scan_beg)
/* exact-key miss terminates; otherwise continue iterating via _next() */
1491 if (key_beg == key_end)
1493 return (hammer2_chain_next(hmp, parentp, NULL,
1494 key_beg, key_end, flags));
1498 * Acquire the new chain element. If the chain element is an
1499 * indirect block we must search recursively.
1501 chain = hammer2_chain_get(hmp, parent, i, flags);
1506 * If the chain element is an indirect block it becomes the new
1507 * parent and we loop on it.
1509 * The parent always has to be locked with at least RESOLVE_MAYBE,
1510 * so it might need a fixup if the caller passed incompatible flags.
1512 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1513 hammer2_chain_unlock(hmp, parent);
1514 *parentp = parent = chain;
1515 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1516 hammer2_chain_lock(hmp, chain, how_maybe);
1517 hammer2_chain_drop(hmp, chain); /* excess ref */
1518 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1519 hammer2_chain_lock(hmp, chain, how_maybe);
1520 hammer2_chain_unlock(hmp, chain);
1526 * All done, return chain
1532 * After having issued a lookup we can iterate all matching keys.
1534 * If chain is non-NULL we continue the iteration from just after it's index.
1536 * If chain is NULL we assume the parent was exhausted and continue the
1537 * iteration at the next parent.
1539 * parent must be locked on entry and remains locked throughout. chain's
1540 * lock status must match flags.
/*
 * Continue an iteration started by hammer2_chain_lookup().  Resumes at
 * chain->index + 1 within the current parent (or at the next parent when
 * chain is NULL / the parent is exhausted).  (*parentp) floats as in
 * lookup; chain's lock/ref state on entry must match the lookup flags.
 */
1543 hammer2_chain_next(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1544 hammer2_chain_t *chain,
1545 hammer2_key_t key_beg, hammer2_key_t key_end,
1548 hammer2_chain_t *parent;
1549 hammer2_chain_t *tmp;
1550 hammer2_blockref_t *base;
1551 hammer2_blockref_t *bref;
1552 hammer2_key_t scan_beg;
1553 hammer2_key_t scan_end;
1555 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1558 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1559 how_maybe |= HAMMER2_RESOLVE_SHARED;
1560 if (flags & HAMMER2_LOOKUP_MAYDELETE)
1561 how_maybe |= HAMMER2_RESOLVE_MAYDELETE;
1567 * Calculate the next index and recalculate the parent if necessary.
1571 * Continue iteration within current parent. If not NULL
1572 * the passed-in chain may or may not be locked, based on
1573 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1576 i = chain->index + 1;
1577 if (flags & HAMMER2_LOOKUP_NOLOCK)
1578 hammer2_chain_drop(hmp, chain);
1580 hammer2_chain_unlock(hmp, chain);
1583 * Any scan where the lookup returned degenerate data embedded
1584 * in the inode has an invalid index and must terminate.
1586 if (chain == parent)
1589 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT) {
1591 * We reached the end of the iteration.
1596 * Continue iteration with next parent unless the current
1597 * parent covers the range.
1599 hammer2_chain_t *nparent;
1601 scan_beg = parent->bref.key;
1602 scan_end = scan_beg +
1603 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1604 if (key_beg >= scan_beg && key_end <= scan_end)
/* pop up one level: resume in the grandparent just past parent's slot */
1607 i = parent->index + 1;
1608 nparent = parent->parent;
1609 hammer2_chain_ref(hmp, nparent); /* ref new parent */
1610 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1611 /* lock new parent */
1612 hammer2_chain_lock(hmp, nparent, how_maybe);
1613 hammer2_chain_drop(hmp, nparent); /* drop excess ref */
1614 *parentp = parent = nparent;
1619 * Locate the blockref array. Currently we do a fully associative
1620 * search through the array.
1622 switch(parent->bref.type) {
1623 case HAMMER2_BREF_TYPE_INODE:
1624 base = &parent->data->ipdata.u.blockset.blockref[0];
1625 count = HAMMER2_SET_COUNT;
1627 case HAMMER2_BREF_TYPE_INDIRECT:
1628 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1631 KKASSERT(parent->data != NULL);
1632 base = &parent->data->npdata.blockref[0];
1634 count = parent->bytes / sizeof(hammer2_blockref_t);
1636 case HAMMER2_BREF_TYPE_VOLUME:
1637 base = &hmp->voldata.sroot_blockset.blockref[0];
1638 count = HAMMER2_SET_COUNT;
1641 panic("hammer2_chain_next: unrecognized blockref type: %d",
1643 base = NULL; /* safety */
1644 count = 0; /* safety */
1647 KKASSERT(i <= count);
1650 * Look for the key. If we are unable to find a match and an exact
1651 * match was requested we return NULL. If a range was requested we
1652 * run hammer2_chain_next() to iterate.
/* in-memory chain overrides the media bref for each scanned slot */
1656 tmp = hammer2_chain_find(hmp, parent, i);
1659 } else if (base == NULL || base[i].type == 0) {
1665 scan_beg = bref->key;
1666 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1667 if (key_beg <= scan_end && key_end >= scan_beg)
1673 * If we couldn't find a match recurse up a parent to continue the
1680 * Acquire the new chain element. If the chain element is an
1681 * indirect block we must search recursively.
1683 chain = hammer2_chain_get(hmp, parent, i, flags);
1688 * If the chain element is an indirect block it becomes the new
1689 * parent and we loop on it.
1691 * The parent always has to be locked with at least RESOLVE_MAYBE,
1692 * so it might need a fixup if the caller passed incompatible flags.
1694 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1695 hammer2_chain_unlock(hmp, parent);
1696 *parentp = parent = chain;
1698 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1699 hammer2_chain_lock(hmp, parent, how_maybe);
1700 hammer2_chain_drop(hmp, parent); /* excess ref */
1701 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1702 hammer2_chain_lock(hmp, parent, how_maybe);
1703 hammer2_chain_unlock(hmp, parent);
1710 * All done, return chain
1716 * Create and return a new hammer2 system memory structure of the specified
1717 * key, type and size and insert it RELATIVE TO (PARENT).
1719 * (parent) is typically either an inode or an indirect block, acquired
1720 * as a side effect of issuing a prior failed lookup. parent
1721 * must be locked and held. Do not pass the inode chain to this function
1722 * unless that is the chain returned by the failed lookup.
1724 * Non-indirect types will automatically allocate indirect blocks as required
1725 * if the new item does not fit in the current (parent).
1727 * Indirect types will move a portion of the existing blockref array in
1728 * (parent) into the new indirect type and then use one of the free slots
1729 * to emplace the new indirect type.
1731 * A new locked, referenced chain element is returned of the specified type.
1732 * The element may or may not have a data area associated with it:
1734 * VOLUME not allowed here
1735 * INODE embedded data area will be set-up
1736 * INDIRECT not allowed here
1737 * DATA no data area will be set-up (caller is expected
1738 * to have logical buffers, we don't want to alias
1739 * the data onto device buffers!).
1741 * Requires an exclusively locked parent.
/*
 * Create (or re-link) a chain of the given key/keybits/type/bytes under
 * (parent), allocating media space and the in-memory chain when the
 * caller passes chain == NULL.  Falls back to creating an indirect block
 * when the parent's blockref array has no free slot.  Requires an
 * exclusively locked parent; returns the new chain locked+referenced.
 */
1744 hammer2_chain_create(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1745 hammer2_chain_t *chain,
1746 hammer2_key_t key, int keybits, int type, size_t bytes)
1748 hammer2_blockref_t dummy;
1749 hammer2_blockref_t *base;
1750 hammer2_chain_t dummy_chain;
1751 int unlock_parent = 0;
1756 KKASSERT(ccms_thread_lock_owned(&parent->cst));
1758 if (chain == NULL) {
1760 * First allocate media space and construct the dummy bref,
1761 * then allocate the in-memory chain structure.
1763 bzero(&dummy, sizeof(dummy));
1766 dummy.keybits = keybits;
1767 dummy.data_off = hammer2_bytes_to_radix(bytes);
1768 chain = hammer2_chain_alloc(hmp, &dummy);
1772 * We do NOT set INITIAL here (yet). INITIAL is only
1773 * used for indirect blocks.
1775 * Recalculate bytes to reflect the actual media block
1778 bytes = (hammer2_off_t)1 <<
1779 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1780 chain->bytes = bytes;
1783 case HAMMER2_BREF_TYPE_VOLUME:
1784 panic("hammer2_chain_create: called with volume type");
1786 case HAMMER2_BREF_TYPE_INODE:
1787 KKASSERT(bytes == HAMMER2_INODE_BYTES);
1788 chain->data = (void *)&chain->u.ip->ip_data;
1790 case HAMMER2_BREF_TYPE_INDIRECT:
/*
 * NOTE(review): the two adjacent string literals below concatenate
 * without a separating space ("tocreate") — panic message is garbled.
 */
1791 panic("hammer2_chain_create: cannot be used to"
1792 "create indirect block");
1794 case HAMMER2_BREF_TYPE_DATA:
1796 /* leave chain->data NULL */
1797 KKASSERT(chain->data == NULL);
1802 * Potentially update the chain's key/keybits.
1804 chain->bref.key = key;
1805 chain->bref.keybits = keybits;
1810 * Locate a free blockref in the parent's array
1812 switch(parent->bref.type) {
1813 case HAMMER2_BREF_TYPE_INODE:
1814 KKASSERT((parent->u.ip->ip_data.op_flags &
1815 HAMMER2_OPFLAG_DIRECTDATA) == 0);
1816 KKASSERT(parent->data != NULL);
1817 base = &parent->data->ipdata.u.blockset.blockref[0];
1818 count = HAMMER2_SET_COUNT;
1820 case HAMMER2_BREF_TYPE_INDIRECT:
1821 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1824 KKASSERT(parent->data != NULL);
1825 base = &parent->data->npdata.blockref[0];
1827 count = parent->bytes / sizeof(hammer2_blockref_t);
1829 case HAMMER2_BREF_TYPE_VOLUME:
1830 KKASSERT(parent->data != NULL);
1831 base = &hmp->voldata.sroot_blockset.blockref[0];
1832 count = HAMMER2_SET_COUNT;
1835 panic("hammer2_chain_create: unrecognized blockref type: %d",
1842 * Scan for an unallocated bref, also skipping any slots occupied
1843 * by in-memory chain elements that may not yet have been updated
1844 * in the parent's bref array.
1846 bzero(&dummy_chain, sizeof(dummy_chain));
1847 for (i = 0; i < count; ++i) {
1849 dummy_chain.index = i;
1850 if (RB_FIND(hammer2_chain_tree,
1851 &parent->rbhead, &dummy_chain) == NULL) {
1854 } else if (base[i].type == 0) {
1855 dummy_chain.index = i;
1856 if (RB_FIND(hammer2_chain_tree,
1857 &parent->rbhead, &dummy_chain) == NULL) {
1864 * If no free blockref could be found we must create an indirect
1865 * block and move a number of blockrefs into it. With the parent
1866 * locked we can safely lock each child in order to move it without
1867 * causing a deadlock.
1869 * This may return the new indirect block or the old parent depending
1870 * on where the key falls.
1873 hammer2_chain_t *nparent;
1875 nparent = hammer2_chain_create_indirect(hmp, parent,
/* indirect-block creation failed: free the newly allocated chain */
1877 if (nparent == NULL) {
1879 hammer2_chain_free(hmp, chain);
1883 if (parent != nparent) {
1885 hammer2_chain_unlock(hmp, parent);
1893 * Link the chain into its parent. Later on we will have to set
1894 * the MOVED bit in situations where we don't mark the new chain
1895 * as being modified.
1897 if (chain->parent != NULL)
1898 panic("hammer2: hammer2_chain_create: chain already connected");
1899 KKASSERT(chain->parent == NULL);
1900 chain->parent = parent;
1902 if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, chain))
1903 panic("hammer2_chain_link: collision");
1904 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
1905 KKASSERT(parent->refs > 0);
1906 atomic_add_int(&parent->refs, 1);
1909 * Additional linkage for inodes. Reuse the parent pointer to
1910 * find the parent directory.
1912 * Cumulative adjustments are inherited on [re]attach and will
1913 * propagate up the tree on the next flush.
1915 * The ccms_inode is initialized from its parent directory. The
1916 * chain of ccms_inode's is seeded by the mount code.
1918 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1919 hammer2_chain_t *scan = parent;
1920 hammer2_inode_t *ip = chain->u.ip;
/* skip intervening indirect blocks to reach the parent directory inode */
1922 while (scan->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1923 scan = scan->parent;
1924 if (scan->bref.type == HAMMER2_BREF_TYPE_INODE) {
1925 ip->pip = scan->u.ip;
1926 ip->pmp = scan->u.ip->pmp;
1927 ip->pip->delta_icount += ip->ip_data.inode_count;
1928 ip->pip->delta_dcount += ip->ip_data.data_count;
1929 ++ip->pip->delta_icount;
1930 ccms_cst_init(&ip->topo_cst, &ip->chain);
1935 * (allocated) indicates that this is a newly-created chain element
1936 * rather than a renamed chain element. In this situation we want
1937 * to place the chain element in the MODIFIED state.
1939 * The data area will be set up as follows:
1941 * VOLUME not allowed here.
1943 * INODE embedded data area will be set-up.
1945 * INDIRECT not allowed here.
1947 * DATA no data area will be set-up (caller is expected
1948 * to have logical buffers, we don't want to alias
1949 * the data onto device buffers!).
1952 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1953 hammer2_chain_modify(hmp, chain,
1954 HAMMER2_MODIFY_OPTDATA);
1955 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1956 /* not supported in this function */
1957 panic("hammer2_chain_create: bad type");
1958 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1959 hammer2_chain_modify(hmp, chain,
1960 HAMMER2_MODIFY_OPTDATA);
1962 hammer2_chain_modify(hmp, chain, 0);
1966 * When reconnecting inodes we have to call setsubmod()
1967 * to ensure that its state propagates up the newly
1970 * Make sure MOVED is set but do not update bref_flush. If
1971 * the chain is undergoing modification bref_flush will be
1972 * updated when it gets flushed. If it is not then the
1973 * bref may not have been flushed yet and we do not want to
1974 * set MODIFIED here as this could result in unnecessary
1977 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1978 hammer2_chain_ref(hmp, chain);
1979 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1981 hammer2_chain_parent_setsubmod(hmp, chain);
1986 hammer2_chain_unlock(hmp, parent);
1991 * Create an indirect block that covers one or more of the elements in the
1992 * current parent. Either returns the existing parent with no locking or
1993 * ref changes or returns the new indirect block locked and referenced
1994 * and leaving the original parent lock/ref intact as well.
1996 * The returned chain depends on where the specified key falls.
1998 * The key/keybits for the indirect mode only needs to follow three rules:
2000 * (1) That all elements underneath it fit within its key space and
2002 * (2) That all elements outside it are outside its key space.
2004 * (3) When creating the new indirect block any elements in the current
2005 * parent that fit within the new indirect block's keyspace must be
2006 * moved into the new indirect block.
2008 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2009 * keyspace than the current parent, but lookup/iteration rules will
2010 * ensure (and must ensure) that rule (2) for all parents leading up
2011 * to the nearest inode or the root volume header is adhered to. This
2012 * is accomplished by always recursing through matching keyspaces in
2013 * the hammer2_chain_lookup() and hammer2_chain_next() API.
2015 * The current implementation calculates the current worst-case keyspace by
2016 * iterating the current parent and then divides it into two halves, choosing
2017 * whichever half has the most elements (not necessarily the half containing
2018 * the requested key).
2020 * We can also opt to use the half with the least number of elements. This
2021 * causes lower-numbered keys (aka logical file offsets) to recurse through
2022 * fewer indirect blocks and higher-numbered keys to recurse through more.
2023 * This also has the risk of not moving enough elements to the new indirect
2024 * block and being forced to create several indirect blocks before the element
2027 * Must be called with an exclusively locked parent
/*
 * Create an indirect block under (parent) and migrate into it every
 * existing blockref falling within the chosen half of the computed key
 * range, freeing slots in the parent.  Returns the indirect block
 * (locked+referenced) when create_key falls inside its range, otherwise
 * returns the original parent.  Requires an exclusively locked parent.
 */
2031 hammer2_chain_create_indirect(hammer2_mount_t *hmp, hammer2_chain_t *parent,
2032 hammer2_key_t create_key, int create_bits)
2034 hammer2_blockref_t *base;
2035 hammer2_blockref_t *bref;
2036 hammer2_chain_t *chain;
2037 hammer2_chain_t *ichain;
2038 hammer2_chain_t dummy;
2039 hammer2_key_t key = create_key;
2040 int keybits = create_bits;
2048 * Calculate the base blockref pointer or NULL if the chain
2049 * is known to be empty. We need to calculate the array count
2050 * for RB lookups either way.
2052 KKASSERT(ccms_thread_lock_owned(&parent->cst));
2054 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA);
2055 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2058 switch(parent->bref.type) {
2059 case HAMMER2_BREF_TYPE_INODE:
2060 count = HAMMER2_SET_COUNT;
2062 case HAMMER2_BREF_TYPE_INDIRECT:
2063 count = parent->bytes / sizeof(hammer2_blockref_t);
2065 case HAMMER2_BREF_TYPE_VOLUME:
2066 count = HAMMER2_SET_COUNT;
2069 panic("hammer2_chain_create_indirect: "
2070 "unrecognized blockref type: %d",
2076 switch(parent->bref.type) {
2077 case HAMMER2_BREF_TYPE_INODE:
2078 base = &parent->data->ipdata.u.blockset.blockref[0];
2079 count = HAMMER2_SET_COUNT;
2081 case HAMMER2_BREF_TYPE_INDIRECT:
2082 base = &parent->data->npdata.blockref[0];
2083 count = parent->bytes / sizeof(hammer2_blockref_t);
2085 case HAMMER2_BREF_TYPE_VOLUME:
2086 base = &hmp->voldata.sroot_blockset.blockref[0];
2087 count = HAMMER2_SET_COUNT;
2090 panic("hammer2_chain_create_indirect: "
2091 "unrecognized blockref type: %d",
2099 * Scan for an unallocated bref, also skipping any slots occupied
2100 * by in-memory chain elements which may not yet have been updated
2101 * in the parent's bref array.
2103 bzero(&dummy, sizeof(dummy));
2104 for (i = 0; i < count; ++i) {
2108 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
2110 bref = &chain->bref;
2111 } else if (base && base[i].type) {
2118 * Expand our calculated key range (key, keybits) to fit
2119 * the scanned key. nkeybits represents the full range
2120 * that we will later cut in half (two halves @ nkeybits - 1).
2123 if (nkeybits < bref->keybits)
2124 nkeybits = bref->keybits;
/* widen nkeybits until key and bref->key share all bits above it */
2125 while (nkeybits < 64 &&
2126 (~(((hammer2_key_t)1 << nkeybits) - 1) &
2127 (key ^ bref->key)) != 0) {
2132 * If the new key range is larger we have to determine
2133 * which side of the new key range the existing keys fall
2134 * under by checking the high bit, then collapsing the
2135 * locount into the hicount or vice-versa.
2137 if (keybits != nkeybits) {
2138 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
2149 * The newly scanned key will be in the lower half or the
2150 * higher half of the (new) key range.
2152 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
2159 * Adjust keybits to represent half of the full range calculated
2160 * above (radix 63 max)
2165 * Select whichever half contains the most elements. Theoretically
2166 * we can select either side as long as it contains at least one
2167 * element (in order to ensure that a free slot is present to hold
2168 * the indirect block).
2170 key &= ~(((hammer2_key_t)1 << keybits) - 1);
2171 if (hammer2_indirect_optimize) {
2173 * Insert node for least number of keys, this will arrange
2174 * the first few blocks of a large file or the first few
2175 * inodes in a directory with fewer indirect blocks when
2178 if (hicount < locount && hicount != 0)
2179 key |= (hammer2_key_t)1 << keybits;
2181 key &= ~(hammer2_key_t)1 << keybits;
2184 * Insert node for most number of keys, best for heavily
2187 if (hicount > locount)
2188 key |= (hammer2_key_t)1 << keybits;
2190 key &= ~(hammer2_key_t)1 << keybits;
2194 * How big should our new indirect block be? It has to be at least
2195 * as large as its parent.
2197 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2198 nbytes = HAMMER2_IND_BYTES_MIN;
2200 nbytes = HAMMER2_IND_BYTES_MAX;
2201 if (nbytes < count * sizeof(hammer2_blockref_t))
2202 nbytes = count * sizeof(hammer2_blockref_t);
2205 * Ok, create our new indirect block
2207 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2208 dummy.bref.key = key;
2209 dummy.bref.keybits = keybits;
2210 dummy.bref.data_off = hammer2_bytes_to_radix(nbytes);
2211 ichain = hammer2_chain_alloc(hmp, &dummy.bref);
2212 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2215 * Iterate the original parent and move the matching brefs into
2216 * the new indirect block.
2218 for (i = 0; i < count; ++i) {
2220 * For keying purposes access the bref from the media or
2221 * from our in-memory cache. In cases where the in-memory
2222 * cache overrides the media the keyrefs will be the same
2223 * anyway so we can avoid checking the cache when the media
2227 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
2229 bref = &chain->bref;
2230 } else if (base && base[i].type) {
2233 if (ichain->index < 0)
2239 * Skip keys not in the chosen half (low or high), only bit
2240 * (keybits - 1) needs to be compared but for safety we
2241 * will compare all msb bits plus that bit again.
2243 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2244 (key ^ bref->key)) != 0) {
2249 * This element is being moved from the parent, its slot
2250 * is available for our new indirect block.
2252 if (ichain->index < 0)
2256 * Load the new indirect block by acquiring or allocating
2257 * the related chain entries, then simply move them to the
2258 * new parent (ichain).
2260 * When adjusting the parent/child relationship we must
2261 * set the MOVED bit but we do NOT update bref_flush
2262 * because otherwise we might synchronize a bref that has
2263 * not yet been flushed. We depend on chain's bref_flush
2264 * either being correct or the chain being in a MODIFIED
2267 * We do not want to set MODIFIED here as this would result
2268 * in unnecessary reallocations.
2270 * We must still set SUBMODIFIED in the parent but we do
2271 * that after the loop.
2273 * WARNING! chain->cst.spin must be held when chain->parent is
2274 * modified, even though we own the full blown lock,
2275 * to deal with setsubmod and rename races.
2277 * XXX we really need a lock here but we don't need the
2278 * data. NODATA feature needed.
2280 chain = hammer2_chain_get(hmp, parent, i,
2281 HAMMER2_LOOKUP_NODATA);
2282 spin_lock(&chain->cst.spin);
2283 RB_REMOVE(hammer2_chain_tree, &parent->rbhead, chain);
2284 if (RB_INSERT(hammer2_chain_tree, &ichain->rbhead, chain))
2285 panic("hammer2_chain_create_indirect: collision");
2286 chain->parent = ichain;
2287 spin_unlock(&chain->cst.spin);
/* clear the media bref in the parent and transfer the RB ref */
2289 bzero(&base[i], sizeof(base[i]));
2290 atomic_add_int(&parent->refs, -1);
2291 atomic_add_int(&ichain->refs, 1);
2292 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2293 hammer2_chain_ref(hmp, chain);
2294 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2296 hammer2_chain_unlock(hmp, chain);
2297 KKASSERT(parent->refs > 0);
2302 * Insert the new indirect block into the parent now that we've
2303 * cleared out some entries in the parent. We calculated a good
2304 * insertion index in the loop above (ichain->index).
2306 * We don't have to set MOVED here because we mark ichain modified
2307 * down below (so the normal modified -> flush -> set-moved sequence
2310 KKASSERT(ichain->index >= 0);
2311 if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, ichain))
2312 panic("hammer2_chain_create_indirect: ichain insertion");
2313 ichain->parent = parent;
2314 atomic_add_int(&parent->refs, 1);
2317 * Mark the new indirect block modified after insertion, which
2318 * will propagate up through parent all the way to the root and
2319 * also allocate the physical block in ichain for our caller,
2320 * and assign ichain->data to a pre-zero'd space (because there
2321 * is not prior data to copy into it).
2323 * We have to set SUBMODIFIED in ichain's flags manually so the
2324 * flusher knows it has to recurse through it to get to all of
2325 * our moved blocks, then call setsubmod() to set the bit
2328 hammer2_chain_modify(hmp, ichain, HAMMER2_MODIFY_OPTDATA);
2329 hammer2_chain_parent_setsubmod(hmp, ichain);
2330 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2333 * Figure out what to return.
2335 if (create_bits > keybits) {
2337 * Key being created is way outside the key range,
2338 * return the original parent.
2340 hammer2_chain_unlock(hmp, ichain);
2341 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
2342 (create_key ^ key)) {
2344 * Key being created is outside the key range,
2345 * return the original parent.
2347 hammer2_chain_unlock(hmp, ichain);
2350 * Otherwise it's in the range, return the new parent.
2351 * (leave both the new and old parent locked).
2360 * Physically delete the specified chain element. Note that inodes with
2361 * open descriptors should not be deleted (as with other filesystems) until
2362 * the last open descriptor is closed.
2364 * This routine will remove the chain element from its parent and potentially
2365 * also recurse upward and delete indirect blocks which become empty as a
2368 * The caller must pass a pointer to the chain's parent, also locked and
2369 * referenced. (*parentp) will be modified in a manner similar to a lookup
2370 * or iteration when indirect blocks are also deleted as a side effect.
2372 * The caller must ensure that the chain is locked with the MAYDELETE
2373 * flag to interlock chain->movelock.
2375 * XXX This currently does not adhere to the MOVED flag protocol in that
2376 * the removal is immediately indicated in the parent's blockref[]
2379 * Must be called with an exclusively locked parent and chain.
2382 hammer2_chain_delete(hammer2_mount_t *hmp, hammer2_chain_t *parent,
2383 hammer2_chain_t *chain, int retain)
2385 hammer2_blockref_t *base;
2386 hammer2_inode_t *ip;
/*
 * Validate the caller's locking contract before touching anything:
 * parent must really be chain's parent, the parent's CCMS thread lock
 * must be held by us, and the chain must not be move-locked.
 */
2390 * NOTE: Caller is responsible for using MAYDELETE flags when
2391 * acquiring chain elements that it desires to delete.
2392 * This flag should interlock the movelock flag. Parent
2393 * chain must remain locked (the flusher can set movelock
2394 * on children otherwise).
2396 if (chain->parent != parent)
2397 panic("hammer2_chain_delete: parent mismatch");
2398 KKASSERT(ccms_thread_lock_owned(&parent->cst));
2399 KKASSERT(chain->movelock == 0);
2402 * Mark the parent modified so our base[] pointer remains valid
2403 * while we move entries. For the optimized indirect block
2404 * case mark the parent moved instead.
2406 * Calculate the blockref reference in the parent
2408 switch(parent->bref.type) {
2409 case HAMMER2_BREF_TYPE_INODE:
/* Inode parents embed a fixed-size blockset directly in the inode data. */
2410 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2411 base = &parent->data->ipdata.u.blockset.blockref[0];
2412 count = HAMMER2_SET_COUNT;
2414 case HAMMER2_BREF_TYPE_INDIRECT:
/* Indirect blocks size their blockref array by the block's byte size. */
2415 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA |
2416 HAMMER2_MODIFY_NO_MODIFY_TID);
2417 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2420 base = &parent->data->npdata.blockref[0];
2421 count = parent->bytes / sizeof(hammer2_blockref_t);
2423 case HAMMER2_BREF_TYPE_VOLUME:
/* The volume root's blockset lives in the in-memory volume header. */
2424 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2425 base = &hmp->voldata.sroot_blockset.blockref[0];
2426 count = HAMMER2_SET_COUNT;
/* Any other parent bref type is a fatal logic error. */
2429 panic("hammer2_chain_delete: unrecognized blockref type: %d",
2436 * Disconnect the bref in the parent, remove the chain, and
2437 * disconnect in-memory fields from the parent.
2439 * WARNING! chain->cst.spin must be held when chain->parent is
2440 * modified, even though we own the full blown lock,
2441 * to deal with setsubmod and rename races.
2443 KKASSERT(chain->index >= 0 && chain->index < count);
2445 bzero(&base[chain->index], sizeof(*base));
2447 spin_lock(&chain->cst.spin);
2448 RB_REMOVE(hammer2_chain_tree, &parent->rbhead, chain);
2449 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2450 atomic_add_int(&parent->refs, -1); /* for red-black entry */
2452 chain->parent = NULL;
2453 spin_unlock(&chain->cst.spin);
2456 * Cumulative adjustments must be propagated to the parent inode
2457 * when deleting and synchronized to ip.
2459 * NOTE: We do not propagate ip->delta_*count to the parent because
2460 * these represent adjustments that have not yet been
2461 * propagated upward, so we don't need to remove them from
2464 * Clear the pointer to the parent inode.
2466 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
/*
 * Deleting an inode: back its already-synchronized counts out of the
 * parent inode's pending deltas, fold the inode's own unsynchronized
 * deltas into its stored counts, then account for the removal of the
 * inode itself (--delta_icount on the parent).
 */
2469 ip->pip->delta_icount -= ip->ip_data.inode_count;
2470 ip->pip->delta_dcount -= ip->ip_data.data_count;
2471 ip->ip_data.inode_count += ip->delta_icount;
2472 ip->ip_data.data_count += ip->delta_dcount;
2473 ip->delta_icount = 0;
2474 ip->delta_dcount = 0;
2475 --ip->pip->delta_icount;
2476 spin_lock(&chain->cst.spin); /* XXX */
2478 spin_unlock(&chain->cst.spin);
2483 * If retain is 0 the deletion is permanent. Because the chain is
2484 * no longer connected to the topology a flush will have no
2485 * visibility into it. We must dispose of the references related
2486 * to the MODIFIED and MOVED flags, otherwise the ref count will
2487 * never transition to 0.
2489 * If retain is non-zero the deleted element is likely an inode
2490 * which the vnops frontend will mark DESTROYED and flush. In that
2491 * situation we must retain the flags for any open file descriptors
2492 * on the (removed) inode. The final close will destroy the
2493 * disconnected chain.
/* Each of MODIFIED and MOVED owns one chain ref; clearing a flag drops it. */
2496 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2497 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
2498 hammer2_chain_drop(hmp, chain);
2500 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2501 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2502 hammer2_chain_drop(hmp, chain);
2507 * The chain is still likely referenced, possibly even by a vnode
2508 * (if an inode), so defer further action until the chain gets
2514 * Recursively flush the specified chain. The chain is locked and
2515 * referenced by the caller and will remain so on return. The chain
2516 * will remain referenced throughout but can temporarily lose its
2517 * lock during the recursion to avoid unnecessarily stalling user
2520 struct hammer2_flush_info {
/* Chains deferred due to the recursion depth limit; drained by the caller. */
2521 struct flush_deferral_list flush_list;
/* Transaction id stamped into mirror_tid/modify_tid of flushed chains. */
2523 hammer2_tid_t modify_tid;
2526 typedef struct hammer2_flush_info hammer2_flush_info_t;
2529 hammer2_chain_flush_pass1(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2530 hammer2_flush_info_t *info)
2532 hammer2_blockref_t *bref;
2533 hammer2_off_t pbase;
2542 * If we hit the stack recursion depth limit defer the operation.
2543 * The controller of the info structure will execute the deferral
2544 * list and then retry.
2546 * This is only applicable if SUBMODIFIED is set. After a reflush
2547 * SUBMODIFIED will probably be cleared and we want to drop through
2548 * to finish processing the current element so our direct parent
2549 * can process the results.
2551 if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT &&
2552 (chain->flags & HAMMER2_CHAIN_SUBMODIFIED)) {
/* DEFERRED guards against queueing the same chain twice; the queue entry owns a ref. */
2553 if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
2554 hammer2_chain_ref(hmp, chain);
2555 TAILQ_INSERT_TAIL(&info->flush_list,
2557 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
2562 if (hammer2_debug & 0x0008)
2563 kprintf("%*.*sCHAIN type=%d@%08jx %p/%d %04x {\n",
2564 info->depth, info->depth, "",
2565 chain->bref.type, chain->bref.data_off,
2566 chain, chain->refs, chain->flags);
2569 * If SUBMODIFIED is set we recurse the flush and adjust the
2570 * blockrefs accordingly.
2572 * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
2573 * finishing in the face of filesystem activity.
2575 if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
2576 hammer2_chain_t *child;
2577 hammer2_chain_t *saved;
2578 hammer2_blockref_t *base;
2582 * Clear SUBMODIFIED to catch races. Note that if any
2583 * child has to be flushed SUBMODIFIED will wind up being
2584 * set again (for next time), but this does not stop us from
2585 * synchronizing block updates which occurred.
2587 * We don't want to set our chain to MODIFIED gratuitously.
2589 * We need an extra ref on chain because we are going to
2590 * release its lock temporarily in our child loop.
2592 /* XXX SUBMODIFIED not interlocked, can race */
2593 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2594 hammer2_chain_ref(hmp, chain);
2597 * Flush the children and update the blockrefs in the chain.
2598 * Be careful of ripouts during the loop.
2601 RB_FOREACH(child, hammer2_chain_tree, &chain->rbhead) {
2602 KKASSERT(child->parent == chain);
/* Release the previously saved child (its movelock and ref). */
2605 hammer2_chain_moveunlock(hmp, saved);
2606 hammer2_chain_drop(hmp, saved);
2611 * We only recurse if SUBMODIFIED (internal node)
2612 * or MODIFIED (internal node or leaf) is set.
2613 * However, we must still track whether any MOVED
2614 * entries are present to determine if the chain's
2615 * blockref's need updating or not.
2617 if ((child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2618 HAMMER2_CHAIN_MODIFIED |
2619 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
/*
 * Ref and movelock the child, then drop the parent lock while
 * locking the child to avoid lock-order problems. Re-test the
 * flags after the lock since they may have changed in between.
 */
2623 hammer2_chain_ref(hmp, child);
2624 hammer2_chain_movelock(hmp, child);
2625 hammer2_chain_unlock(hmp, chain);
2626 hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_MAYBE);
2627 KKASSERT(child->parent == chain);
2628 if ((child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2629 HAMMER2_CHAIN_MODIFIED |
2630 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2631 hammer2_chain_unlock(hmp, child);
2632 hammer2_chain_lock(hmp, chain,
2633 HAMMER2_RESOLVE_MAYBE);
2638 * Propagate the DESTROYED flag if found set, then
2639 * recurse the flush.
2641 if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
2642 (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
2643 atomic_set_int(&child->flags,
2644 HAMMER2_CHAIN_DESTROYED |
2645 HAMMER2_CHAIN_SUBMODIFIED);
2648 hammer2_chain_flush_pass1(hmp, child, info);
2650 hammer2_chain_unlock(hmp, child);
2651 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
2652 KKASSERT(child->parent == chain);
/* Release the final saved child left over from the loop above. */
2655 hammer2_chain_moveunlock(hmp, saved);
2656 hammer2_chain_drop(hmp, saved);
2657 /*saved = NULL; not needed */
2661 * Now synchronize any block updates.
2663 RB_FOREACH(child, hammer2_chain_tree, &chain->rbhead) {
2664 if ((child->flags & HAMMER2_CHAIN_MOVED) == 0)
2666 hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_NEVER);
2667 KKASSERT(child->parent == chain);
2668 if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
2669 hammer2_chain_unlock(hmp, child);
/* A MOVED child forces us to modify this chain so its bref array can be rewritten. */
2673 hammer2_chain_modify(hmp, chain,
2674 HAMMER2_MODIFY_NO_MODIFY_TID);
/* Locate this chain's blockref array to receive the child's bref_flush. */
2676 switch(chain->bref.type) {
2677 case HAMMER2_BREF_TYPE_INODE:
2678 KKASSERT((chain->data->ipdata.op_flags &
2679 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2680 base = &chain->data->ipdata.u.blockset.
2682 count = HAMMER2_SET_COUNT;
2684 case HAMMER2_BREF_TYPE_INDIRECT:
2685 base = &chain->data->npdata.blockref[0];
2686 count = chain->bytes /
2687 sizeof(hammer2_blockref_t);
2689 case HAMMER2_BREF_TYPE_VOLUME:
2690 base = &hmp->voldata.sroot_blockset.blockref[0];
2691 count = HAMMER2_SET_COUNT;
2695 panic("hammer2_chain_get: "
2696 "unrecognized blockref type: %d",
/* Copy the child's flushed bref in and propagate mirror_tid upward. */
2700 KKASSERT(child->index >= 0);
2701 base[child->index] = child->bref_flush;
2703 if (chain->bref.mirror_tid <
2704 child->bref_flush.mirror_tid) {
2705 chain->bref.mirror_tid =
2706 child->bref_flush.mirror_tid;
2709 if (chain->bref.type == HAMMER2_BREF_TYPE_VOLUME &&
2710 hmp->voldata.mirror_tid <
2711 child->bref_flush.mirror_tid) {
2712 hmp->voldata.mirror_tid =
2713 child->bref_flush.mirror_tid;
2715 atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
2716 hammer2_chain_drop(hmp, child); /* MOVED flag */
2717 hammer2_chain_unlock(hmp, child);
2719 hammer2_chain_drop(hmp, chain);
2723 * If destroying the object we unconditionally clear the MODIFIED
2724 * and MOVED bits, and we destroy the buffer without writing it
2727 * We don't bother updating the hash/crc or the chain bref.
2729 * NOTE: The destroy'd object's bref has already been updated.
2730 * so we can clear MOVED without propagating mirror_tid
2731 * or modify_tid upward.
2733 * XXX allocations for unflushed data can be returned to the
2736 if (chain->flags & HAMMER2_CHAIN_DESTROYED) {
2737 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2739 chain->bp->b_flags |= B_INVAL|B_RELBUF;
2741 atomic_clear_int(&chain->flags,
2742 HAMMER2_CHAIN_MODIFIED |
2743 HAMMER2_CHAIN_MODIFY_TID);
2744 hammer2_chain_drop(hmp, chain);
2746 if (chain->flags & HAMMER2_CHAIN_MODIFIED_AUX) {
2747 atomic_clear_int(&chain->flags,
2748 HAMMER2_CHAIN_MODIFIED_AUX);
2750 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2751 atomic_clear_int(&chain->flags,
2752 HAMMER2_CHAIN_MOVED);
2753 hammer2_chain_drop(hmp, chain);
2759 * Flush this chain entry only if it is marked modified.
2761 if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2762 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2767 * Synchronize cumulative data and inode count adjustments to
2768 * the inode and propagate the deltas upward to the parent.
2770 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2771 hammer2_inode_t *ip;
/* Fold pending deltas into the inode's stored counts, push them to pip. */
2774 ip->ip_data.inode_count += ip->delta_icount;
2775 ip->ip_data.data_count += ip->delta_dcount;
2777 ip->pip->delta_icount += ip->delta_icount;
2778 ip->pip->delta_dcount += ip->delta_dcount;
2780 ip->delta_icount = 0;
2781 ip->delta_dcount = 0;
2785 * Flush if MODIFIED or MODIFIED_AUX is set. MODIFIED_AUX is only
2786 * used by the volume header (&hmp->vchain).
2788 if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2789 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2792 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED_AUX);
2795 * Clear MODIFIED and set HAMMER2_CHAIN_MOVED. The caller
2796 * will re-test the MOVED bit. We must also update the mirror_tid
2797 * and modify_tid fields as appropriate.
2799 * bits own a single chain ref and the MOVED bit owns its own
2802 chain->bref.mirror_tid = info->modify_tid;
2803 if (chain->flags & HAMMER2_CHAIN_MODIFY_TID)
2804 chain->bref.modify_tid = info->modify_tid;
2805 wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
2806 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
2807 HAMMER2_CHAIN_MODIFY_TID);
2809 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2811 * Drop the ref from the MODIFIED bit we cleared.
2814 hammer2_chain_drop(hmp, chain);
2817 * If we were MODIFIED we inherit the ref from clearing
2818 * that bit, otherwise we need another ref.
2820 if (wasmodified == 0)
2821 hammer2_chain_ref(hmp, chain);
2822 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
/* Snapshot the bref for the parent to pick up during its own flush. */
2824 chain->bref_flush = chain->bref;
2827 * If this is part of a recursive flush we can go ahead and write
2828 * out the buffer cache buffer and pass a new bref back up the chain.
2830 * This will never be a volume header.
2832 switch(chain->bref.type) {
2833 case HAMMER2_BREF_TYPE_VOLUME:
2835 * The volume header is flushed manually by the syncer, not
2839 case HAMMER2_BREF_TYPE_DATA:
2841 * Data elements have already been flushed via the logical
2842 * file buffer cache. Their hash was set in the bref by
2843 * the vop_write code.
2845 * Make sure the buffer(s) have been flushed out here.
2847 bbytes = chain->bytes;
2848 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
2849 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2851 bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
2853 if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
2854 (B_CACHE | B_DIRTY)) {
2858 bp->b_flags |= B_RELBUF;
2863 case HAMMER2_BREF_TYPE_INDIRECT:
2865 * Indirect blocks may be in an INITIAL state. Use the
2866 * chain_lock() call to ensure that the buffer has been
2867 * instantiated (even though it is already locked the buffer
2868 * might not have been instantiated).
2870 * Only write the buffer out if it is dirty, it is possible
2871 * the operating system had already written out the buffer.
2873 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_ALWAYS);
2874 KKASSERT(chain->bp != NULL);
2877 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
2878 (bp->b_flags & B_DIRTY)) {
2885 hammer2_chain_unlock(hmp, chain);
2889 * Embedded elements have to be flushed out.
2891 KKASSERT(chain->data != NULL);
2892 KKASSERT(chain->bp == NULL);
2893 bref = &chain->bref;
2895 KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
2897 if (chain->bp == NULL) {
2899 * The data is embedded, we have to acquire the
2900 * buffer cache buffer and copy the data into it.
2902 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
2903 bbytes = HAMMER2_MINIOSIZE;
2904 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
2905 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2908 * The getblk() optimization can only be used if the
2909 * physical block size matches the request.
2911 if (chain->bytes == bbytes) {
2912 bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
2915 error = bread(hmp->devvp, pbase, bbytes, &bp);
2916 KKASSERT(error == 0);
2918 bdata = (char *)bp->b_data + boff;
2921 * Copy the data to the buffer, mark the buffer
2922 * dirty, and convert the chain to unmodified.
2924 * We expect we might have to make adjustments to
2925 * non-data delayed-write buffers when doing an
2926 * actual flush so use bawrite() instead of
2927 * cluster_awrite() here.
2929 bcopy(chain->data, bdata, chain->bytes);
2930 bp->b_flags |= B_CLUSTEROK;
/* Recompute the bref's integrity check (iSCSI CRC32) over the flushed data. */
2933 chain->bref.check.iscsi32.value =
2934 hammer2_icrc32(chain->data, chain->bytes);
2935 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
2936 ++hammer2_iod_meta_write;
2938 ++hammer2_iod_indr_write;
2940 chain->bref.check.iscsi32.value =
2941 hammer2_icrc32(chain->data, chain->bytes);
2946 * Adjustments to the bref. The caller will use this to adjust
2947 * our chain's pointer to this chain element.
2949 bref = &chain->bref;
2951 switch(bref->type) {
2952 case HAMMER2_BREF_TYPE_VOLUME:
/* Recompute the layered volume-header CRCs (section 1, section 0, whole header). */
2953 KKASSERT(chain->data != NULL);
2954 KKASSERT(chain->bp == NULL);
2956 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
2958 (char *)&hmp->voldata +
2959 HAMMER2_VOLUME_ICRC1_OFF,
2960 HAMMER2_VOLUME_ICRC1_SIZE);
2961 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
2963 (char *)&hmp->voldata +
2964 HAMMER2_VOLUME_ICRC0_OFF,
2965 HAMMER2_VOLUME_ICRC0_SIZE);
2966 hmp->voldata.icrc_volheader =
2968 (char *)&hmp->voldata +
2969 HAMMER2_VOLUME_ICRCVH_OFF,
2970 HAMMER2_VOLUME_ICRCVH_SIZE);
2977 if (hammer2_debug & 0x0008) {
2978 kprintf("%*.*s} %p/%d %04x ",
2979 info->depth, info->depth, "",
2980 chain, chain->refs, chain->flags);
2986 * PASS2 - not yet implemented (should be called only with the root chain?)
/* NOTE(review): stub — no implementation is visible here; confirm intent before relying on it. */
2989 hammer2_chain_flush_pass2(hammer2_mount_t *hmp, hammer2_chain_t *chain)
2995 * Stand-alone flush. If the chain is unable to completely flush we have
2996 * to be sure that SUBMODIFIED propagates up the parent chain. We must not
2997 * clear the MOVED bit after flushing in this situation or our desynchronized
2998 * bref will not properly update in the parent.
3000 * This routine can be called from several places but the most important
3001 * is from the hammer2_vop_reclaim() function. We want to try to completely
3002 * clean out the inode structure to prevent disconnected inodes from
3003 * building up and blowing out the kmalloc pool.
3005 * If modify_tid is 0 (usual case), a new modify_tid is allocated and
3006 * applied to the flush. The depth-limit handling code is the only
3007 * code which passes a non-zero modify_tid to hammer2_chain_flush().
3009 * chain is locked on call and will remain locked on return.
3012 hammer2_chain_flush(hammer2_mount_t *hmp, hammer2_chain_t *chain,
3013 hammer2_tid_t modify_tid)
3015 hammer2_chain_t *parent;
3016 hammer2_chain_t *scan;
3017 hammer2_blockref_t *base;
3018 hammer2_flush_info_t info;
3023 * Execute the recursive flush and handle deferrals.
3025 * Chains can be ridiculously long (thousands deep), so to
3026 * avoid blowing out the kernel stack the recursive flush has a
3027 * depth limit. Elements at the limit are placed on a list
3028 * for re-execution after the stack has been popped.
3030 bzero(&info, sizeof(info));
3031 TAILQ_INIT(&info.flush_list);
/*
 * Allocate a new flush transaction id if the caller did not supply one,
 * marking the volume chain MODIFIED_AUX so the header gets rewritten.
 */
3033 if (modify_tid == 0) {
3034 hammer2_voldata_lock(hmp);
3035 info.modify_tid = hmp->voldata.alloc_tid++;
3036 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
3037 hammer2_voldata_unlock(hmp);
3039 info.modify_tid = modify_tid;
3047 hammer2_chain_flush_pass1(hmp, chain, &info);
/* Drain the deferral list built by pass1's depth-limit handling. */
3050 while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
3052 * Secondary recursion. Note that a reference is
3053 * retained from the element's presence on the
3056 KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
3057 TAILQ_REMOVE(&info.flush_list, scan, flush_node);
3058 atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);
3061 * Now that we've popped back up we can do a secondary
3062 * recursion on the deferred elements.
3064 if (hammer2_debug & 0x0040)
3065 kprintf("defered flush %p\n", scan);
3066 hammer2_chain_lock(hmp, scan, HAMMER2_RESOLVE_MAYBE);
3067 hammer2_chain_flush(hmp, scan, info.modify_tid);
3068 hammer2_chain_unlock(hmp, scan);
3071 * Only flag a reflush if SUBMODIFIED is no longer
3072 * set. If SUBMODIFIED is set the element will just
3073 * wind up on our flush_list again.
3075 if ((scan->flags & (HAMMER2_CHAIN_SUBMODIFIED |
3076 HAMMER2_CHAIN_MODIFIED |
3077 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
3080 hammer2_chain_drop(hmp, scan);
3082 if ((hammer2_debug & 0x0040) && reflush)
3083 kprintf("reflush %p\n", chain);
3087 * The SUBMODIFIED bit must propagate upward if the chain could not
3088 * be completely flushed.
3090 if (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
3091 HAMMER2_CHAIN_MODIFIED |
3092 HAMMER2_CHAIN_MODIFIED_AUX |
3093 HAMMER2_CHAIN_MOVED)) {
3094 hammer2_chain_parent_setsubmod(hmp, chain);
3098 * If the only thing left is a simple bref update try to
3099 * pro-actively update the parent, otherwise return early.
3101 parent = chain->parent;
3102 if (parent == NULL) {
/* Only inodes whose sole remaining state is MOVED qualify for the shortcut. */
3105 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
3106 (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
3107 HAMMER2_CHAIN_MODIFIED |
3108 HAMMER2_CHAIN_MODIFIED_AUX |
3109 HAMMER2_CHAIN_MOVED)) != HAMMER2_CHAIN_MOVED) {
3114 * We are locking backwards so allow the lock to fail.
3116 if (ccms_thread_lock_nonblock(&parent->cst, CCMS_STATE_EXCLUSIVE))
3120 * We are updating brefs but we have to call chain_modify()
3121 * because our caller is not being run from a recursive flush.
3123 * This will also chain up the parent list and set the SUBMODIFIED
3126 * We do not want to set HAMMER2_CHAIN_MODIFY_TID here because the
3127 * modification is only related to updating a bref in the parent.
3129 * When updating the blockset embedded in the volume header we must
3130 * also update voldata.mirror_tid.
3132 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
3133 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
/* Locate the blockref array in the parent that holds our entry. */
3135 switch(parent->bref.type) {
3136 case HAMMER2_BREF_TYPE_INODE:
3137 base = &parent->data->ipdata.u.blockset.
3139 count = HAMMER2_SET_COUNT;
3141 case HAMMER2_BREF_TYPE_INDIRECT:
3142 base = &parent->data->npdata.blockref[0];
3143 count = parent->bytes /
3144 sizeof(hammer2_blockref_t);
3146 case HAMMER2_BREF_TYPE_VOLUME:
3147 base = &hmp->voldata.sroot_blockset.blockref[0];
3148 count = HAMMER2_SET_COUNT;
3149 if (chain->flags & HAMMER2_CHAIN_MOVED) {
3150 if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
3151 hmp->voldata.mirror_tid =
3152 chain->bref.mirror_tid;
3158 panic("hammer2_chain_flush: "
3159 "unrecognized blockref type: %d",
3164 * Update the blockref in the parent. We do not have to set
3165 * MOVED in the parent because the parent has been marked modified,
3166 * so the flush sequence will pick up the bref change.
3168 * We do have to propagate mirror_tid upward.
3170 KKASSERT(chain->index >= 0 &&
3171 chain->index < count);
3172 KKASSERT(chain->parent == parent);
3173 if (chain->flags & HAMMER2_CHAIN_MOVED) {
3174 base[chain->index] = chain->bref_flush;
3175 if (parent->bref.mirror_tid < chain->bref_flush.mirror_tid)
3176 parent->bref.mirror_tid = chain->bref_flush.mirror_tid;
3177 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
3178 hammer2_chain_drop(hmp, chain);
3179 } else if (bcmp(&base[chain->index], &chain->bref_flush,
3180 sizeof(chain->bref)) != 0) {
3181 panic("hammer2: unflagged bref update(2)");
3183 ccms_thread_unlock(&parent->cst); /* release manual op */
3184 hammer2_chain_unlock(hmp, parent);