2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem handles direct and indirect block searches, recursions,
37 * creation, and deletion. Chains of blockrefs are tracked and modifications
38 * are flagged for propagation... eventually all the way back to the volume
42 #include <sys/cdefs.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
51 static int hammer2_indirect_optimize; /* XXX SYSCTL */
53 static hammer2_chain_t *hammer2_chain_create_indirect(
54 hammer2_mount_t *hmp, hammer2_chain_t *parent,
55 hammer2_key_t key, int keybits);
60 SPLAY_GENERATE(hammer2_chain_splay, hammer2_chain, snode, hammer2_chain_cmp);
63 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
65 return(chain2->index - chain1->index);
69 * Recursively mark the parent chain elements so flushes can find
72 * NOTE: The flush code will modify a SUBMODIFIED-flagged chain
73 * during the flush recursion after clearing the parent's
74 * SUBMODIFIED bit. We don't want to re-set the parent's
75 * SUBMODIFIED bit in this case!
77 * XXX rename of parent can create a SMP race
80 hammer2_chain_parent_setsubmod(hammer2_mount_t *hmp, hammer2_chain_t *chain)
82 hammer2_chain_t *parent;
84 if ((chain->flags & HAMMER2_CHAIN_SUBMODIFIED) == 0) {
85 parent = chain->parent;
87 (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) == 0) {
88 atomic_set_int(&parent->flags,
89 HAMMER2_CHAIN_SUBMODIFIED);
90 parent = parent->parent;
96 * Allocate a new disconnected chain element representing the specified
97 * bref. The chain element is locked exclusively and refs is set to 1.
99 * This essentially allocates a system memory structure representing one
100 * of the media structure types, including inodes.
103 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
105 hammer2_chain_t *chain;
107 hammer2_indblock_t *np;
109 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
112 * Construct the appropriate system structure.
115 case HAMMER2_BREF_TYPE_INODE:
116 ip = kmalloc(sizeof(*ip), hmp->minode, M_WAITOK | M_ZERO);
119 lockinit(&chain->lk, "inode", 0, LK_CANRECURSE);
122 case HAMMER2_BREF_TYPE_INDIRECT:
123 np = kmalloc(sizeof(*np), hmp->mchain, M_WAITOK | M_ZERO);
126 lockinit(&chain->lk, "iblk", 0, LK_CANRECURSE);
128 case HAMMER2_BREF_TYPE_DATA:
129 dp = kmalloc(sizeof(*dp), hmp->mchain, M_WAITOK | M_ZERO);
132 lockinit(&chain->lk, "dblk", 0, LK_CANRECURSE);
134 case HAMMER2_BREF_TYPE_VOLUME:
136 panic("hammer2_chain_alloc volume type illegal for op");
139 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
143 chain->index = -1; /* not yet assigned */
145 chain->bytes = bytes;
146 lockmgr(&chain->lk, LK_EXCLUSIVE);
152 * Free a disconnected chain element
155 hammer2_chain_free(hammer2_mount_t *hmp, hammer2_chain_t *chain)
159 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
160 chain->bref.type == HAMMER2_BREF_TYPE_VOLUME) {
164 KKASSERT(chain->bp == NULL);
165 KKASSERT(chain->data == NULL);
166 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
167 chain->u.ip->vp == NULL);
169 if ((mem = chain->u.mem) != NULL) {
171 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
172 kfree(mem, hmp->minode);
174 kfree(mem, hmp->mchain);
179 * Add a reference to a chain element (for shared access). The chain
180 * element must already have at least 1 ref controlled by the caller.
183 hammer2_chain_ref(hammer2_mount_t *hmp, hammer2_chain_t *chain)
185 KKASSERT(chain->refs > 0);
186 atomic_add_int(&chain->refs, 1);
190 * Drop the caller's reference to the chain element. If the ref count
191 * reaches zero the chain element and its related structure (typically an
192 * inode or indirect block) will be freed and the parent will be
193 * recursively dropped.
195 * Modified elements hold an additional reference so it should not be
196 * possible for the count on a modified element to drop to 0.
198 * The chain element must NOT be locked by the caller.
200 * The parent might or might not be locked by the caller but if so it
201 * will also be referenced so we shouldn't recurse upward.
204 hammer2_chain_drop(hammer2_mount_t *hmp, hammer2_chain_t *chain)
206 hammer2_chain_t *parent;
214 KKASSERT(chain != &hmp->vchain);
215 parent = chain->parent;
217 lockmgr(&parent->lk, LK_EXCLUSIVE);
218 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
220 * Succeeded, recurse and drop parent.
221 * These chain elements should be synchronized
222 * so no delta data or inode count updates
225 if (!(chain->flags & HAMMER2_CHAIN_DELETED)) {
226 SPLAY_REMOVE(hammer2_chain_splay,
227 &parent->shead, chain);
228 atomic_set_int(&chain->flags,
229 HAMMER2_CHAIN_DELETED);
230 /* parent refs dropped via recursion */
232 chain->parent = NULL;
234 lockmgr(&parent->lk, LK_RELEASE);
235 hammer2_chain_free(hmp, chain);
237 /* recurse on parent */
240 lockmgr(&parent->lk, LK_RELEASE);
241 /* retry the same chain */
244 if (atomic_cmpset_int(&chain->refs, refs, refs - 1)) {
246 * Succeeded, count did not reach zero so
247 * cut out of the loop.
251 /* retry the same chain */
257 * Ref and lock a chain element, acquiring its data with I/O if necessary,
258 * and specify how you would like the data to be resolved.
260 * Returns 0 on success or an error code if the data could not be acquired.
261 * The chain element is locked either way.
263 * The lock is allowed to recurse, multiple locking ops will aggregate
264 * the requested resolve types. Once data is assigned it will not be
265 * removed until the last unlock.
267 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
268 * (typically used to avoid device/logical buffer
271 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
272 * the INITIAL-create state (indirect blocks only).
274 * Do not resolve data elements for DATA chains.
275 * (typically used to avoid device/logical buffer
278 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
281 * NOTE: Embedded elements (volume header, inodes) are always resolved
284 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
285 * element will instantiate and zero its buffer, and flush it on
288 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
289 * so as not to instantiate a device buffer, which could alias against
290 * a logical file buffer. However, if ALWAYS is specified the
291 * device buffer will be instantiated anyway.
294 hammer2_chain_lock(hammer2_mount_t *hmp, hammer2_chain_t *chain, int how)
296 hammer2_blockref_t *bref;
305 * Lock the element. Under certain conditions this might end up
306 * being a recursive lock.
308 KKASSERT(chain->refs > 0);
309 atomic_add_int(&chain->refs, 1);
310 lockmgr(&chain->lk, LK_EXCLUSIVE);
313 * If we already have a valid data pointer no further action is
320 * Do we have to resolve the data?
323 case HAMMER2_RESOLVE_NEVER:
325 case HAMMER2_RESOLVE_MAYBE:
326 if (chain->flags & HAMMER2_CHAIN_INITIAL)
328 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
331 case HAMMER2_RESOLVE_ALWAYS:
336 * We must resolve to a device buffer, either by issuing I/O or
337 * by creating a zero-fill element. We do not mark the buffer
338 * dirty when creating a zero-fill element (the hammer2_chain_modify()
339 * API must still be used to do that).
341 * The device buffer is variable-sized in powers of 2 down
342 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
343 * chunk always contains buffers of the same size. (XXX)
345 * The minimum physical IO size may be larger than the variable
350 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
351 bbytes = HAMMER2_MINIOSIZE;
352 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
353 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
354 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
355 KKASSERT(pbase != 0);
358 * The getblk() optimization can only be used on newly created
359 * elements if the physical block size matches the request.
361 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
362 chain->bytes == bbytes) {
363 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
365 } else if (hammer2_cluster_enable) {
366 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
367 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
370 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
374 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
375 (intmax_t)pbase, error);
382 * Zero the data area if the chain is in the INITIAL-create state
384 bdata = (char *)chain->bp->b_data + boff;
385 if (chain->flags & HAMMER2_CHAIN_INITIAL)
386 bzero(bdata, chain->bytes);
389 * Setup the data pointer, either pointing it to an embedded data
390 * structure and copying the data from the buffer, or pointing it
393 * The buffer is not retained when copying to an embedded data
394 * structure in order to avoid potential deadlocks or recursions
395 * on the same physical buffer.
397 switch (bref->type) {
398 case HAMMER2_BREF_TYPE_VOLUME:
400 * Copy data from bp to embedded buffer
402 panic("hammer2_chain_lock: called on unresolved volume header");
405 KKASSERT(pbase == 0);
406 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
407 bcopy(bdata, &hmp->voldata, chain->bytes);
408 chain->data = (void *)&hmp->voldata;
413 case HAMMER2_BREF_TYPE_INODE:
415 * Copy data from bp to embedded buffer, do not retain the
418 bcopy(bdata, &chain->u.ip->ip_data, chain->bytes);
419 chain->data = (void *)&chain->u.ip->ip_data;
423 case HAMMER2_BREF_TYPE_INDIRECT:
424 case HAMMER2_BREF_TYPE_DATA:
427 * Point data at the device buffer and leave bp intact.
429 chain->data = (void *)bdata;
436 * Unlock and deref a chain element.
438 * On the last lock release any non-embedded data (chain->bp) will be
442 hammer2_chain_unlock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
447 * Undo a recursive lock
449 if (lockcountnb(&chain->lk) > 1) {
450 KKASSERT(chain->refs > 1);
451 atomic_add_int(&chain->refs, -1);
452 lockmgr(&chain->lk, LK_RELEASE);
457 * Shortcut the case if the data is embedded or not resolved.
458 * Do NOT null-out pointers to embedded data (e.g. inode).
460 if (chain->bp == NULL) {
461 lockmgr(&chain->lk, LK_RELEASE);
462 hammer2_chain_drop(hmp, chain);
469 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
471 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
472 switch(chain->bref.type) {
473 case HAMMER2_BREF_TYPE_DATA:
474 counterp = &hammer2_ioa_file_write;
476 case HAMMER2_BREF_TYPE_INODE:
477 counterp = &hammer2_ioa_meta_write;
479 case HAMMER2_BREF_TYPE_INDIRECT:
480 counterp = &hammer2_ioa_indr_write;
483 counterp = &hammer2_ioa_volu_write;
488 switch(chain->bref.type) {
489 case HAMMER2_BREF_TYPE_DATA:
490 counterp = &hammer2_iod_file_write;
492 case HAMMER2_BREF_TYPE_INODE:
493 counterp = &hammer2_iod_meta_write;
495 case HAMMER2_BREF_TYPE_INDIRECT:
496 counterp = &hammer2_iod_indr_write;
499 counterp = &hammer2_iod_volu_write;
508 * If a device buffer was used for data be sure to destroy the
509 * buffer when we are done to avoid aliases (XXX what about the
510 * underlying VM pages?).
512 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
513 chain->bp->b_flags |= B_RELBUF;
516 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
517 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
518 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
519 atomic_clear_int(&chain->flags,
520 HAMMER2_CHAIN_IOFLUSH);
521 chain->bp->b_flags |= B_RELBUF;
522 cluster_awrite(chain->bp);
524 chain->bp->b_flags |= B_CLUSTEROK;
528 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
529 atomic_clear_int(&chain->flags,
530 HAMMER2_CHAIN_IOFLUSH);
531 chain->bp->b_flags |= B_RELBUF;
534 /* bp might still be dirty */
539 lockmgr(&chain->lk, LK_RELEASE);
540 hammer2_chain_drop(hmp, chain);
544 * Resize the chain's physical storage allocation. Chains can be resized
545 * smaller without reallocating the storage. Resizing larger will reallocate
548 * Must be passed a locked chain. If you want the resize to copy the data
549 * you should lock the chain with RESOLVE_MAYBE or RESOLVE_ALWAYS, otherwise
550 * the resize operation will not copy the data.
552 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
553 * to avoid instantiating a device buffer that conflicts with the vnode
556 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
559 hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *chain,
560 int nradix, int flags)
562 hammer2_mount_t *hmp = ip->hmp;
573 * Only data and indirect blocks can be resized for now
575 KKASSERT(chain != &hmp->vchain);
576 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
577 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
580 * Nothing to do if the element is already the proper size
582 obytes = chain->bytes;
583 nbytes = 1U << nradix;
584 if (obytes == nbytes)
588 * Set MODIFIED and add a chain ref to prevent destruction. Both
589 * modified flags share the same ref.
591 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
592 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
593 HAMMER2_CHAIN_MODIFY_TID);
594 hammer2_chain_ref(hmp, chain);
598 * Relocate the block, even if making it smaller (because different
599 * block sizes may be in different regions).
601 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
603 chain->bytes = nbytes;
604 ip->delta_dcount += (ssize_t)(nbytes - obytes); /* XXX atomic */
607 * The device buffer may be larger than the allocation size.
609 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
610 bbytes = HAMMER2_MINIOSIZE;
611 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
612 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
615 * Only copy the data if resolved, otherwise the caller is
619 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
620 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
621 KKASSERT(chain != &hmp->vchain); /* safety */
624 * The getblk() optimization can only be used if the
625 * physical block size matches the request.
627 if (nbytes == bbytes) {
628 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
631 error = bread(hmp->devvp, pbase, bbytes, &nbp);
632 KKASSERT(error == 0);
634 bdata = (char *)nbp->b_data + boff;
636 if (nbytes < obytes) {
637 bcopy(chain->data, bdata, nbytes);
639 bcopy(chain->data, bdata, obytes);
640 bzero(bdata + obytes, nbytes - obytes);
644 * NOTE: The INITIAL state of the chain is left intact.
646 * NOTE: Because of the reallocation we have to set DIRTYBP
647 * if INITIAL is not set.
649 chain->bp->b_flags |= B_RELBUF;
652 chain->data = (void *)bdata;
653 if ((chain->flags & HAMMER2_CHAIN_INITIAL) == 0)
654 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
656 hammer2_chain_parent_setsubmod(hmp, chain);
660 * Convert a locked chain that was retrieved read-only to read-write.
662 * If not already marked modified a new physical block will be allocated
663 * and assigned to the bref.
665 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
666 * level or the COW operation will not work.
668 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
669 * run the data through the device buffers.
672 hammer2_chain_modify(hammer2_mount_t *hmp, hammer2_chain_t *chain, int flags)
682 * Tells flush that modify_tid must be updated, otherwise only
683 * mirror_tid is updated. This is the default.
685 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
686 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFY_TID);
689 * If the chain is already marked MODIFIED we can just return.
691 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
692 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
700 * Set MODIFIED and add a chain ref to prevent destruction. Both
701 * modified flags share the same ref.
703 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
704 hammer2_chain_ref(hmp, chain);
707 * We must allocate the copy-on-write block.
709 * If the data is embedded no other action is required.
711 * If the data is not embedded we acquire and clear the
712 * new block. If chain->data is not NULL we then do the
713 * copy-on-write. chain->data will then be repointed to the new
714 * buffer and the old buffer will be released.
716 * For newly created elements with no prior allocation we go
717 * through the copy-on-write steps except without the copying part.
719 if (chain != &hmp->vchain) {
720 if ((hammer2_debug & 0x0001) &&
721 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
722 kprintf("Replace %d\n", chain->bytes);
724 chain->bref.data_off =
725 hammer2_freemap_alloc(hmp, chain->bref.type,
727 /* XXX failed allocation */
731 * If data instantiation is optional and the chain has no current
732 * data association (typical for DATA and newly-created INDIRECT
733 * elements), don't instantiate the buffer now.
735 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
740 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
741 * written-out on unlock. This bit is independent of the MODIFIED
742 * bit because the chain may still need meta-data adjustments done
743 * by virtue of MODIFIED for its parent, and the buffer can be
744 * flushed out (possibly multiple times) by the OS before that.
746 * Clearing the INITIAL flag (for indirect blocks) indicates that
747 * a zero-fill buffer has been instantiated.
749 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
750 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
753 * We currently should never instantiate a device buffer for a
756 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
759 * Execute COW operation
761 switch(chain->bref.type) {
762 case HAMMER2_BREF_TYPE_VOLUME:
763 case HAMMER2_BREF_TYPE_INODE:
765 * The data is embedded, no copy-on-write operation is
768 KKASSERT(chain->bp == NULL);
770 case HAMMER2_BREF_TYPE_DATA:
771 case HAMMER2_BREF_TYPE_INDIRECT:
773 * Perform the copy-on-write operation
775 KKASSERT(chain != &hmp->vchain); /* safety */
777 * The device buffer may be larger than the allocation size.
779 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
780 bbytes = HAMMER2_MINIOSIZE;
781 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
782 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
785 * The getblk() optimization can only be used if the
786 * physical block size matches the request.
788 if (chain->bytes == bbytes) {
789 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
792 error = bread(hmp->devvp, pbase, bbytes, &nbp);
793 KKASSERT(error == 0);
795 bdata = (char *)nbp->b_data + boff;
798 * Copy or zero-fill on write depending on whether
799 * chain->data exists or not.
802 bcopy(chain->data, bdata, chain->bytes);
803 KKASSERT(chain->bp != NULL);
805 bzero(bdata, chain->bytes);
808 chain->bp->b_flags |= B_RELBUF;
815 panic("hammer2_chain_modify: illegal non-embedded type %d",
821 if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
822 hammer2_chain_parent_setsubmod(hmp, chain);
826 * Mark the volume as having been modified. This short-cut version
827 * does not have to lock the volume's chain, which allows the ioctl
828 * code to make adjustments to connections without deadlocking.
831 hammer2_modify_volume(hammer2_mount_t *hmp)
833 hammer2_voldata_lock(hmp);
834 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
835 hammer2_voldata_unlock(hmp);
839 * Locate an in-memory chain. The parent must be locked. The in-memory
840 * chain is returned or NULL if no in-memory chain is present.
842 * NOTE: A chain on-media might exist for this index when NULL is returned.
845 hammer2_chain_find(hammer2_mount_t *hmp, hammer2_chain_t *parent, int index)
847 hammer2_chain_t dummy;
848 hammer2_chain_t *chain;
851 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
856 * Return a locked chain structure with all associated data acquired.
858 * Caller must lock the parent on call, the returned child will be locked.
861 hammer2_chain_get(hammer2_mount_t *hmp, hammer2_chain_t *parent,
862 int index, int flags)
864 hammer2_blockref_t *bref;
865 hammer2_chain_t *chain;
866 hammer2_chain_t dummy;
870 * Figure out how to lock. MAYBE can be used to optimized
871 * the initial-create state for indirect blocks.
873 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
874 how = HAMMER2_RESOLVE_NEVER;
876 how = HAMMER2_RESOLVE_MAYBE;
879 * First see if we have a (possibly modified) chain element cached
880 * for this (parent, index). Acquire the data if necessary.
882 * If chain->data is non-NULL the chain should already be marked
886 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
888 if (flags & HAMMER2_LOOKUP_NOLOCK)
889 hammer2_chain_ref(hmp, chain);
891 hammer2_chain_lock(hmp, chain, how);
896 * the get function must always succeed, panic if there's no
899 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
900 panic("hammer2_chain_get: Missing bref(1)");
905 * Otherwise lookup the bref and issue I/O (switch on the parent)
907 switch(parent->bref.type) {
908 case HAMMER2_BREF_TYPE_INODE:
909 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
910 bref = &parent->data->ipdata.u.blockset.blockref[index];
912 case HAMMER2_BREF_TYPE_INDIRECT:
913 KKASSERT(parent->data != NULL);
914 KKASSERT(index >= 0 &&
915 index < parent->bytes / sizeof(hammer2_blockref_t));
916 bref = &parent->data->npdata.blockref[index];
918 case HAMMER2_BREF_TYPE_VOLUME:
919 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
920 bref = &hmp->voldata.sroot_blockset.blockref[index];
924 panic("hammer2_chain_get: unrecognized blockref type: %d",
927 if (bref->type == 0) {
928 panic("hammer2_chain_get: Missing bref(2)");
933 * Allocate a chain structure representing the existing media
936 * The locking operation we do later will issue I/O to read it.
938 chain = hammer2_chain_alloc(hmp, bref);
941 * Link the chain into its parent. Caller is expected to hold an
942 * exclusive lock on the parent.
944 chain->parent = parent;
945 chain->index = index;
946 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, chain))
947 panic("hammer2_chain_link: collision");
948 KKASSERT(parent->refs > 0);
949 atomic_add_int(&parent->refs, 1); /* for splay entry */
952 * Additional linkage for inodes. Reuse the parent pointer to
953 * find the parent directory.
955 if (bref->type == HAMMER2_BREF_TYPE_INODE) {
956 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
957 parent = parent->parent;
958 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
959 chain->u.ip->pip = parent->u.ip;
960 chain->u.ip->pmp = parent->u.ip->pmp;
961 chain->u.ip->depth = parent->u.ip->depth + 1;
966 * Our new chain structure has already been referenced and locked
967 * but the lock code handles the I/O so call it to resolve the data.
968 * Then release one of our two exclusive locks.
970 * If NOLOCK is set the release will release the one-and-only lock.
972 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
973 hammer2_chain_lock(hmp, chain, how); /* recusive lock */
974 hammer2_chain_drop(hmp, chain); /* excess ref */
976 lockmgr(&chain->lk, LK_RELEASE); /* from alloc */
982 * Locate any key between key_beg and key_end inclusive. (*parentp)
983 * typically points to an inode but can also point to a related indirect
984 * block and this function will recurse upwards and find the inode again.
986 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
987 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
988 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
990 * (*parentp) must be exclusively locked and referenced and can be an inode
991 * or an existing indirect block within the inode.
993 * On return (*parentp) will be modified to point at the deepest parent chain
994 * element encountered during the search, as a helper for an insertion or
995 * deletion. The new (*parentp) will be locked and referenced and the old
996 * will be unlocked and dereferenced (no change if they are both the same).
998 * The matching chain will be returned exclusively locked and referenced.
1000 * NULL is returned if no match was found, but (*parentp) will still
1001 * potentially be adjusted.
1003 * This function will also recurse up the chain if the key is not within the
1004 * current parent's range. (*parentp) can never be set to NULL. An iteration
1005 * can simply allow (*parentp) to float inside the loop.
1008 hammer2_chain_lookup(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1009 hammer2_key_t key_beg, hammer2_key_t key_end,
1012 hammer2_chain_t *parent;
1013 hammer2_chain_t *chain;
1014 hammer2_chain_t *tmp;
1015 hammer2_blockref_t *base;
1016 hammer2_blockref_t *bref;
1017 hammer2_key_t scan_beg;
1018 hammer2_key_t scan_end;
1023 * Recurse (*parentp) upward if necessary until the parent completely
1024 * encloses the key range or we hit the inode.
1027 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1028 scan_beg = parent->bref.key;
1029 scan_end = scan_beg +
1030 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1031 if (key_beg >= scan_beg && key_end <= scan_end)
1033 hammer2_chain_ref(hmp, parent); /* ref old parent */
1034 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1035 parent = parent->parent;
1036 /* lock new parent */
1037 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1038 hammer2_chain_drop(hmp, *parentp); /* drop old parent */
1039 *parentp = parent; /* new parent */
1044 * Locate the blockref array. Currently we do a fully associative
1045 * search through the array.
1047 switch(parent->bref.type) {
1048 case HAMMER2_BREF_TYPE_INODE:
1050 * Special shortcut for embedded data returns the inode
1051 * itself. Callers must detect this condition and access
1052 * the embedded data (the strategy code does this for us).
1054 * This is only applicable to regular files and softlinks.
1056 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1057 if (flags & HAMMER2_LOOKUP_NOLOCK)
1058 hammer2_chain_ref(hmp, parent);
1060 hammer2_chain_lock(hmp, parent,
1061 HAMMER2_RESOLVE_ALWAYS);
1064 base = &parent->data->ipdata.u.blockset.blockref[0];
1065 count = HAMMER2_SET_COUNT;
1067 case HAMMER2_BREF_TYPE_INDIRECT:
1069 * Optimize indirect blocks in the INITIAL state to avoid
1072 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1075 if (parent->data == NULL)
1076 panic("parent->data is NULL");
1077 base = &parent->data->npdata.blockref[0];
1079 count = parent->bytes / sizeof(hammer2_blockref_t);
1081 case HAMMER2_BREF_TYPE_VOLUME:
1082 base = &hmp->voldata.sroot_blockset.blockref[0];
1083 count = HAMMER2_SET_COUNT;
1086 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1088 base = NULL; /* safety */
1089 count = 0; /* safety */
1093 * If the element and key overlap we use the element.
1096 for (i = 0; i < count; ++i) {
1097 tmp = hammer2_chain_find(hmp, parent, i);
1100 KKASSERT(bref->type != 0);
1101 } else if (base == NULL || base[i].type == 0) {
1106 scan_beg = bref->key;
1107 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1108 if (key_beg <= scan_end && key_end >= scan_beg)
1112 if (key_beg == key_end)
1114 return (hammer2_chain_next(hmp, parentp, NULL,
1115 key_beg, key_end, flags));
1119 * Acquire the new chain element. If the chain element is an
1120 * indirect block we must search recursively.
1122 chain = hammer2_chain_get(hmp, parent, i, flags);
1127 * If the chain element is an indirect block it becomes the new
1128 * parent and we loop on it.
1130 * The parent always has to be locked with at least RESOLVE_MAYBE,
1131 * so it might need a fixup if the caller passed incompatible flags.
1133 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1134 hammer2_chain_unlock(hmp, parent);
1135 *parentp = parent = chain;
1136 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1137 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1138 hammer2_chain_drop(hmp, chain); /* excess ref */
1139 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1140 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1141 hammer2_chain_unlock(hmp, chain);
1147 * All done, return chain
1153 * After having issued a lookup we can iterate all matching keys.
1155 * If chain is non-NULL we continue the iteration from just after its index.
1157 * If chain is NULL we assume the parent was exhausted and continue the
1158 * iteration at the next parent.
1160 * parent must be locked on entry and remains locked throughout. chain's
1161 * lock status must match flags.
1164 hammer2_chain_next(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1165 hammer2_chain_t *chain,
1166 hammer2_key_t key_beg, hammer2_key_t key_end,
1169 hammer2_chain_t *parent;
1170 hammer2_chain_t *tmp;
1171 hammer2_blockref_t *base;
1172 hammer2_blockref_t *bref;
1173 hammer2_key_t scan_beg;
1174 hammer2_key_t scan_end;
1182 * Calculate the next index and recalculate the parent if necessary.
1186 * Continue iteration within current parent. If not NULL
1187 * the passed-in chain may or may not be locked, based on
1188 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1191 i = chain->index + 1;
1192 if (flags & HAMMER2_LOOKUP_NOLOCK)
1193 hammer2_chain_drop(hmp, chain);
1195 hammer2_chain_unlock(hmp, chain);
1198 * Any scan where the lookup returned degenerate data embedded
1199 * in the inode has an invalid index and must terminate.
1201 if (chain == parent)
1204 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT) {
1206 * We reached the end of the iteration.
1211 * Continue iteration with next parent unless the current
1212 * parent covers the range.
1214 hammer2_chain_t *nparent;
1216 scan_beg = parent->bref.key;
1217 scan_end = scan_beg +
1218 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1219 if (key_beg >= scan_beg && key_end <= scan_end)
1222 i = parent->index + 1;
1223 nparent = parent->parent;
1224 hammer2_chain_ref(hmp, nparent); /* ref new parent */
1225 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1226 /* lock new parent */
1227 hammer2_chain_lock(hmp, nparent, HAMMER2_RESOLVE_MAYBE);
1228 hammer2_chain_drop(hmp, nparent); /* drop excess ref */
1229 *parentp = parent = nparent;
1234 * Locate the blockref array. Currently we do a fully associative
1235 * search through the array.
1237 switch(parent->bref.type) {
1238 case HAMMER2_BREF_TYPE_INODE:
1239 base = &parent->data->ipdata.u.blockset.blockref[0];
1240 count = HAMMER2_SET_COUNT;
1242 case HAMMER2_BREF_TYPE_INDIRECT:
1243 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1246 KKASSERT(parent->data != NULL);
1247 base = &parent->data->npdata.blockref[0];
1249 count = parent->bytes / sizeof(hammer2_blockref_t);
1251 case HAMMER2_BREF_TYPE_VOLUME:
1252 base = &hmp->voldata.sroot_blockset.blockref[0];
1253 count = HAMMER2_SET_COUNT;
1256 panic("hammer2_chain_next: unrecognized blockref type: %d",
1258 base = NULL; /* safety */
1259 count = 0; /* safety */
1262 KKASSERT(i <= count);
1265 * Look for the key. If we are unable to find a match and an exact
1266 * match was requested we return NULL. If a range was requested we
1267 * run hammer2_chain_next() to iterate.
1271 tmp = hammer2_chain_find(hmp, parent, i);
1274 } else if (base == NULL || base[i].type == 0) {
1280 scan_beg = bref->key;
1281 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1282 if (key_beg <= scan_end && key_end >= scan_beg)
1288 * If we couldn't find a match recurse up a parent to continue the
1295 * Acquire the new chain element. If the chain element is an
1296 * indirect block we must search recursively.
1298 chain = hammer2_chain_get(hmp, parent, i, flags);
1303 * If the chain element is an indirect block it becomes the new
1304 * parent and we loop on it.
1306 * The parent always has to be locked with at least RESOLVE_MAYBE,
1307 * so it might need a fixup if the caller passed incompatible flags.
1309 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1310 hammer2_chain_unlock(hmp, parent);
1311 *parentp = parent = chain;
1313 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1314 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1315 hammer2_chain_drop(hmp, parent); /* excess ref */
1316 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1317 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1318 hammer2_chain_unlock(hmp, parent);
1325 * All done, return chain
1331 * Create and return a new hammer2 system memory structure of the specified
1332 * key, type and size and insert it RELATIVE TO (PARENT).
1334 * (parent) is typically either an inode or an indirect block, acquired
1335 * as a side effect of issuing a prior failed lookup. parent
1336 * must be locked and held. Do not pass the inode chain to this function
1337 * unless that is the chain returned by the failed lookup.
1339 * Non-indirect types will automatically allocate indirect blocks as required
1340 * if the new item does not fit in the current (parent).
1342 * Indirect types will move a portion of the existing blockref array in
1343 * (parent) into the new indirect type and then use one of the free slots
1344 * to emplace the new indirect type.
1346 * A new locked, referenced chain element is returned of the specified type.
1347 * The element may or may not have a data area associated with it:
1349 * VOLUME not allowed here
1350 * INODE embedded data area will be set-up
1351 * INDIRECT not allowed here
1352 * DATA no data area will be set-up (caller is expected
1353 * to have logical buffers, we don't want to alias
1354 * the data onto device buffers!).
/*
 * NOTE(review): this file is a line-numbered listing with many short
 * lines (braces, breaks, blanks) elided; the embedded numbers are part
 * of the captured text and are preserved verbatim below.
 */
1357 hammer2_chain_create(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1358 hammer2_chain_t *chain,
1359 hammer2_key_t key, int keybits, int type, size_t bytes)
1361 hammer2_blockref_t dummy;
1362 hammer2_blockref_t *base;
1363 hammer2_chain_t dummy_chain;
1364 int unlock_parent = 0;
1369 if (chain == NULL) {
1371 * First allocate media space and construct the dummy bref,
1372 * then allocate the in-memory chain structure.
1374 bzero(&dummy, sizeof(dummy));
1377 dummy.keybits = keybits;
1378 dummy.data_off = hammer2_bytes_to_radix(bytes);
1379 chain = hammer2_chain_alloc(hmp, &dummy);
1383 * We do NOT set INITIAL here (yet). INITIAL is only
1384 * used for indirect blocks.
1386 * Recalculate bytes to reflect the actual media block
1389 bytes = (hammer2_off_t)1 <<
1390 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1391 chain->bytes = bytes;
1394 case HAMMER2_BREF_TYPE_VOLUME:
1395 panic("hammer2_chain_create: called with volume type");
1397 case HAMMER2_BREF_TYPE_INODE:
1398 KKASSERT(bytes == HAMMER2_INODE_BYTES);
1399 chain->data = (void *)&chain->u.ip->ip_data;
1401 case HAMMER2_BREF_TYPE_INDIRECT:
/* fixed: literals previously concatenated to "used tocreate indirect block" */
1402 panic("hammer2_chain_create: cannot be used to "
1403 "create indirect block");
1405 case HAMMER2_BREF_TYPE_DATA:
1407 /* leave chain->data NULL */
1408 KKASSERT(chain->data == NULL);
1413 * Potentially update the chain's key/keybits.
1415 chain->bref.key = key;
1416 chain->bref.keybits = keybits;
1421 * Locate a free blockref in the parent's array
1423 switch(parent->bref.type) {
1424 case HAMMER2_BREF_TYPE_INODE:
1425 KKASSERT((parent->u.ip->ip_data.op_flags &
1426 HAMMER2_OPFLAG_DIRECTDATA) == 0);
1427 KKASSERT(parent->data != NULL);
1428 base = &parent->data->ipdata.u.blockset.blockref[0];
1429 count = HAMMER2_SET_COUNT;
1431 case HAMMER2_BREF_TYPE_INDIRECT:
1432 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1435 KKASSERT(parent->data != NULL);
1436 base = &parent->data->npdata.blockref[0];
1438 count = parent->bytes / sizeof(hammer2_blockref_t);
1440 case HAMMER2_BREF_TYPE_VOLUME:
1441 KKASSERT(parent->data != NULL);
1442 base = &hmp->voldata.sroot_blockset.blockref[0];
1443 count = HAMMER2_SET_COUNT;
1446 panic("hammer2_chain_create: unrecognized blockref type: %d",
1453 * Scan for an unallocated bref, also skipping any slots occupied
1454 * by in-memory chain elements that may not yet have been updated
1455 * in the parent's bref array.
1457 bzero(&dummy_chain, sizeof(dummy_chain));
1458 for (i = 0; i < count; ++i) {
1460 dummy_chain.index = i;
1461 if (SPLAY_FIND(hammer2_chain_splay,
1462 &parent->shead, &dummy_chain) == NULL) {
1465 } else if (base[i].type == 0) {
1466 dummy_chain.index = i;
1467 if (SPLAY_FIND(hammer2_chain_splay,
1468 &parent->shead, &dummy_chain) == NULL) {
1475 * If no free blockref can be found we must create an indirect
1476 * block and move a number of blockrefs into it. With the parent
1477 * locked we can safely lock each child in order to move it without
1478 * causing a deadlock.
1480 * This may return the new indirect block or the old parent depending
1481 * on where the key falls.
1484 hammer2_chain_t *nparent;
1486 nparent = hammer2_chain_create_indirect(hmp, parent,
1488 if (nparent == NULL) {
1490 hammer2_chain_free(hmp, chain);
1494 if (parent != nparent) {
1496 hammer2_chain_unlock(hmp, parent);
1504 * Link the chain into its parent. Later on we will have to set
1505 * the MOVED bit in situations where we don't mark the new chain
1506 * as being modified.
1508 if (chain->parent != NULL)
1509 panic("hammer2: hammer2_chain_create: chain already connected");
1510 KKASSERT(chain->parent == NULL);
1511 chain->parent = parent;
1513 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, chain))
1514 panic("hammer2_chain_link: collision");
1515 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
1516 KKASSERT(parent->refs > 0);
1517 atomic_add_int(&parent->refs, 1);
1520 * Additional linkage for inodes. Reuse the parent pointer to
1521 * find the parent directory.
1523 * Cumulative adjustments are inherited on [re]attach.
1525 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1526 hammer2_chain_t *scan = parent;
1527 hammer2_inode_t *ip = chain->u.ip;
/* walk up past intermediate indirect blocks to the owning inode */
1529 while (scan->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1530 scan = scan->parent;
1531 if (scan->bref.type == HAMMER2_BREF_TYPE_INODE) {
1532 ip->pip = scan->u.ip;
1533 ip->pmp = scan->u.ip->pmp;
1534 ip->depth = scan->u.ip->depth + 1;
1535 ip->delta_icount += ip->ip_data.inode_count;
1536 ip->delta_dcount += ip->ip_data.data_count;
1537 ++ip->pip->delta_icount;
1542 * (allocated) indicates that this is a newly-created chain element
1543 * rather than a renamed chain element. In this situation we want
1544 * to place the chain element in the MODIFIED state.
1546 * The data area will be set up as follows:
1548 * VOLUME not allowed here.
1550 * INODE embedded data area will be set-up.
1552 * INDIRECT not allowed here.
1554 * DATA no data area will be set-up (caller is expected
1555 * to have logical buffers, we don't want to alias
1556 * the data onto device buffers!).
1559 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1560 hammer2_chain_modify(hmp, chain,
1561 HAMMER2_MODIFY_OPTDATA);
1562 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1563 /* not supported in this function */
1564 panic("hammer2_chain_create: bad type");
1565 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1566 hammer2_chain_modify(hmp, chain,
1567 HAMMER2_MODIFY_OPTDATA);
1569 hammer2_chain_modify(hmp, chain, 0);
1573 * When reconnecting inodes we have to call setsubmod()
1574 * to ensure that its state propagates up the newly
1577 * We cannot depend on the chain being in a MODIFIED
1578 * state, or it might already be in that state, so
1579 * even if the parent calls hammer2_chain_modify()
1580 * MOVED might not get set. Thus we have to set it
1583 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1584 hammer2_chain_ref(hmp, chain);
1585 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1587 hammer2_chain_parent_setsubmod(hmp, chain);
1592 hammer2_chain_unlock(hmp, parent);
1597 * Create an indirect block that covers one or more of the elements in the
1598 * current parent. Either returns the existing parent with no locking or
1599 * ref changes or returns the new indirect block locked and referenced,
1600 * depending on what the specified key falls into.
1602 * The key/keybits for the indirect mode only needs to follow four rules:
1604 * (1) That all elements underneath it fit within its key space and
1606 * (2) That all elements outside it are outside its key space.
1608 * (3) When creating the new indirect block any elements in the current
1609 * parent that fit within the new indirect block's keyspace must be
1610 * moved into the new indirect block.
1612 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
1613 * keyspace than the current parent, but lookup/iteration rules will
1614 * ensure (and must ensure) that rule (2) for all parents leading up
1615 * to the nearest inode or the root volume header is adhered to. This
1616 * is accomplished by always recursing through matching keyspaces in
1617 * the hammer2_chain_lookup() and hammer2_chain_next() API.
1619 * The current implementation calculates the current worst-case keyspace by
1620 * iterating the current parent and then divides it into two halves, choosing
1621 * whichever half has the most elements (not necessarily the half containing
1622 * the requested key).
1624 * We can also opt to use the half with the least number of elements. This
1625 * causes lower-numbered keys (aka logical file offsets) to recurse through
1626 * fewer indirect blocks and higher-numbered keys to recurse through more.
1627 * This also has the risk of not moving enough elements to the new indirect
1628 * block and being forced to create several indirect blocks before the element
/*
 * See the header comment above: splits an over-full parent by migrating
 * roughly half of its brefs into a freshly allocated indirect block.
 * NOTE(review): lines are elided in this listing (counters locount/
 * hicount/nkeybits and several braces are declared on unseen lines).
 */
1633 hammer2_chain_create_indirect(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1634 hammer2_key_t create_key, int create_bits)
1636 hammer2_blockref_t *base;
1637 hammer2_blockref_t *bref;
1638 hammer2_chain_t *chain;
1639 hammer2_chain_t *ichain;
1640 hammer2_chain_t dummy;
1641 hammer2_key_t key = create_key;
1642 int keybits = create_bits;
1650 * Calculate the base blockref pointer or NULL if the chain
1651 * is known to be empty.
1653 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA);
1654 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1658 * We still need to calculate the count for SPLAY lookups
1660 switch(parent->bref.type) {
1661 case HAMMER2_BREF_TYPE_INODE:
1662 count = HAMMER2_SET_COUNT;
1664 case HAMMER2_BREF_TYPE_INDIRECT:
1665 count = parent->bytes / sizeof(hammer2_blockref_t);
1667 case HAMMER2_BREF_TYPE_VOLUME:
1668 count = HAMMER2_SET_COUNT;
1671 panic("hammer2_chain_create_indirect: "
1672 "unrecognized blockref type: %d",
1679 * Locate a free blockref in the parent's array
1681 switch(parent->bref.type) {
1682 case HAMMER2_BREF_TYPE_INODE:
1683 base = &parent->data->ipdata.u.blockset.blockref[0];
1684 count = HAMMER2_SET_COUNT;
1686 case HAMMER2_BREF_TYPE_INDIRECT:
1687 base = &parent->data->npdata.blockref[0];
1688 count = parent->bytes / sizeof(hammer2_blockref_t);
1690 case HAMMER2_BREF_TYPE_VOLUME:
1691 base = &hmp->voldata.sroot_blockset.blockref[0];
1692 count = HAMMER2_SET_COUNT;
1695 panic("hammer2_chain_create_indirect: "
1696 "unrecognized blockref type: %d",
1704 * Scan for an unallocated bref, also skipping any slots occupied
1705 * by in-memory chain elements that may not yet have been updated
1706 * in the parent's bref array.
/*
 * First pass: widen (key, keybits) to cover every existing child key
 * and tally which half of the widened range each child falls into.
 */
1708 bzero(&dummy, sizeof(dummy));
1709 for (i = 0; i < count; ++i) {
1713 * Optimize the case where the parent is still in its
1714 * initially created state.
1716 if (base == NULL || base[i].type == 0) {
1718 chain = SPLAY_FIND(hammer2_chain_splay,
1719 &parent->shead, &dummy);
1722 bref = &chain->bref;
1728 * Expand our calculated key range (key, keybits) to fit
1729 * the scanned key. nkeybits represents the full range
1730 * that we will later cut in half (two halves @ nkeybits - 1).
1733 if (nkeybits < bref->keybits)
1734 nkeybits = bref->keybits;
1735 while (nkeybits < 64 &&
1736 (~(((hammer2_key_t)1 << nkeybits) - 1) &
1737 (key ^ bref->key)) != 0) {
1742 * If the new key range is larger we have to determine
1743 * which side of the new key range the existing keys fall
1744 * under by checking the high bit, then collapsing the
1745 * locount into the hicount or vice-versa.
1747 if (keybits != nkeybits) {
1748 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
1759 * The newly scanned key will be in the lower half or the
1760 * higher half of the (new) key range.
1762 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
1769 * Adjust keybits to represent half of the full range calculated
1775 * Select whichever half contains the most elements. Theoretically
1776 * we can select either side as long as it contains at least one
1777 * element (in order to ensure that a free slot is present to hold
1778 * the indirect block).
1780 key &= ~(((hammer2_key_t)1 << keybits) - 1);
1781 if (hammer2_indirect_optimize) {
1783 * Insert node for least number of keys, this will arrange
1784 * the first few blocks of a large file or the first few
1785 * inodes in a directory with fewer indirect blocks when
1788 if (hicount < locount && hicount != 0)
1789 key |= (hammer2_key_t)1 << keybits;
1791 key &= ~(hammer2_key_t)1 << keybits;
1794 * Insert node for most number of keys, best for heavily
1797 if (hicount > locount)
1798 key |= (hammer2_key_t)1 << keybits;
1800 key &= ~(hammer2_key_t)1 << keybits;
1804 * How big should our new indirect block be? It has to be at least
1805 * as large as its parent.
1807 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
1808 nbytes = HAMMER2_IND_BYTES_MIN;
1810 nbytes = HAMMER2_IND_BYTES_MAX;
1811 if (nbytes < count * sizeof(hammer2_blockref_t))
1812 nbytes = count * sizeof(hammer2_blockref_t);
1815 * Ok, create our new indirect block
1817 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
1818 dummy.bref.key = key;
1819 dummy.bref.keybits = keybits;
1820 dummy.bref.data_off = hammer2_bytes_to_radix(nbytes);
1821 ichain = hammer2_chain_alloc(hmp, &dummy.bref);
1822 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
1825 * Iterate the original parent and move the matching brefs into
1826 * the new indirect block.
/* Second pass: migrate children in the chosen half into ichain. */
1828 for (i = 0; i < count; ++i) {
1830 * For keying purposes access the bref from the media or
1831 * from our in-memory cache. In cases where the in-memory
1832 * cache overrides the media the keyrefs will be the same
1833 * anyway so we can avoid checking the cache when the media
1836 if (base == NULL || base[i].type == 0) {
1838 chain = SPLAY_FIND(hammer2_chain_splay,
1839 &parent->shead, &dummy);
1840 if (chain == NULL) {
1842 * Select index indirect block is placed in
1844 if (ichain->index < 0)
1848 bref = &chain->bref;
1854 * Skip keys not in the chosen half (low or high), only bit
1855 * (keybits - 1) needs to be compared but for safety we
1856 * will compare all msb bits plus that bit again.
1858 if ((~(((hammer2_key_t)1 << keybits) - 1) &
1859 (key ^ bref->key)) != 0) {
1864 * This element is being moved, its slot is available
1865 * for our indirect block.
1867 if (ichain->index < 0)
1871 * Load the new indirect block by acquiring or allocating
1872 * the related chain entries, then simply move them to the
1873 * new parent (ichain).
1875 * When adjusting the parent/child relationship we must
1876 * set the MOVED bit if we do not otherwise set the
1877 * MODIFIED bit, and call setsubmod() to ensure that the
1878 * parent sees the bref adjustment.
1880 * We must still set SUBMODIFIED in the parent but we do
1881 * that after the loop.
1883 * XXX we really need a lock here but we don't need the
1884 * data. NODATA feature needed.
1886 chain = hammer2_chain_get(hmp, parent, i,
1887 HAMMER2_LOOKUP_NODATA);
1888 SPLAY_REMOVE(hammer2_chain_splay, &parent->shead, chain);
1889 if (SPLAY_INSERT(hammer2_chain_splay, &ichain->shead, chain))
1890 panic("hammer2_chain_create_indirect: collision");
1891 chain->parent = ichain;
1893 bzero(&base[i], sizeof(base[i]));
1894 atomic_add_int(&parent->refs, -1);
1895 atomic_add_int(&ichain->refs, 1);
1896 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1897 hammer2_chain_ref(hmp, chain);
1898 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1900 hammer2_chain_unlock(hmp, chain);
1901 KKASSERT(parent->refs > 0);
1906 * Insert the new indirect block into the parent now that we've
1907 * cleared out some entries in the parent. We calculated a good
1908 * insertion index in the loop above (ichain->index).
1910 * We don't have to set MOVED here because we mark ichain modified
1911 * down below (so the normal modified -> flush -> set-moved sequence
1914 KKASSERT(ichain->index >= 0);
1915 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, ichain))
1916 panic("hammer2_chain_create_indirect: ichain insertion");
1917 ichain->parent = parent;
1918 atomic_add_int(&parent->refs, 1);
1921 * Mark the new indirect block modified after insertion, which
1922 * will propagate up through parent all the way to the root and
1923 * also allocate the physical block in ichain for our caller,
1924 * and assign ichain->data to a pre-zero'd space (because there
1925 * is no prior data to copy into it).
1927 * We have to set SUBMODIFIED in ichain's flags manually so the
1928 * flusher knows it has to recurse through it to get to all of
1929 * our moved blocks, then call setsubmod() to set the bit
1932 hammer2_chain_modify(hmp, ichain, HAMMER2_MODIFY_OPTDATA);
1933 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
1934 hammer2_chain_parent_setsubmod(hmp, ichain);
1937 * Figure out what to return.
1939 if (create_bits >= keybits) {
1941 * Key being created is way outside the key range,
1942 * return the original parent.
1944 hammer2_chain_unlock(hmp, ichain);
1945 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
1946 (create_key ^ key)) {
1948 * Key being created is outside the key range,
1949 * return the original parent.
1951 hammer2_chain_unlock(hmp, ichain);
1954 * Otherwise it's in the range, return the new parent.
1963 * Physically delete the specified chain element. Note that inodes with
1964 * open descriptors should not be deleted (as with other filesystems) until
1965 * the last open descriptor is closed.
1967 * This routine will remove the chain element from its parent and potentially
1968 * also recurse upward and delete indirect blocks which become empty as a
1971 * The caller must pass a pointer to the chain's parent, also locked and
1972 * referenced. (*parentp) will be modified in a manner similar to a lookup
1973 * or iteration when indirect blocks are also deleted as a side effect.
/*
 * Detach (chain) from (parent): zero its bref slot in the parent's
 * media array, remove it from the in-memory SPLAY tree, and flag it
 * DELETED. See the header comment above for caller locking rules.
 */
1976 hammer2_chain_delete(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1977 hammer2_chain_t *chain)
1979 hammer2_blockref_t *base;
1980 hammer2_inode_t *ip;
1983 if (chain->parent != parent)
1984 panic("hammer2_chain_delete: parent mismatch");
1987 * Mark the parent modified so our base[] pointer remains valid
1988 * while we move entries. For the optimized indirect block
1989 * case mark the parent moved instead.
1991 * Calculate the blockref reference in the parent
1993 switch(parent->bref.type) {
1994 case HAMMER2_BREF_TYPE_INODE:
1995 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
1996 base = &parent->data->ipdata.u.blockset.blockref[0];
1997 count = HAMMER2_SET_COUNT;
1999 case HAMMER2_BREF_TYPE_INDIRECT:
2000 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA |
2001 HAMMER2_MODIFY_NO_MODIFY_TID);
2002 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2005 base = &parent->data->npdata.blockref[0];
2006 count = parent->bytes / sizeof(hammer2_blockref_t);
2008 case HAMMER2_BREF_TYPE_VOLUME:
2009 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2010 base = &hmp->voldata.sroot_blockset.blockref[0];
2011 count = HAMMER2_SET_COUNT;
2014 panic("hammer2_chain_delete: unrecognized blockref type: %d",
2021 * Disconnect the bref in the parent, remove the chain, and
2022 * disconnect in-memory fields from the parent.
2024 KKASSERT(chain->index >= 0 && chain->index < count);
2026 bzero(&base[chain->index], sizeof(*base));
2028 SPLAY_REMOVE(hammer2_chain_splay, &parent->shead, chain);
2029 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2030 atomic_add_int(&parent->refs, -1); /* for splay entry */
2032 chain->parent = NULL;
2035 * Cumulative adjustments must be propagated to the parent inode
2036 * when deleting and synchronized to ip. A future reattachment
2037 * (e.g. during a rename) expects only to use ip_data.*_count.
2039 * Clear the pointer to the parent inode.
2041 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
/*
 * NOTE(review): these adjustments dereference ip->pip without a
 * visible NULL check in this listing — elided lines presumably
 * guarantee a parent inode here; confirm against the full source.
 */
2044 ip->pip->delta_icount += ip->delta_icount;
2045 ip->pip->delta_dcount += ip->delta_dcount;
2046 ip->ip_data.inode_count += ip->delta_icount;
2047 ip->ip_data.data_count += ip->delta_dcount;
2048 ip->delta_icount = 0;
2049 ip->delta_dcount = 0;
2050 --ip->pip->delta_icount;
2053 chain->u.ip->depth = 0;
2057 * The chain is still likely referenced, possibly even by a vnode
2058 * (if an inode), so defer further action until the chain gets
2064 * Recursively flush the specified chain. The chain is locked and
2065 * referenced by the caller and will remain so on return. The chain
2066 * will remain referenced throughout but can temporarily lose its
2067 * lock during the recursion to avoid unnecessarily stalling user
/* List of chains whose flush was deferred at the recursion depth limit. */
2072 TAILQ_HEAD(flush_deferral_list, hammer2_chain);
/*
 * Per-flush bookkeeping threaded through the recursive flush pass
 * (additional fields, e.g. a depth counter, are on elided lines).
 */
2074 struct hammer2_flush_info {
2075 struct flush_deferral_list flush_list;
/* tid stamped into bref.mirror_tid/modify_tid when MODIFIED is cleared */
2077 hammer2_tid_t modify_tid;
2080 typedef struct hammer2_flush_info hammer2_flush_info_t;
2083 hammer2_chain_flush_pass1(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2084 hammer2_flush_info_t *info)
2086 hammer2_blockref_t *bref;
2087 hammer2_off_t pbase;
2095 * If we hit the stack recursion depth limit defer the operation.
2096 * The controller of the info structure will execute the deferral
2097 * list and then retry.
2099 * This is only applicable if SUBMODIFIED is set. After a reflush
2100 * SUBMODIFIED will probably be cleared and we want to drop through
2101 * to finish processing the current element so our direct parent
2102 * can process the results.
2104 if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT &&
2105 (chain->flags & HAMMER2_CHAIN_SUBMODIFIED)) {
2106 if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
2107 hammer2_chain_ref(hmp, chain);
2108 TAILQ_INSERT_TAIL(&info->flush_list,
2110 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
2115 if (hammer2_debug & 0x0008)
2116 kprintf("%*.*sCHAIN type=%d@%08jx %p/%d %04x {\n",
2117 info->depth, info->depth, "",
2118 chain->bref.type, chain->bref.data_off,
2119 chain, chain->refs, chain->flags);
2122 * Flush any children of this chain.
2124 * NOTE: If we use a while() here an active filesystem can
2125 * prevent the flush from ever finishing.
2127 if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
2128 hammer2_blockref_t *base;
2129 hammer2_chain_t *child;
2130 hammer2_chain_t *next;
2132 int submodified = 0;
2136 * Clear SUBMODIFIED now. Flag any races during the flush
2137 * with the (submodified) local variable and re-arm it
2138 * as necessary after the loop is done.
2140 * Delaying the setting of the chain to MODIFIED can reduce
2143 * Modifications to the children will propagate up, forcing
2144 * us to become modified and copy-on-write too. Be sure
2145 * to modify chain (as a side effect of the recursive
2146 * flush) ONLY if it is actually being modified by the
2149 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2152 * Flush the children and update the blockrefs in the chain.
2153 * Be careful of ripouts during the loop.
2155 next = SPLAY_MIN(hammer2_chain_splay, &chain->shead);
2156 while ((child = next) != NULL) {
2157 next = SPLAY_NEXT(hammer2_chain_splay,
2158 &chain->shead, child);
2160 * We only recurse if SUBMODIFIED (internal node)
2161 * or MODIFIED (internal node or leaf) is set.
2162 * However, we must still track whether any MOVED
2163 * entries are present to determine if the chain's
2164 * blockref's need updating or not.
2166 if (child->flags & HAMMER2_CHAIN_MOVED)
2168 if ((child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2169 HAMMER2_CHAIN_MODIFIED |
2170 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2175 * Propagate the DESTROYED flag if found set, then
2176 * recurse the flush.
2178 hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_MAYBE);
2179 if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
2180 (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
2181 atomic_set_int(&child->flags,
2182 HAMMER2_CHAIN_DESTROYED |
2183 HAMMER2_CHAIN_SUBMODIFIED);
2186 hammer2_chain_flush_pass1(hmp, child, info);
2190 * No point loading blockrefs yet if the
2191 * child (recursively) is still dirty.
2193 if (child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2194 HAMMER2_CHAIN_MODIFIED |
2195 HAMMER2_CHAIN_MODIFIED_AUX)) {
2197 if (hammer2_debug & 0x0008)
2200 if (child->flags & HAMMER2_CHAIN_MOVED) {
2201 if (hammer2_debug & 0x0008)
2205 if (hammer2_debug & 0x0008)
2207 hammer2_chain_unlock(hmp, child);
2211 * If the sub-tree was not completely synced we currently do
2212 * not attempt to propagate the bref all the way back up.
2213 * Our bref pointers to the children are not updated yet in
2214 * this situation but the children will have CHAIN_MOVED set
2215 * and cannot be destroyed until the parent synchronizes
2218 * If the sub-tree had to be recursed the bref propagates
2219 * back up and may require 'chain' to become modified.
2223 (chain->flags & HAMMER2_CHAIN_SUBMODIFIED)) {
2225 * No point loading up the blockrefs if submodified
2226 * got re-set. The modified and flushed children
2227 * will have set HAMMER2_CHAIN_MOVED and cannot be
2228 * freed until we've synchronized the case.
2230 * NOTE: Even though we cleared the SUBMODIFIED flag
2231 * it can still get re-set by operations
2232 * occurring under our chain, so check both.
2234 atomic_set_int(&chain->flags,
2235 HAMMER2_CHAIN_SUBMODIFIED);
2236 } else if (submoved) {
2238 * Ok, we can modify the blockrefs in this chain
2239 * entry. Mark it modified. Calculate the
2240 * blockref array after marking it modified (since
2241 * that may change the underlying data ptr).
2243 * NOTE: We only do this if submoved != 0, otherwise
2244 * there may not be any changes and setting
2245 * the chain modified will re-arm the MOVED
2246 * bit recursively, resulting in O(N^2)
2249 * NOTE: We don't want hammer2_chain_modify() to
2250 * recursively set the SUBMODIFIED flag
2251 * upward in this case!
2253 hammer2_chain_modify(hmp, chain,
2254 HAMMER2_MODIFY_NOSUB |
2255 HAMMER2_MODIFY_NO_MODIFY_TID);
2257 switch(chain->bref.type) {
2258 case HAMMER2_BREF_TYPE_INODE:
2259 KKASSERT((chain->data->ipdata.op_flags &
2260 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2261 base = &chain->data->ipdata.u.blockset.
2263 count = HAMMER2_SET_COUNT;
2265 case HAMMER2_BREF_TYPE_INDIRECT:
2266 base = &chain->data->npdata.blockref[0];
2267 count = chain->bytes /
2268 sizeof(hammer2_blockref_t);
2270 case HAMMER2_BREF_TYPE_VOLUME:
2271 base = &hmp->voldata.sroot_blockset.blockref[0];
2272 count = HAMMER2_SET_COUNT;
2276 panic("hammer2_chain_get: "
2277 "unrecognized blockref type: %d",
2282 * Update the blockrefs.
2284 * When updating the blockset embedded in the volume
2285 * header we must also update voldata.mirror_tid.
2287 next = SPLAY_MIN(hammer2_chain_splay, &chain->shead);
2288 while ((child = next) != NULL) {
2289 next = SPLAY_NEXT(hammer2_chain_splay,
2290 &chain->shead, child);
2291 KKASSERT(child->index >= 0 &&
2292 child->index < count);
2293 hammer2_chain_lock(hmp, child,
2294 HAMMER2_RESOLVE_NEVER);
2295 KKASSERT(child->parent == chain);
2296 if (child->flags & HAMMER2_CHAIN_MOVED) {
2297 base[child->index] = child->bref;
2298 if (chain->bref.mirror_tid <
2299 child->bref.mirror_tid) {
2300 chain->bref.mirror_tid =
2301 child->bref.mirror_tid;
2303 if (chain->bref.type ==
2304 HAMMER2_BREF_TYPE_VOLUME &&
2305 hmp->voldata.mirror_tid <
2306 child->bref.mirror_tid) {
2307 hmp->voldata.mirror_tid =
2308 child->bref.mirror_tid;
2310 atomic_clear_int(&child->flags,
2311 HAMMER2_CHAIN_MOVED);
2312 hammer2_chain_drop(hmp, child);
2313 } else if (bcmp(&base[child->index],
2315 sizeof(child->bref)) != 0) {
2316 kprintf("child %p index %d\n",
2317 child, child->index);
2318 panic("hammer2: unflagged bref update");
2320 hammer2_chain_unlock(hmp, child);
2326 * If destroying the object we unconditionally clear the MODIFIED
2327 * and MOVED bits, and we destroy the buffer without writing it
2330 * We don't bother updating the hash/crc or the chain bref.
2332 * NOTE: The destroy'd object's bref has already been updated.
2333 * so we can clear MOVED without propagating mirror_tid
2334 * or modify_tid upward.
2336 * XXX allocations for unflushed data can be returned to the
2339 if (chain->flags & HAMMER2_CHAIN_DESTROYED) {
2340 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2342 chain->bp->b_flags |= B_INVAL|B_RELBUF;
2344 atomic_clear_int(&chain->flags,
2345 HAMMER2_CHAIN_MODIFIED |
2346 HAMMER2_CHAIN_MODIFY_TID);
2347 hammer2_chain_drop(hmp, chain);
2349 if (chain->flags & HAMMER2_CHAIN_MODIFIED_AUX) {
2350 atomic_clear_int(&chain->flags,
2351 HAMMER2_CHAIN_MODIFIED_AUX);
2353 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2354 atomic_clear_int(&chain->flags,
2355 HAMMER2_CHAIN_MOVED);
2356 hammer2_chain_drop(hmp, chain);
2362 * Flush this chain entry only if it is marked modified.
2364 if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2365 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2370 * Synchronize cumulative data and inode count adjustments to
2371 * the inode and propagate the deltas upward to the parent.
2373 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2374 hammer2_inode_t *ip;
2377 ip->ip_data.inode_count += ip->delta_icount;
2378 ip->ip_data.data_count += ip->delta_dcount;
2380 ip->pip->delta_icount += ip->delta_icount;
2381 ip->pip->delta_dcount += ip->delta_dcount;
2383 ip->delta_icount = 0;
2384 ip->delta_dcount = 0;
2388 * Clear MODIFIED and set HAMMER2_CHAIN_MOVED. The caller
2389 * will re-test the MOVED bit. We must also update the mirror_tid
2390 * and modify_tid fields as appropriate.
2392 * bits own a single chain ref and the MOVED bit owns its own
2395 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2396 chain->bref.mirror_tid = info->modify_tid;
2397 if (chain->flags & HAMMER2_CHAIN_MODIFY_TID)
2398 chain->bref.modify_tid = info->modify_tid;
2399 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
2400 HAMMER2_CHAIN_MODIFY_TID);
2401 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2402 hammer2_chain_drop(hmp, chain);
2404 /* inherit ref from the MODIFIED we cleared */
2405 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2408 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED_AUX);
2411 * If this is part of a recursive flush we can go ahead and write
2412 * out the buffer cache buffer and pass a new bref back up the chain.
2414 * This will never be a volume header.
2416 switch(chain->bref.type) {
2417 case HAMMER2_BREF_TYPE_VOLUME:
2419 * The volume header is flushed manually by the syncer, not
2423 case HAMMER2_BREF_TYPE_DATA:
2425 * Data elements have already been flushed via the logical
2426 * file buffer cache. Their hash was set in the bref by
2427 * the vop_write code.
2429 * Make sure the buffer(s) have been flushed out here.
2432 bbytes = chain->bytes;
2433 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
2434 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2436 bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
2438 if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
2439 (B_CACHE | B_DIRTY)) {
2443 bp->b_flags |= B_RELBUF;
2449 case HAMMER2_BREF_TYPE_INDIRECT:
2451 * Indirect blocks may be in an INITIAL state.
2456 * Embedded elements have to be flushed out.
2458 KKASSERT(chain->data != NULL);
2459 bref = &chain->bref;
2461 KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
2463 if (chain->bp == NULL) {
2465 * The data is embedded, we have to acquire the
2466 * buffer cache buffer and copy the data into it.
2468 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
2469 bbytes = HAMMER2_MINIOSIZE;
2470 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
2471 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2474 * The getblk() optimization can only be used if the
2475 * physical block size matches the request.
2477 if (chain->bytes == bbytes) {
2478 bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
2481 error = bread(hmp->devvp, pbase, bbytes, &bp);
2482 KKASSERT(error == 0);
2484 bdata = (char *)bp->b_data + boff;
2487 * Copy the data to the buffer, mark the buffer
2488 * dirty, and convert the chain to unmodified.
2490 bcopy(chain->data, bdata, chain->bytes);
2491 bp->b_flags |= B_CLUSTEROK;
2494 chain->bref.check.iscsi32.value =
2495 hammer2_icrc32(chain->data, chain->bytes);
2496 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
2497 ++hammer2_iod_meta_write;
2499 ++hammer2_iod_indr_write;
2501 chain->bref.check.iscsi32.value =
2502 hammer2_icrc32(chain->data, chain->bytes);
2507 * Adjustments to the bref. The caller will use this to adjust
2508 * our chain's pointer to this chain element.
2510 bref = &chain->bref;
2512 switch(bref->type) {
2513 case HAMMER2_BREF_TYPE_VOLUME:
2514 KKASSERT(chain->data != NULL);
2515 KKASSERT(chain->bp == NULL);
2517 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
2519 (char *)&hmp->voldata +
2520 HAMMER2_VOLUME_ICRC1_OFF,
2521 HAMMER2_VOLUME_ICRC1_SIZE);
2522 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
2524 (char *)&hmp->voldata +
2525 HAMMER2_VOLUME_ICRC0_OFF,
2526 HAMMER2_VOLUME_ICRC0_SIZE);
2527 hmp->voldata.icrc_volheader =
2529 (char *)&hmp->voldata +
2530 HAMMER2_VOLUME_ICRCVH_OFF,
2531 HAMMER2_VOLUME_ICRCVH_SIZE);
2538 if (hammer2_debug & 0x0008) {
2539 kprintf("%*.*s} %p/%d %04x ",
2540 info->depth, info->depth, "",
2541 chain, chain->refs, chain->flags);
2547 * PASS2 - not yet implemented (should be called only with the root chain?)
2550 hammer2_chain_flush_pass2(hammer2_mount_t *hmp, hammer2_chain_t *chain)
2556 * Stand-alone flush. If the chain is unable to completely flush we have
2557 * to be sure that SUBMODIFIED propagates up the parent chain. We must not
2558 * clear the MOVED bit after flushing in this situation or our desynchronized
2559 * bref will not properly update in the parent.
2561 * This routine can be called from several places but the most important
2562 * is from the hammer2_vop_reclaim() function. We want to try to completely
2563 * clean out the inode structure to prevent disconnected inodes from
2564 * building up and blowing out the kmalloc pool.
2566 * If modify_tid is 0 (usual case), a new modify_tid is allocated and
2567 * applied to the flush. The depth-limit handling code is the only
2568 * code which passes a non-zero modify_tid to hammer2_chain_flush().
2571 hammer2_chain_flush(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2572 hammer2_tid_t modify_tid)
2574 hammer2_chain_t *parent;
2575 hammer2_chain_t *scan;
2576 hammer2_blockref_t *base;
2577 hammer2_flush_info_t info;
2582 * Execute the recursive flush and handle deferrals.
2584 * Chains can be ridiculously long (thousands deep), so to
2585 * avoid blowing out the kernel stack the recursive flush has a
2586 * depth limit. Elements at the limit are placed on a list
2587 * for re-execution after the stack has been popped.
2589 bzero(&info, sizeof(info));
2590 TAILQ_INIT(&info.flush_list);
2592 if (modify_tid == 0) {
2593 hammer2_voldata_lock(hmp);
2594 info.modify_tid = hmp->voldata.alloc_tid++;
2595 hammer2_voldata_unlock(hmp);
2597 info.modify_tid = modify_tid;
2605 hammer2_chain_flush_pass1(hmp, chain, &info);
2608 while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
2610 * Secondary recursion. Note that a reference is
2611 * retained from the element's presence on the
2614 KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
2615 TAILQ_REMOVE(&info.flush_list, scan, flush_node);
2616 atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);
2619 * Now that we've popped back up we can do a secondary
2620 * recursion on the deferred elements.
2622 if (hammer2_debug & 0x0040)
2623 kprintf("defered flush %p\n", scan);
2624 hammer2_chain_lock(hmp, scan, HAMMER2_RESOLVE_MAYBE);
2625 hammer2_chain_flush(hmp, scan, info.modify_tid);
2626 hammer2_chain_unlock(hmp, scan);
2629 * Only flag a reflush if SUBMODIFIED is no longer
2630 * set. If SUBMODIFIED is set the element will just
2631 * wind up on our flush_list again.
2633 if ((scan->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2634 HAMMER2_CHAIN_MODIFIED |
2635 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2638 hammer2_chain_drop(hmp, scan);
2640 if ((hammer2_debug & 0x0040) && reflush)
2641 kprintf("reflush %p\n", chain);
2645 * The SUBMODIFIED bit must propagate upward if the chain could not
2646 * be completely flushed.
2648 if (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2649 HAMMER2_CHAIN_MODIFIED |
2650 HAMMER2_CHAIN_MODIFIED_AUX |
2651 HAMMER2_CHAIN_MOVED)) {
2652 hammer2_chain_parent_setsubmod(hmp, chain);
2656 * If the only thing left is a simple bref update try to
2657 * pro-actively update the parent, otherwise return early.
2659 parent = chain->parent;
2660 if (parent == NULL) {
2663 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
2664 (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2665 HAMMER2_CHAIN_MODIFIED |
2666 HAMMER2_CHAIN_MODIFIED_AUX |
2667 HAMMER2_CHAIN_MOVED)) != HAMMER2_CHAIN_MOVED) {
2672 * We are locking backwards so allow the lock to fail
2674 if (lockmgr(&parent->lk, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
2679 * We are updating brefs but we have to call chain_modify()
2680 * because our caller is not being run from a recursive flush.
2682 * This will also chain up the parent list and set the SUBMODIFIED
2685 * We do not want to set HAMMER2_CHAIN_MODIFY_TID here because the
2686 * modification is only related to updating a bref in the parent.
2688 * When updating the blockset embedded in the volume header we must
2689 * also update voldata.mirror_tid.
2691 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
2692 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2694 switch(parent->bref.type) {
2695 case HAMMER2_BREF_TYPE_INODE:
2696 base = &parent->data->ipdata.u.blockset.
2698 count = HAMMER2_SET_COUNT;
2700 case HAMMER2_BREF_TYPE_INDIRECT:
2701 base = &parent->data->npdata.blockref[0];
2702 count = parent->bytes /
2703 sizeof(hammer2_blockref_t);
2705 case HAMMER2_BREF_TYPE_VOLUME:
2706 base = &hmp->voldata.sroot_blockset.blockref[0];
2707 count = HAMMER2_SET_COUNT;
2708 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2709 if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
2710 hmp->voldata.mirror_tid =
2711 chain->bref.mirror_tid;
2717 panic("hammer2_chain_flush: "
2718 "unrecognized blockref type: %d",
2723 * Update the blockref in the parent. We do not have to set
2724 * MOVED in the parent because the parent has been marked modified,
2725 * so the flush sequence will pick up the bref change.
2727 * We do have to propagate mirror_tid upward.
2729 KKASSERT(chain->index >= 0 &&
2730 chain->index < count);
2731 KKASSERT(chain->parent == parent);
2732 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2733 base[chain->index] = chain->bref;
2734 if (parent->bref.mirror_tid < chain->bref.mirror_tid)
2735 parent->bref.mirror_tid = chain->bref.mirror_tid;
2736 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2737 hammer2_chain_drop(hmp, chain);
2738 } else if (bcmp(&base[chain->index],
2740 sizeof(chain->bref)) != 0) {
2741 panic("hammer2: unflagged bref update(2)");
2744 lockmgr(&parent->lk, LK_RELEASE); /* release manual lockmgr op */
2745 hammer2_chain_unlock(hmp, parent);