2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * This subsystem handles direct and indirect block searches, recursions,
37 * creation, and deletion. Chains of blockrefs are tracked and modifications
38 * are flagged for propagation... eventually all the way back to the volume
42 #include <sys/cdefs.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
51 static int hammer2_indirect_optimize; /* XXX SYSCTL */
53 static hammer2_chain_t *hammer2_chain_create_indirect(
54 hammer2_mount_t *hmp, hammer2_chain_t *parent,
55 hammer2_key_t key, int keybits);
60 SPLAY_GENERATE(hammer2_chain_splay, hammer2_chain, snode, hammer2_chain_cmp);
/*
 * SPLAY tree comparator for chain elements, keyed on ->index.
 *
 * NOTE(review): computes chain2->index - chain1->index, the reverse of
 * the usual a-minus-b idiom, so ordering appears descending by index —
 * confirm against the original source.  Plain int subtraction can also
 * overflow for extreme index values.  This fragment is missing its
 * return type and braces in the sampled view.
 */
63 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
65 return(chain2->index - chain1->index);
69 * Recursively mark the parent chain elements so flushes can find
70 * modified elements. Stop when we hit a chain already flagged
71 * SUBMODIFIED, but ignore the SUBMODIFIED bit that might be set
74 * SUBMODIFIED is not set on the chain passed in.
76 * XXX rename of parent can create a SMP race
/*
 * Walk up the parent linkage setting HAMMER2_CHAIN_SUBMODIFIED on each
 * ancestor, stopping at the first ancestor that already has the flag
 * (its ancestors are assumed to have it too).  The flag tells the
 * flusher which subtrees contain modified elements.
 *
 * NOTE(review): hmp is not referenced in the visible fragment —
 * presumably kept for interface symmetry; verify in the full source.
 * Body braces are missing from this sampled view.
 */
79 hammer2_chain_parent_setsubmod(hammer2_mount_t *hmp, hammer2_chain_t *chain)
81 hammer2_chain_t *parent;
83 parent = chain->parent;
84 while (parent && (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) == 0) {
85 atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);
86 parent = parent->parent;
91 * Allocate a new disconnected chain element representing the specified
92 * bref. The chain element is locked exclusively and refs is set to 1.
94 * This essentially allocates a system memory structure representing one
95 * of the media structure types, including inodes.
/*
 * Allocate a disconnected in-memory chain element for the given blockref.
 * The backing structure depends on bref->type (inode, indirect block,
 * data block); VOLUME type is illegal here and panics.  The returned
 * chain is exclusively locked with one ref (per the header comment).
 *
 * NOTE(review): this fragment is missing many lines (switch statement
 * header, break statements, chain assignment from ip/np/dp, closing
 * braces) — do not treat it as compilable; consult the full source.
 */
98 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
100 hammer2_chain_t *chain;
102 hammer2_indblock_t *np;
/* bytes = physical allocation size encoded as a radix in data_off's low bits */
104 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
107 * Construct the appropriate system structure.
110 case HAMMER2_BREF_TYPE_INODE:
111 ip = kmalloc(sizeof(*ip), hmp->minode, M_WAITOK | M_ZERO);
116 case HAMMER2_BREF_TYPE_INDIRECT:
117 np = kmalloc(sizeof(*np), hmp->mchain, M_WAITOK | M_ZERO);
121 case HAMMER2_BREF_TYPE_DATA:
122 dp = kmalloc(sizeof(*dp), hmp->mchain, M_WAITOK | M_ZERO);
126 case HAMMER2_BREF_TYPE_VOLUME:
128 panic("hammer2_chain_alloc volume type illegal for op");
131 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
136 * Only set bref_flush if the bref has a real media offset, otherwise
137 * the caller has to wait for the chain to be modified/block-allocated
138 * before a blockref can be synchronized with its (future) parent.
141 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
142 chain->bref_flush = *bref;
143 chain->index = -1; /* not yet assigned */
145 chain->bytes = bytes;
/* CST (cache-state) lock starts held exclusively by the caller */
146 ccms_cst_init(&chain->cst, chain);
147 ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
153 * Free a disconnected chain element
/*
 * Free a fully disconnected chain element.  Asserts that no buffer or
 * data pointer remains attached and (for inodes) that the vnode has
 * been released, then returns the backing memory to the appropriate
 * malloc zone (minode for inodes, mchain for everything else).
 *
 * NOTE(review): fragment is missing several lines (an early block for
 * INODE/VOLUME types, else branch, braces) in this sampled view.
 */
156 hammer2_chain_free(hammer2_mount_t *hmp, hammer2_chain_t *chain)
160 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
161 chain->bref.type == HAMMER2_BREF_TYPE_VOLUME) {
165 KKASSERT(chain->bp == NULL);
166 KKASSERT(chain->data == NULL);
167 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
168 chain->u.ip->vp == NULL);
170 if ((mem = chain->u.mem) != NULL) {
172 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
173 kfree(mem, hmp->minode);
175 kfree(mem, hmp->mchain);
180 * Add a reference to a chain element (for shared access). The chain
181 * element must already have at least 1 ref controlled by the caller.
/*
 * Add a reference to a chain element.  The caller must already hold at
 * least one ref (asserted), so the count can never be raced from 0.
 * hmp is unused in the visible fragment — presumably interface symmetry.
 */
184 hammer2_chain_ref(hammer2_mount_t *hmp, hammer2_chain_t *chain)
186 KKASSERT(chain->refs > 0);
187 atomic_add_int(&chain->refs, 1);
191 * Drop the callers reference to the chain element. If the ref count
192 * reaches zero the chain element and its related structure (typically an
193 * inode or indirect block) will be freed and the parent will be
194 * recursively dropped.
196 * MOVED and MODIFIED elements hold additional references so it should not
197 * be possible for the count on a modified element to drop to 0.
199 * The chain element must NOT be locked by the caller.
201 * The parent might or might not be locked by the caller but if so it
202 * will also be referenced so we shouldn't recurse upward.
/*
 * Drop the caller's reference.  On a 1->0 transition the chain is
 * unlinked from its parent's SPLAY tree (under the parent's CST lock),
 * marked DELETED, freed, and the drop recurses to the parent.  Uses
 * atomic_cmpset_int compare-and-swap loops so concurrent ref/drop races
 * simply retry.
 *
 * NOTE(review): this fragment is missing the enclosing retry loop,
 * several conditionals and braces; the cmpset retry structure can only
 * be confirmed against the full source.
 */
205 hammer2_chain_drop(hammer2_mount_t *hmp, hammer2_chain_t *chain)
207 hammer2_chain_t *parent;
/* the root volume chain is never dropped through this path */
216 KKASSERT(chain != &hmp->vchain);
217 parent = chain->parent;
219 ccms_thread_lock(&parent->cst,
220 CCMS_STATE_EXCLUSIVE);
222 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
224 * Succeeded, recurse and drop parent.
225 * These chain elements should be synchronized
226 * so no delta data or inode count updates
229 KKASSERT((chain->flags &
230 (HAMMER2_CHAIN_MOVED |
231 HAMMER2_CHAIN_MODIFIED)) == 0);
233 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
241 if (!(chain->flags & HAMMER2_CHAIN_DELETED)) {
243 * Disconnect the chain and clear
244 * pip if it was an inode.
246 SPLAY_REMOVE(hammer2_chain_splay,
247 &parent->shead, chain);
248 atomic_set_int(&chain->flags,
249 HAMMER2_CHAIN_DELETED);
252 /* parent refs dropped via recursion */
256 * When cleaning out a hammer2_inode we must
257 * also clean out the related ccms_inode.
260 ccms_cst_uninit(&ip->topo_cst);
261 chain->parent = NULL;
263 ccms_thread_unlock(&parent->cst);
264 hammer2_chain_free(hmp, chain);
266 /* recurse on parent */
269 ccms_thread_unlock(&parent->cst);
270 /* retry the same chain */
273 if (atomic_cmpset_int(&chain->refs, refs, refs - 1)) {
275 * Succeeded, count did not reach zero so
276 * cut out of the loop.
280 /* retry the same chain */
286 * Ref and lock a chain element, acquiring its data with I/O if necessary,
287 * and specify how you would like the data to be resolved.
289 * Returns 0 on success or an error code if the data could not be acquired.
290 * The chain element is locked either way.
292 * The lock is allowed to recurse, multiple locking ops will aggregate
293 * the requested resolve types. Once data is assigned it will not be
294 * removed until the last unlock.
296 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
297 * (typically used to avoid device/logical buffer
300 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
301 * the INITIAL-create state (indirect blocks only).
303 * Do not resolve data elements for DATA chains.
304 * (typically used to avoid device/logical buffer
307 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
310 * NOTE: Embedded elements (volume header, inodes) are always resolved
313 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
314 * element will instantiate and zero its buffer, and flush it on
317 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
318 * so as not to instantiate a device buffer, which could alias against
319 * a logical file buffer. However, if ALWAYS is specified the
320 * device buffer will be instantiated anyway.
/*
 * Ref and exclusively lock a chain, resolving its data per 'how'
 * (NEVER / MAYBE / ALWAYS, see the header comment above).  When data
 * must be resolved, the backing device buffer is obtained via getblk()
 * (newly-created INITIAL elements), cluster_read(), or bread(); volume
 * headers and inodes then copy into embedded structures while indirect
 * and data blocks point chain->data straight at the device buffer.
 *
 * NOTE(review): fragment is missing the early-return paths, switch
 * header for 'how', error-handling braces and break statements in this
 * sampled view; do not treat as compilable.
 */
323 hammer2_chain_lock(hammer2_mount_t *hmp, hammer2_chain_t *chain, int how)
325 hammer2_blockref_t *bref;
334 * Lock the element. Under certain conditions this might end up
335 * being a recursive lock.
337 KKASSERT(chain->refs > 0);
338 atomic_add_int(&chain->refs, 1);
339 ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
342 * If we already have a valid data pointer no further action is
349 * Do we have to resolve the data?
352 case HAMMER2_RESOLVE_NEVER:
354 case HAMMER2_RESOLVE_MAYBE:
355 if (chain->flags & HAMMER2_CHAIN_INITIAL)
357 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
360 case HAMMER2_RESOLVE_ALWAYS:
365 * We must resolve to a device buffer, either by issuing I/O or
366 * by creating a zero-fill element. We do not mark the buffer
367 * dirty when creating a zero-fill element (the hammer2_chain_modify()
368 * API must still be used to do that).
370 * The device buffer is variable-sized in powers of 2 down
371 * to HAMMER2_MINALLOCSIZE (typically 1K). A 64K physical storage
372 * chunk always contains buffers of the same size. (XXX)
374 * The minimum physical IO size may be larger than the variable
/* round the chain size up to the minimum I/O size and derive the
 * physical buffer base (pbase), cluster EOF (peof) and intra-buffer
 * offset (boff) from the media offset stored in bref->data_off. */
379 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
380 bbytes = HAMMER2_MINIOSIZE;
381 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
382 peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
383 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
384 KKASSERT(pbase != 0);
387 * The getblk() optimization can only be used on newly created
388 * elements if the physical block size matches the request.
390 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
391 chain->bytes == bbytes) {
392 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
394 } else if (hammer2_cluster_enable) {
395 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
396 HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
399 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
403 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
404 (intmax_t)pbase, error);
411 * Zero the data area if the chain is in the INITIAL-create state.
412 * Mark the buffer for bdwrite().
414 bdata = (char *)chain->bp->b_data + boff;
415 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
416 bzero(bdata, chain->bytes);
417 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
421 * Setup the data pointer, either pointing it to an embedded data
422 * structure and copying the data from the buffer, or pointing it
425 * The buffer is not retained when copying to an embedded data
426 * structure in order to avoid potential deadlocks or recursions
427 * on the same physical buffer.
429 switch (bref->type) {
430 case HAMMER2_BREF_TYPE_VOLUME:
432 * Copy data from bp to embedded buffer
434 panic("hammer2_chain_lock: called on unresolved volume header");
437 KKASSERT(pbase == 0);
438 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
439 bcopy(bdata, &hmp->voldata, chain->bytes);
440 chain->data = (void *)&hmp->voldata;
445 case HAMMER2_BREF_TYPE_INODE:
447 * Copy data from bp to embedded buffer, do not retain the
450 bcopy(bdata, &chain->u.ip->ip_data, chain->bytes);
451 chain->data = (void *)&chain->u.ip->ip_data;
455 case HAMMER2_BREF_TYPE_INDIRECT:
456 case HAMMER2_BREF_TYPE_DATA:
459 * Point data at the device buffer and leave bp intact.
461 chain->data = (void *)bdata;
468 * Unlock and deref a chain element.
470 * On the last lock release any non-embedded data (chain->bp) will be
/*
 * Unlock and deref a chain.  On the final lock release any attached
 * device buffer is disposed of: DIRTYBP buffers are bdwrite()n or
 * (with IOFLUSH) written asynchronously and released; DATA-type buffers
 * are marked B_RELBUF to destroy the alias with logical file buffers.
 * Per-type I/O statistics counters are selected along the way.
 *
 * NOTE(review): fragment is missing early returns, brelse/bdwrite
 * calls, break statements and braces in this sampled view.
 */
474 hammer2_chain_unlock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
479 * Release the CST lock but with a special 1->0 transition case.
481 * Returns non-zero if lock references remain. When zero is
482 * returned the last lock reference is retained and any shared
483 * lock is upgraded to an exclusive lock for final disposition.
485 if (ccms_thread_unlock_zero(&chain->cst)) {
486 KKASSERT(chain->refs > 1);
487 atomic_add_int(&chain->refs, -1);
492 * Shortcut the case if the data is embedded or not resolved.
494 * Do NOT null-out pointers to embedded data (e.g. inode).
496 * The DIRTYBP flag is non-applicable in this situation and can
497 * be cleared to keep the flags state clean.
499 if (chain->bp == NULL) {
500 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
501 ccms_thread_unlock(&chain->cst);
502 hammer2_chain_drop(hmp, chain);
509 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
511 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
/* async (ioa) counters for IOFLUSH writes */
512 switch(chain->bref.type) {
513 case HAMMER2_BREF_TYPE_DATA:
514 counterp = &hammer2_ioa_file_write;
516 case HAMMER2_BREF_TYPE_INODE:
517 counterp = &hammer2_ioa_meta_write;
519 case HAMMER2_BREF_TYPE_INDIRECT:
520 counterp = &hammer2_ioa_indr_write;
523 counterp = &hammer2_ioa_volu_write;
/* delayed (iod) counters for normal bdwrite path */
528 switch(chain->bref.type) {
529 case HAMMER2_BREF_TYPE_DATA:
530 counterp = &hammer2_iod_file_write;
532 case HAMMER2_BREF_TYPE_INODE:
533 counterp = &hammer2_iod_meta_write;
535 case HAMMER2_BREF_TYPE_INDIRECT:
536 counterp = &hammer2_iod_indr_write;
539 counterp = &hammer2_iod_volu_write;
548 * If a device buffer was used for data be sure to destroy the
549 * buffer when we are done to avoid aliases (XXX what about the
550 * underlying VM pages?).
552 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
553 chain->bp->b_flags |= B_RELBUF;
556 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
557 * or not. The flag will get re-set when chain_modify() is called,
558 * even if MODIFIED is already set, allowing the OS to retire the
559 * buffer independent of a hammer2 flush.
562 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
563 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
564 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
565 atomic_clear_int(&chain->flags,
566 HAMMER2_CHAIN_IOFLUSH);
567 chain->bp->b_flags |= B_RELBUF;
568 cluster_awrite(chain->bp);
570 chain->bp->b_flags |= B_CLUSTEROK;
574 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
575 atomic_clear_int(&chain->flags,
576 HAMMER2_CHAIN_IOFLUSH);
577 chain->bp->b_flags |= B_RELBUF;
580 /* bp might still be dirty */
585 ccms_thread_unlock(&chain->cst);
586 hammer2_chain_drop(hmp, chain);
590 * Resize the chain's physical storage allocation. Chains can be resized
591 * smaller without reallocating the storage. Resizing larger will reallocate
594 * Must be passed a locked chain.
596 * If you want the resize code to copy the data to the new block then the
597 * caller should lock the chain RESOLVE_MAYBE or RESOLVE_ALWAYS.
599 * If the caller already holds a logical buffer containing the data and
600 * intends to bdwrite() that buffer resolve with RESOLVE_NEVER. The resize
601 * operation will then not copy the data.
603 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
604 * to avoid instantiating a device buffer that conflicts with the vnode
607 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
/*
 * Resize a locked DATA or INDIRECT chain to 2^nradix bytes.  The block
 * is always relocated via the freemap (even when shrinking, since
 * different sizes live in different regions); if the chain's data is
 * resolved it is copied (and zero-extended on growth) into the new
 * device buffer, the old buffer is discarded with B_RELBUF|B_NOCACHE,
 * and MOVED/SUBMODIFIED flags propagate the change to the flusher.
 *
 * NOTE(review): fragment is missing early return, freemap_alloc
 * arguments, the "data resolved" conditional and closing braces in
 * this sampled view.
 */
610 hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *chain,
611 int nradix, int flags)
613 hammer2_mount_t *hmp = ip->hmp;
624 * Only data and indirect blocks can be resized for now
626 KKASSERT(chain != &hmp->vchain);
627 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
628 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
631 * Nothing to do if the element is already the proper size
633 obytes = chain->bytes;
634 nbytes = 1U << nradix;
635 if (obytes == nbytes)
639 * Set MODIFIED and add a chain ref to prevent destruction. Both
640 * modified flags share the same ref.
642 * If the chain is already marked MODIFIED then we can safely
643 * return the previous allocation to the pool without having to
644 * worry about snapshots.
646 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
647 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
648 HAMMER2_CHAIN_MODIFY_TID);
649 hammer2_chain_ref(hmp, chain);
651 hammer2_freemap_free(hmp, chain->bref.data_off,
656 * Relocate the block, even if making it smaller (because different
657 * block sizes may be in different regions).
659 chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
661 chain->bytes = nbytes;
662 ip->delta_dcount += (ssize_t)(nbytes - obytes); /* XXX atomic */
665 * The device buffer may be larger than the allocation size.
667 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
668 bbytes = HAMMER2_MINIOSIZE;
669 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
670 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
673 * Only copy the data if resolved, otherwise the caller is
677 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
678 chain->bref.type == HAMMER2_BREF_TYPE_DATA);
679 KKASSERT(chain != &hmp->vchain); /* safety */
682 * The getblk() optimization can only be used if the
683 * physical block size matches the request.
685 if (nbytes == bbytes) {
686 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
689 error = bread(hmp->devvp, pbase, bbytes, &nbp);
690 KKASSERT(error == 0);
692 bdata = (char *)nbp->b_data + boff;
/* copy min(obytes, nbytes); zero-fill the tail when growing */
694 if (nbytes < obytes) {
695 bcopy(chain->data, bdata, nbytes);
697 bcopy(chain->data, bdata, obytes);
698 bzero(bdata + obytes, nbytes - obytes);
702 * NOTE: The INITIAL state of the chain is left intact.
703 * We depend on hammer2_chain_modify() to do the
706 * NOTE: We set B_NOCACHE to throw away the previous bp and
707 * any VM backing store, even if it was dirty.
708 * Otherwise we run the risk of a logical/device
709 * conflict on reallocation.
711 chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
714 chain->data = (void *)bdata;
715 hammer2_chain_modify(hmp, chain, 0);
719 * Make sure the chain is marked MOVED and SUBMOD is set in the
720 * parent(s) so the adjustments are picked up by flush.
722 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
723 hammer2_chain_ref(hmp, chain);
724 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
726 hammer2_chain_parent_setsubmod(hmp, chain);
730 * Convert a locked chain that was retrieved read-only to read-write.
732 * If not already marked modified a new physical block will be allocated
733 * and assigned to the bref.
735 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
736 * level or the COW operation will not work.
738 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
739 * run the data through the device buffers.
/*
 * Convert a locked read-only chain to read-write (copy-on-write).
 * Unless already MODIFIED, a new physical block is allocated from the
 * freemap; embedded types (volume header, inode) need no data copy,
 * while indirect blocks are copied (or zero-filled when chain->data is
 * NULL) into the new device buffer.  DIRTYBP is set so the buffer is
 * flushed on unlock, INITIAL is cleared, and SUBMODIFIED propagates to
 * the parents unless HAMMER2_MODIFY_NOSUB is given.
 *
 * NOTE(review): fragment is missing early returns, freemap_alloc
 * arguments, break statements and braces in this sampled view.
 */
742 hammer2_chain_modify(hammer2_mount_t *hmp, hammer2_chain_t *chain, int flags)
752 * Tells flush that modify_tid must be updated, otherwise only
753 * mirror_tid is updated. This is the default.
755 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
756 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFY_TID);
759 * If the chain is already marked MODIFIED we can just return.
761 * However, it is possible that a prior lock/modify sequence
762 * retired the buffer. During this lock/modify sequence MODIFIED
763 * may still be set but the buffer could wind up clean. Since
764 * the caller is going to modify the buffer further we have to
765 * be sure that DIRTYBP is set again.
767 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
768 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
772 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
777 * Set MODIFIED and add a chain ref to prevent destruction. Both
778 * modified flags share the same ref.
780 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
781 hammer2_chain_ref(hmp, chain);
784 * We must allocate the copy-on-write block.
786 * If the data is embedded no other action is required.
788 * If the data is not embedded we acquire and clear the
789 * new block. If chain->data is not NULL we then do the
790 * copy-on-write. chain->data will then be repointed to the new
791 * buffer and the old buffer will be released.
793 * For newly created elements with no prior allocation we go
794 * through the copy-on-write steps except without the copying part.
796 if (chain != &hmp->vchain) {
797 if ((hammer2_debug & 0x0001) &&
798 (chain->bref.data_off & HAMMER2_OFF_MASK)) {
799 kprintf("Replace %d\n", chain->bytes);
801 chain->bref.data_off =
802 hammer2_freemap_alloc(hmp, chain->bref.type,
804 /* XXX failed allocation */
808 * If data instantiation is optional and the chain has no current
809 * data association (typical for DATA and newly-created INDIRECT
810 * elements), don't instantiate the buffer now.
812 if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
817 * Setting the DIRTYBP flag will cause the buffer to be dirtied or
818 * written-out on unlock. This bit is independent of the MODIFIED
819 * bit because the chain may still need meta-data adjustments done
820 * by virtue of MODIFIED for its parent, and the buffer can be
821 * flushed out (possibly multiple times) by the OS before that.
823 * Clearing the INITIAL flag (for indirect blocks) indicates that
824 * a zero-fill buffer has been instantiated.
826 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
827 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
830 * We currently should never instantiate a device buffer for a
833 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
836 * Execute COW operation
838 switch(chain->bref.type) {
839 case HAMMER2_BREF_TYPE_VOLUME:
840 case HAMMER2_BREF_TYPE_INODE:
842 * The data is embedded, no copy-on-write operation is
845 KKASSERT(chain->bp == NULL);
847 case HAMMER2_BREF_TYPE_DATA:
848 case HAMMER2_BREF_TYPE_INDIRECT:
850 * Perform the copy-on-write operation
852 KKASSERT(chain != &hmp->vchain); /* safety */
854 * The device buffer may be larger than the allocation size.
856 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
857 bbytes = HAMMER2_MINIOSIZE;
858 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
859 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
862 * The getblk() optimization can only be used if the
863 * physical block size matches the request.
865 if (chain->bytes == bbytes) {
866 nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
869 error = bread(hmp->devvp, pbase, bbytes, &nbp);
870 KKASSERT(error == 0);
872 bdata = (char *)nbp->b_data + boff;
875 * Copy or zero-fill on write depending on whether
876 * chain->data exists or not.
879 bcopy(chain->data, bdata, chain->bytes);
880 KKASSERT(chain->bp != NULL);
882 bzero(bdata, chain->bytes);
885 chain->bp->b_flags |= B_RELBUF;
892 panic("hammer2_chain_modify: illegal non-embedded type %d",
898 if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
899 hammer2_chain_parent_setsubmod(hmp, chain);
903 * Mark the volume as having been modified. This short-cut version
904 * does not have to lock the volume's chain, which allows the ioctl
905 * code to make adjustments to connections without deadlocking.
/*
 * Mark the volume root as modified (MODIFIED_AUX) under the voldata
 * lock.  Shortcut that avoids locking the volume's chain, allowing
 * ioctl code to adjust connections without deadlocking (see header
 * comment above).
 */
908 hammer2_modify_volume(hammer2_mount_t *hmp)
910 hammer2_voldata_lock(hmp);
911 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
912 hammer2_voldata_unlock(hmp);
916 * Locate an in-memory chain. The parent must be locked. The in-memory
917 * chain is returned or NULL if no in-memory chain is present.
919 * NOTE: A chain on-media might exist for this index when NULL is returned.
/*
 * Locate an in-memory chain at (parent, index) via the parent's SPLAY
 * tree, using a stack dummy as the search key.  Returns NULL if no
 * in-memory chain exists (an on-media chain may still exist; see the
 * header comment).  Parent must be locked by the caller.
 *
 * NOTE(review): the line initializing dummy.index from 'index' is
 * missing from this sampled view — confirm against the full source.
 */
922 hammer2_chain_find(hammer2_mount_t *hmp, hammer2_chain_t *parent, int index)
924 hammer2_chain_t dummy;
925 hammer2_chain_t *chain;
928 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
933 * Return a locked chain structure with all associated data acquired.
935 * Caller must lock the parent on call, the returned child will be locked.
/*
 * Return a locked chain for (parent, index), either from the in-memory
 * SPLAY cache or by allocating a new chain from the media blockref
 * (selected per parent type: inode blockset, indirect block array, or
 * the volume's sroot blockset) and linking it under the parent.  Inode
 * children additionally get pip/pmp/depth linkage and a topo_cst seeded
 * from the nearest inode ancestor.
 *
 * NOTE(review): fragment is missing returns, break statements and
 * braces in this sampled view; do not treat as compilable.
 */
938 hammer2_chain_get(hammer2_mount_t *hmp, hammer2_chain_t *parent,
939 int index, int flags)
941 hammer2_blockref_t *bref;
943 hammer2_chain_t *chain;
944 hammer2_chain_t dummy;
948 * Figure out how to lock. MAYBE can be used to optimize
949 * the initial-create state for indirect blocks.
951 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
952 how = HAMMER2_RESOLVE_NEVER;
954 how = HAMMER2_RESOLVE_MAYBE;
957 * First see if we have a (possibly modified) chain element cached
958 * for this (parent, index). Acquire the data if necessary.
960 * If chain->data is non-NULL the chain should already be marked
964 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
966 if (flags & HAMMER2_LOOKUP_NOLOCK)
967 hammer2_chain_ref(hmp, chain);
969 hammer2_chain_lock(hmp, chain, how);
974 * The get function must always succeed, panic if there's no
977 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
978 panic("hammer2_chain_get: Missing bref(1)");
983 * Otherwise lookup the bref and issue I/O (switch on the parent)
985 switch(parent->bref.type) {
986 case HAMMER2_BREF_TYPE_INODE:
987 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
988 bref = &parent->data->ipdata.u.blockset.blockref[index];
990 case HAMMER2_BREF_TYPE_INDIRECT:
991 KKASSERT(parent->data != NULL);
992 KKASSERT(index >= 0 &&
993 index < parent->bytes / sizeof(hammer2_blockref_t));
994 bref = &parent->data->npdata.blockref[index];
996 case HAMMER2_BREF_TYPE_VOLUME:
997 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
998 bref = &hmp->voldata.sroot_blockset.blockref[index];
1002 panic("hammer2_chain_get: unrecognized blockref type: %d",
1005 if (bref->type == 0) {
1006 panic("hammer2_chain_get: Missing bref(2)");
1011 * Allocate a chain structure representing the existing media
1014 * The locking operation we do later will issue I/O to read it.
1016 chain = hammer2_chain_alloc(hmp, bref);
1019 * Link the chain into its parent. Caller is expected to hold an
1020 * exclusive lock on the parent.
1022 chain->parent = parent;
1023 chain->index = index;
1024 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, chain))
1025 panic("hammer2_chain_link: collision");
1026 KKASSERT(parent->refs > 0);
1027 atomic_add_int(&parent->refs, 1); /* for splay entry */
1030 * Additional linkage for inodes. Reuse the parent pointer to
1031 * find the parent directory.
1033 * The ccms_inode is initialized from its parent directory. The
1034 * chain of ccms_inode's is seeded by the mount code.
1036 if (bref->type == HAMMER2_BREF_TYPE_INODE) {
1038 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1039 parent = parent->parent;
1040 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
1041 ip->pip = parent->u.ip;
1042 ip->pmp = parent->u.ip->pmp;
1043 ip->depth = parent->u.ip->depth + 1;
1044 ccms_cst_init(&ip->topo_cst, &ip->chain);
1049 * Our new chain structure has already been referenced and locked
1050 * but the lock code handles the I/O so call it to resolve the data.
1051 * Then release one of our two exclusive locks.
1053 * If NOLOCK is set the release will release the one-and-only lock.
1055 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1056 hammer2_chain_lock(hmp, chain, how); /* recursive lock */
1057 hammer2_chain_drop(hmp, chain); /* excess ref */
1059 ccms_thread_unlock(&chain->cst); /* from alloc */
1065 * Locate any key between key_beg and key_end inclusive. (*parentp)
1066 * typically points to an inode but can also point to a related indirect
1067 * block and this function will recurse upwards and find the inode again.
1069 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1070 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1071 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1073 * (*parentp) must be exclusively locked and referenced and can be an inode
1074 * or an existing indirect block within the inode.
1076 * On return (*parentp) will be modified to point at the deepest parent chain
1077 * element encountered during the search, as a helper for an insertion or
1078 * deletion. The new (*parentp) will be locked and referenced and the old
1079 * will be unlocked and dereferenced (no change if they are both the same).
1081 * The matching chain will be returned exclusively locked and referenced.
1083 * NULL is returned if no match was found, but (*parentp) will still
1084 * potentially be adjusted.
1086 * This function will also recurse up the chain if the key is not within the
1087 * current parent's range. (*parentp) can never be set to NULL. An iteration
1088 * can simply allow (*parentp) to float inside the loop.
/*
 * Find any chain whose key range overlaps [key_beg, key_end].  First
 * recurses (*parentp) upward until the parent encloses the range, then
 * scans the parent's blockref array (in-memory chains take precedence
 * over media brefs), acquires the matching child via chain_get(), and
 * loops when the child is another indirect block.  See the extensive
 * header comment above for locking and iteration semantics.
 *
 * NOTE(review): fragment is missing the signature's flags parameter
 * line, loop bodies, returns, break statements and braces in this
 * sampled view; do not treat as compilable.
 */
1091 hammer2_chain_lookup(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1092 hammer2_key_t key_beg, hammer2_key_t key_end,
1095 hammer2_chain_t *parent;
1096 hammer2_chain_t *chain;
1097 hammer2_chain_t *tmp;
1098 hammer2_blockref_t *base;
1099 hammer2_blockref_t *bref;
1100 hammer2_key_t scan_beg;
1101 hammer2_key_t scan_end;
1106 * Recurse (*parentp) upward if necessary until the parent completely
1107 * encloses the key range or we hit the inode.
1110 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1111 scan_beg = parent->bref.key;
1112 scan_end = scan_beg +
1113 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1114 if (key_beg >= scan_beg && key_end <= scan_end)
/* ref-before-unlock keeps the old parent alive across the handoff */
1116 hammer2_chain_ref(hmp, parent); /* ref old parent */
1117 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1118 parent = parent->parent;
1119 /* lock new parent */
1120 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1121 hammer2_chain_drop(hmp, *parentp); /* drop old parent */
1122 *parentp = parent; /* new parent */
1127 * Locate the blockref array. Currently we do a fully associative
1128 * search through the array.
1130 switch(parent->bref.type) {
1131 case HAMMER2_BREF_TYPE_INODE:
1133 * Special shortcut for embedded data returns the inode
1134 * itself. Callers must detect this condition and access
1135 * the embedded data (the strategy code does this for us).
1137 * This is only applicable to regular files and softlinks.
1139 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1140 if (flags & HAMMER2_LOOKUP_NOLOCK)
1141 hammer2_chain_ref(hmp, parent);
1143 hammer2_chain_lock(hmp, parent,
1144 HAMMER2_RESOLVE_ALWAYS);
1147 base = &parent->data->ipdata.u.blockset.blockref[0];
1148 count = HAMMER2_SET_COUNT;
1150 case HAMMER2_BREF_TYPE_INDIRECT:
1152 * Optimize indirect blocks in the INITIAL state to avoid
1155 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1158 if (parent->data == NULL)
1159 panic("parent->data is NULL");
1160 base = &parent->data->npdata.blockref[0];
1162 count = parent->bytes / sizeof(hammer2_blockref_t);
1164 case HAMMER2_BREF_TYPE_VOLUME:
1165 base = &hmp->voldata.sroot_blockset.blockref[0];
1166 count = HAMMER2_SET_COUNT;
1169 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1171 base = NULL; /* safety */
1172 count = 0; /* safety */
1176 * If the element and key overlap we use the element.
1179 for (i = 0; i < count; ++i) {
1180 tmp = hammer2_chain_find(hmp, parent, i);
1183 KKASSERT(bref->type != 0);
1184 } else if (base == NULL || base[i].type == 0) {
1189 scan_beg = bref->key;
1190 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1191 if (key_beg <= scan_end && key_end >= scan_beg)
/* exact-key miss terminates; range miss delegates to chain_next */
1195 if (key_beg == key_end)
1197 return (hammer2_chain_next(hmp, parentp, NULL,
1198 key_beg, key_end, flags));
1202 * Acquire the new chain element. If the chain element is an
1203 * indirect block we must search recursively.
1205 chain = hammer2_chain_get(hmp, parent, i, flags);
1210 * If the chain element is an indirect block it becomes the new
1211 * parent and we loop on it.
1213 * The parent always has to be locked with at least RESOLVE_MAYBE,
1214 * so it might need a fixup if the caller passed incompatible flags.
1216 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1217 hammer2_chain_unlock(hmp, parent);
1218 *parentp = parent = chain;
1219 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1220 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1221 hammer2_chain_drop(hmp, chain); /* excess ref */
1222 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1223 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1224 hammer2_chain_unlock(hmp, chain);
1230 * All done, return chain
1236 * After having issued a lookup we can iterate all matching keys.
1238 * If chain is non-NULL we continue the iteration from just after it's index.
1240 * If chain is NULL we assume the parent was exhausted and continue the
1241 * iteration at the next parent.
1243 * parent must be locked on entry and remains locked throughout. chain's
1244 * lock status must match flags.
1247 hammer2_chain_next(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1248 hammer2_chain_t *chain,
1249 hammer2_key_t key_beg, hammer2_key_t key_end,
1252 hammer2_chain_t *parent;
1253 hammer2_chain_t *tmp;
1254 hammer2_blockref_t *base;
1255 hammer2_blockref_t *bref;
1256 hammer2_key_t scan_beg;
1257 hammer2_key_t scan_end;
1265 * Calculate the next index and recalculate the parent if necessary.
1269 * Continue iteration within current parent. If not NULL
1270 * the passed-in chain may or may not be locked, based on
1271 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
/*
 * NOLOCK callers hold only a ref on (chain); all other callers hold a
 * lock, so release whichever was handed to us before advancing.
 */
1274 i = chain->index + 1;
1275 if (flags & HAMMER2_LOOKUP_NOLOCK)
1276 hammer2_chain_drop(hmp, chain);
1278 hammer2_chain_unlock(hmp, chain);
1281 * Any scan where the lookup returned degenerate data embedded
1282 * in the inode has an invalid index and must terminate.
1284 if (chain == parent)
1287 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT) {
1289 * We reached the end of the iteration.
1294 * Continue iteration with next parent unless the current
1295 * parent covers the range.
1297 hammer2_chain_t *nparent;
/*
 * Pop up one level: if the exhausted parent fully covers
 * [key_beg, key_end] there is nothing more to find.
 */
1299 scan_beg = parent->bref.key;
1300 scan_end = scan_beg +
1301 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1302 if (key_beg >= scan_beg && key_end <= scan_end)
1305 i = parent->index + 1;
1306 nparent = parent->parent;
/*
 * Ref-then-lock the grandparent before releasing the old
 * parent so it cannot be torn down underneath us.
 */
1307 hammer2_chain_ref(hmp, nparent); /* ref new parent */
1308 hammer2_chain_unlock(hmp, parent); /* unlock old parent */
1309 /* lock new parent */
1310 hammer2_chain_lock(hmp, nparent, HAMMER2_RESOLVE_MAYBE);
1311 hammer2_chain_drop(hmp, nparent); /* drop excess ref */
1312 *parentp = parent = nparent;
1317 * Locate the blockref array. Currently we do a fully associative
1318 * search through the array.
1320 switch(parent->bref.type) {
1321 case HAMMER2_BREF_TYPE_INODE:
1322 base = &parent->data->ipdata.u.blockset.blockref[0];
1323 count = HAMMER2_SET_COUNT;
1325 case HAMMER2_BREF_TYPE_INDIRECT:
1326 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1329 KKASSERT(parent->data != NULL);
1330 base = &parent->data->npdata.blockref[0];
1332 count = parent->bytes / sizeof(hammer2_blockref_t);
1334 case HAMMER2_BREF_TYPE_VOLUME:
1335 base = &hmp->voldata.sroot_blockset.blockref[0];
1336 count = HAMMER2_SET_COUNT;
1339 panic("hammer2_chain_next: unrecognized blockref type: %d",
1341 base = NULL; /* safety */
1342 count = 0; /* safety */
1345 KKASSERT(i <= count);
1348 * Look for the key. If we are unable to find a match and an exact
1349 * match was requested we return NULL. If a range was requested we
1350 * run hammer2_chain_next() to iterate.
/*
 * In-memory chains (splay tree) override the media blockrefs;
 * check hammer2_chain_find() first, then fall back to base[i].
 */
1354 tmp = hammer2_chain_find(hmp, parent, i);
1357 } else if (base == NULL || base[i].type == 0) {
1363 scan_beg = bref->key;
1364 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1365 if (key_beg <= scan_end && key_end >= scan_beg)
1371 * If we couldn't find a match recurse up a parent to continue the
1378 * Acquire the new chain element. If the chain element is an
1379 * indirect block we must search recursively.
1381 chain = hammer2_chain_get(hmp, parent, i, flags);
1386 * If the chain element is an indirect block it becomes the new
1387 * parent and we loop on it.
1389 * The parent always has to be locked with at least RESOLVE_MAYBE,
1390 * so it might need a fixup if the caller passed incompatible flags.
1392 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1393 hammer2_chain_unlock(hmp, parent);
1394 *parentp = parent = chain;
/*
 * Re-lock fixup: NOLOCK gets us a MAYBE lock with the excess
 * ref dropped; NODATA upgrades to MAYBE then releases again.
 */
1396 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1397 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1398 hammer2_chain_drop(hmp, parent); /* excess ref */
1399 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1400 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1401 hammer2_chain_unlock(hmp, parent);
1408 * All done, return chain
1414 * Create and return a new hammer2 system memory structure of the specified
1415 * key, type and size and insert it RELATIVE TO (PARENT).
1417 * (parent) is typically either an inode or an indirect block, acquired
1418 * as a side effect of issuing a prior failed lookup. parent
1419 * must be locked and held. Do not pass the inode chain to this function
1420 * unless that is the chain returned by the failed lookup.
1422 * Non-indirect types will automatically allocate indirect blocks as required
1423 * if the new item does not fit in the current (parent).
1425 * Indirect types will move a portion of the existing blockref array in
1426 * (parent) into the new indirect type and then use one of the free slots
1427 * to emplace the new indirect type.
1429 * A new locked, referenced chain element is returned of the specified type.
1430 * The element may or may not have a data area associated with it:
1432 * VOLUME not allowed here
1433 * INODE embedded data area will be set-up
1434 * INDIRECT not allowed here
1435 * DATA no data area will be set-up (caller is expected
1436 * to have logical buffers, we don't want to alias
1437 * the data onto device buffers!).
1440 hammer2_chain_create(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1441 hammer2_chain_t *chain,
1442 hammer2_key_t key, int keybits, int type, size_t bytes)
1444 hammer2_blockref_t dummy;
1445 hammer2_blockref_t *base;
1446 hammer2_chain_t dummy_chain;
1447 int unlock_parent = 0;
1452 if (chain == NULL) {
1454 * First allocate media space and construct the dummy bref,
1455 * then allocate the in-memory chain structure.
1457 bzero(&dummy, sizeof(dummy));
1460 dummy.keybits = keybits;
1461 dummy.data_off = hammer2_bytes_to_radix(bytes);
1462 chain = hammer2_chain_alloc(hmp, &dummy);
1466 * We do NOT set INITIAL here (yet). INITIAL is only
1467 * used for indirect blocks.
1469 * Recalculate bytes to reflect the actual media block
1472 bytes = (hammer2_off_t)1 <<
1473 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1474 chain->bytes = bytes;
/*
 * Per-type setup of the new chain's data pointer.  Only INODE
 * and DATA are legal here; VOLUME/INDIRECT cannot be created
 * through this interface.
 */
1477 case HAMMER2_BREF_TYPE_VOLUME:
1478 panic("hammer2_chain_create: called with volume type");
1480 case HAMMER2_BREF_TYPE_INODE:
1481 KKASSERT(bytes == HAMMER2_INODE_BYTES);
1482 chain->data = (void *)&chain->u.ip->ip_data;
1484 case HAMMER2_BREF_TYPE_INDIRECT:
1485 panic("hammer2_chain_create: cannot be used to "
1486 "create indirect block");
1488 case HAMMER2_BREF_TYPE_DATA:
1490 /* leave chain->data NULL */
1491 KKASSERT(chain->data == NULL);
1496 * Potentially update the chain's key/keybits.
1498 chain->bref.key = key;
1499 chain->bref.keybits = keybits;
1504 * Locate a free blockref in the parent's array
1506 switch(parent->bref.type) {
1507 case HAMMER2_BREF_TYPE_INODE:
1508 KKASSERT((parent->u.ip->ip_data.op_flags &
1509 HAMMER2_OPFLAG_DIRECTDATA) == 0);
1510 KKASSERT(parent->data != NULL);
1511 base = &parent->data->ipdata.u.blockset.blockref[0];
1512 count = HAMMER2_SET_COUNT;
1514 case HAMMER2_BREF_TYPE_INDIRECT:
1515 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1518 KKASSERT(parent->data != NULL);
1519 base = &parent->data->npdata.blockref[0];
1521 count = parent->bytes / sizeof(hammer2_blockref_t);
1523 case HAMMER2_BREF_TYPE_VOLUME:
1524 KKASSERT(parent->data != NULL);
1525 base = &hmp->voldata.sroot_blockset.blockref[0];
1526 count = HAMMER2_SET_COUNT;
1529 panic("hammer2_chain_create: unrecognized blockref type: %d",
1536 * Scan for an unallocated bref, also skipping any slots occupied
1537 * by in-memory chain elements that may not yet have been updated
1538 * in the parent's bref array.
1540 bzero(&dummy_chain, sizeof(dummy_chain));
1541 for (i = 0; i < count; ++i) {
/*
 * A slot is free only if both the media bref is empty AND no
 * in-memory chain occupies the same index in the splay tree.
 */
1543 dummy_chain.index = i;
1544 if (SPLAY_FIND(hammer2_chain_splay,
1545 &parent->shead, &dummy_chain) == NULL) {
1548 } else if (base[i].type == 0) {
1549 dummy_chain.index = i;
1550 if (SPLAY_FIND(hammer2_chain_splay,
1551 &parent->shead, &dummy_chain) == NULL) {
1558 * If no free blockref could be found we must create an indirect
1559 * block and move a number of blockrefs into it. With the parent
1560 * locked we can safely lock each child in order to move it without
1561 * causing a deadlock.
1563 * This may return the new indirect block or the old parent depending
1564 * on where the key falls.
1567 hammer2_chain_t *nparent;
1569 nparent = hammer2_chain_create_indirect(hmp, parent,
1571 if (nparent == NULL) {
/* indirect-block creation failed; undo our allocation */
1573 hammer2_chain_free(hmp, chain);
1577 if (parent != nparent) {
1579 hammer2_chain_unlock(hmp, parent);
1587 * Link the chain into its parent. Later on we will have to set
1588 * the MOVED bit in situations where we don't mark the new chain
1589 * as being modified.
1591 if (chain->parent != NULL)
1592 panic("hammer2: hammer2_chain_create: chain already connected");
1593 KKASSERT(chain->parent == NULL);
1594 chain->parent = parent;
/* NOTE(review): panic says "hammer2_chain_link" — legacy name, msg only */
1596 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, chain))
1597 panic("hammer2_chain_link: collision");
1598 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
1599 KKASSERT(parent->refs > 0);
1600 atomic_add_int(&parent->refs, 1);
1603 * Additional linkage for inodes. Reuse the parent pointer to
1604 * find the parent directory.
1606 * Cumulative adjustments are inherited on [re]attach and will
1607 * propagate up the tree on the next flush.
1609 * The ccms_inode is initialized from its parent directory. The
1610 * chain of ccms_inode's is seeded by the mount code.
1612 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1613 hammer2_chain_t *scan = parent;
1614 hammer2_inode_t *ip = chain->u.ip;
/* skip intervening indirect blocks to find the parent inode */
1616 while (scan->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1617 scan = scan->parent;
1618 if (scan->bref.type == HAMMER2_BREF_TYPE_INODE) {
1619 ip->pip = scan->u.ip;
1620 ip->pmp = scan->u.ip->pmp;
1621 ip->depth = scan->u.ip->depth + 1;
1622 ip->pip->delta_icount += ip->ip_data.inode_count;
1623 ip->pip->delta_dcount += ip->ip_data.data_count;
1624 ++ip->pip->delta_icount;
1625 ccms_cst_init(&ip->topo_cst, &ip->chain);
1630 * (allocated) indicates that this is a newly-created chain element
1631 * rather than a renamed chain element. In this situation we want
1632 * to place the chain element in the MODIFIED state.
1634 * The data area will be set up as follows:
1636 * VOLUME not allowed here.
1638 * INODE embedded data area will be set-up.
1640 * INDIRECT not allowed here.
1642 * DATA no data area will be set-up (caller is expected
1643 * to have logical buffers, we don't want to alias
1644 * the data onto device buffers!).
1647 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1648 hammer2_chain_modify(hmp, chain,
1649 HAMMER2_MODIFY_OPTDATA);
1650 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1651 /* not supported in this function */
1652 panic("hammer2_chain_create: bad type");
1653 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1654 hammer2_chain_modify(hmp, chain,
1655 HAMMER2_MODIFY_OPTDATA);
1657 hammer2_chain_modify(hmp, chain, 0);
1661 * When reconnecting inodes we have to call setsubmod()
1662 * to ensure that its state propagates up the newly
1665 * Make sure MOVED is set but do not update bref_flush. If
1666 * the chain is undergoing modification bref_flush will be
1667 * updated when it gets flushed. If it is not then the
1668 * bref may not have been flushed yet and we do not want to
1669 * set MODIFIED here as this could result in unnecessary
1672 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1673 hammer2_chain_ref(hmp, chain);
1674 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1676 hammer2_chain_parent_setsubmod(hmp, chain);
1681 hammer2_chain_unlock(hmp, parent);
1686 * Create an indirect block that covers one or more of the elements in the
1687 * current parent. Either returns the existing parent with no locking or
1688 * ref changes or returns the new indirect block locked and referenced
1689 * and leaving the original parent lock/ref intact as well.
1691 * The returned chain depends on where the specified key falls.
1693 * The key/keybits for the indirect mode only needs to follow three rules:
1695 * (1) That all elements underneath it fit within its key space and
1697 * (2) That all elements outside it are outside its key space.
1699 * (3) When creating the new indirect block any elements in the current
1700 * parent that fit within the new indirect block's keyspace must be
1701 * moved into the new indirect block.
1703 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
1704 * keyspace than the current parent, but lookup/iteration rules will
1705 * ensure (and must ensure) that rule (2) for all parents leading up
1706 * to the nearest inode or the root volume header is adhered to. This
1707 * is accomplished by always recursing through matching keyspaces in
1708 * the hammer2_chain_lookup() and hammer2_chain_next() API.
1710 * The current implementation calculates the current worst-case keyspace by
1711 * iterating the current parent and then divides it into two halves, choosing
1712 * whichever half has the most elements (not necessarily the half containing
1713 * the requested key).
1715 * We can also opt to use the half with the least number of elements. This
1716 * causes lower-numbered keys (aka logical file offsets) to recurse through
1717 * fewer indirect blocks and higher-numbered keys to recurse through more.
1718 * This also has the risk of not moving enough elements to the new indirect
1719 * block and being forced to create several indirect blocks before the element
1724 hammer2_chain_create_indirect(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1725 hammer2_key_t create_key, int create_bits)
1727 hammer2_blockref_t *base;
1728 hammer2_blockref_t *bref;
1729 hammer2_chain_t *chain;
1730 hammer2_chain_t *ichain;
1731 hammer2_chain_t dummy;
1732 hammer2_key_t key = create_key;
1733 int keybits = create_bits;
1741 * Calculate the base blockref pointer or NULL if the chain
1742 * is known to be empty. We need to calculate the array count
1743 * for SPLAY lookups either way.
1745 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA);
1746 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
/* INITIAL parent has no media data yet: count only, base stays NULL */
1749 switch(parent->bref.type) {
1750 case HAMMER2_BREF_TYPE_INODE:
1751 count = HAMMER2_SET_COUNT;
1753 case HAMMER2_BREF_TYPE_INDIRECT:
1754 count = parent->bytes / sizeof(hammer2_blockref_t);
1756 case HAMMER2_BREF_TYPE_VOLUME:
1757 count = HAMMER2_SET_COUNT;
1760 panic("hammer2_chain_create_indirect: "
1761 "unrecognized blockref type: %d",
1767 switch(parent->bref.type) {
1768 case HAMMER2_BREF_TYPE_INODE:
1769 base = &parent->data->ipdata.u.blockset.blockref[0];
1770 count = HAMMER2_SET_COUNT;
1772 case HAMMER2_BREF_TYPE_INDIRECT:
1773 base = &parent->data->npdata.blockref[0];
1774 count = parent->bytes / sizeof(hammer2_blockref_t);
1776 case HAMMER2_BREF_TYPE_VOLUME:
1777 base = &hmp->voldata.sroot_blockset.blockref[0];
1778 count = HAMMER2_SET_COUNT;
1781 panic("hammer2_chain_create_indirect: "
1782 "unrecognized blockref type: %d",
1790 * Scan for an unallocated bref, also skipping any slots occupied
1791 * by in-memory chain elements which may not yet have been updated
1792 * in the parent's bref array.
1794 bzero(&dummy, sizeof(dummy));
1795 for (i = 0; i < count; ++i) {
/* in-memory chain at slot i overrides the media bref for keying */
1799 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
1801 bref = &chain->bref;
1802 } else if (base && base[i].type) {
1809 * Expand our calculated key range (key, keybits) to fit
1810 * the scanned key. nkeybits represents the full range
1811 * that we will later cut in half (two halves @ nkeybits - 1).
1814 if (nkeybits < bref->keybits)
1815 nkeybits = bref->keybits;
/* widen nkeybits until (key) and (bref->key) share all msb bits */
1816 while (nkeybits < 64 &&
1817 (~(((hammer2_key_t)1 << nkeybits) - 1) &
1818 (key ^ bref->key)) != 0) {
1823 * If the new key range is larger we have to determine
1824 * which side of the new key range the existing keys fall
1825 * under by checking the high bit, then collapsing the
1826 * locount into the hicount or vice-versa.
1828 if (keybits != nkeybits) {
1829 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
1840 * The newly scanned key will be in the lower half or the
1841 * higher half of the (new) key range.
1843 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
1850 * Adjust keybits to represent half of the full range calculated
1851 * above (radix 63 max)
1856 * Select whichever half contains the most elements. Theoretically
1857 * we can select either side as long as it contains at least one
1858 * element (in order to ensure that a free slot is present to hold
1859 * the indirect block).
1861 key &= ~(((hammer2_key_t)1 << keybits) - 1);
1862 if (hammer2_indirect_optimize) {
1864 * Insert node for least number of keys, this will arrange
1865 * the first few blocks of a large file or the first few
1866 * inodes in a directory with fewer indirect blocks when
1869 if (hicount < locount && hicount != 0)
1870 key |= (hammer2_key_t)1 << keybits;
1872 key &= ~(hammer2_key_t)1 << keybits;
1875 * Insert node for most number of keys, best for heavily
1878 if (hicount > locount)
1879 key |= (hammer2_key_t)1 << keybits;
1881 key &= ~(hammer2_key_t)1 << keybits;
1885 * How big should our new indirect block be? It has to be at least
1886 * as large as its parent.
1888 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
1889 nbytes = HAMMER2_IND_BYTES_MIN;
1891 nbytes = HAMMER2_IND_BYTES_MAX;
1892 if (nbytes < count * sizeof(hammer2_blockref_t))
1893 nbytes = count * sizeof(hammer2_blockref_t);
1896 * Ok, create our new indirect block
1898 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
1899 dummy.bref.key = key;
1900 dummy.bref.keybits = keybits;
1901 dummy.bref.data_off = hammer2_bytes_to_radix(nbytes);
1902 ichain = hammer2_chain_alloc(hmp, &dummy.bref);
1903 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
1906 * Iterate the original parent and move the matching brefs into
1907 * the new indirect block.
1909 for (i = 0; i < count; ++i) {
1911 * For keying purposes access the bref from the media or
1912 * from our in-memory cache. In cases where the in-memory
1913 * cache overrides the media the keyrefs will be the same
1914 * anyway so we can avoid checking the cache when the media
1918 chain = SPLAY_FIND(hammer2_chain_splay, &parent->shead, &dummy);
1920 bref = &chain->bref;
1921 } else if (base && base[i].type) {
/* remember the first empty slot as ichain's insertion index */
1924 if (ichain->index < 0)
1930 * Skip keys not in the chosen half (low or high), only bit
1931 * (keybits - 1) needs to be compared but for safety we
1932 * will compare all msb bits plus that bit again.
1934 if ((~(((hammer2_key_t)1 << keybits) - 1) &
1935 (key ^ bref->key)) != 0) {
1940 * This element is being moved from the parent, its slot
1941 * is available for our new indirect block.
1943 if (ichain->index < 0)
1947 * Load the new indirect block by acquiring or allocating
1948 * the related chain entries, then simply move them to the
1949 * new parent (ichain).
1951 * When adjusting the parent/child relationship we must
1952 * set the MOVED bit but we do NOT update bref_flush
1953 * because otherwise we might synchronize a bref that has
1954 * not yet been flushed. We depend on chain's bref_flush
1955 * either being correct or the chain being in a MODIFIED
1958 * We do not want to set MODIFIED here as this would result
1959 * in unnecessary reallocations.
1961 * We must still set SUBMODIFIED in the parent but we do
1962 * that after the loop.
1964 * XXX we really need a lock here but we don't need the
1965 * data. NODATA feature needed.
1967 chain = hammer2_chain_get(hmp, parent, i,
1968 HAMMER2_LOOKUP_NODATA);
1969 SPLAY_REMOVE(hammer2_chain_splay, &parent->shead, chain);
1970 if (SPLAY_INSERT(hammer2_chain_splay, &ichain->shead, chain))
1971 panic("hammer2_chain_create_indirect: collision");
1972 chain->parent = ichain;
1974 bzero(&base[i], sizeof(base[i]));
/* transfer the splay-entry ref from parent to ichain */
1975 atomic_add_int(&parent->refs, -1);
1976 atomic_add_int(&ichain->refs, 1);
1977 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1978 hammer2_chain_ref(hmp, chain);
1979 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1981 hammer2_chain_unlock(hmp, chain);
1982 KKASSERT(parent->refs > 0);
1987 * Insert the new indirect block into the parent now that we've
1988 * cleared out some entries in the parent. We calculated a good
1989 * insertion index in the loop above (ichain->index).
1991 * We don't have to set MOVED here because we mark ichain modified
1992 * down below (so the normal modified -> flush -> set-moved sequence
1995 KKASSERT(ichain->index >= 0);
1996 if (SPLAY_INSERT(hammer2_chain_splay, &parent->shead, ichain))
1997 panic("hammer2_chain_create_indirect: ichain insertion");
1998 ichain->parent = parent;
1999 atomic_add_int(&parent->refs, 1);
2002 * Mark the new indirect block modified after insertion, which
2003 * will propagate up through parent all the way to the root and
2004 * also allocate the physical block in ichain for our caller,
2005 * and assign ichain->data to a pre-zero'd space (because there
2006 * is not prior data to copy into it).
2008 * We have to set SUBMODIFIED in ichain's flags manually so the
2009 * flusher knows it has to recurse through it to get to all of
2010 * our moved blocks, then call setsubmod() to set the bit
2013 hammer2_chain_modify(hmp, ichain, HAMMER2_MODIFY_OPTDATA);
2014 hammer2_chain_parent_setsubmod(hmp, ichain);
2015 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2018 * Figure out what to return.
2020 if (create_bits > keybits) {
2022 * Key being created is way outside the key range,
2023 * return the original parent.
2025 hammer2_chain_unlock(hmp, ichain);
2026 } else if (~(((hammer2_key_t)1 << keybits) - 1) &
2027 (create_key ^ key)) {
2029 * Key being created is outside the key range,
2030 * return the original parent.
2032 hammer2_chain_unlock(hmp, ichain);
2035 * Otherwise it's in the range, return the new parent.
2036 * (leave both the new and old parent locked).
2045 * Physically delete the specified chain element. Note that inodes with
2046 * open descriptors should not be deleted (as with other filesystems) until
2047 * the last open descriptor is closed.
2049 * This routine will remove the chain element from its parent and potentially
2050 * also recurse upward and delete indirect blocks which become empty as a
2053 * The caller must pass a pointer to the chain's parent, also locked and
2054 * referenced. (*parentp) will be modified in a manner similar to a lookup
2055 * or iteration when indirect blocks are also deleted as a side effect.
2057 * XXX This currently does not adhere to the MOVED flag protocol in that
2058 * the removal is immediately indicated in the parent's blockref[]
2062 hammer2_chain_delete(hammer2_mount_t *hmp, hammer2_chain_t *parent,
2063 hammer2_chain_t *chain, int retain)
2065 hammer2_blockref_t *base;
2066 hammer2_inode_t *ip;
2069 if (chain->parent != parent)
2070 panic("hammer2_chain_delete: parent mismatch");
2073 * Mark the parent modified so our base[] pointer remains valid
2074 * while we move entries. For the optimized indirect block
2075 * case mark the parent moved instead.
2077 * Calculate the blockref reference in the parent
2079 switch(parent->bref.type) {
2080 case HAMMER2_BREF_TYPE_INODE:
2081 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2082 base = &parent->data->ipdata.u.blockset.blockref[0];
2083 count = HAMMER2_SET_COUNT;
2085 case HAMMER2_BREF_TYPE_INDIRECT:
2086 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA |
2087 HAMMER2_MODIFY_NO_MODIFY_TID);
2088 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2091 base = &parent->data->npdata.blockref[0];
2092 count = parent->bytes / sizeof(hammer2_blockref_t);
2094 case HAMMER2_BREF_TYPE_VOLUME:
2095 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2096 base = &hmp->voldata.sroot_blockset.blockref[0];
2097 count = HAMMER2_SET_COUNT;
2100 panic("hammer2_chain_delete: unrecognized blockref type: %d",
2107 * Disconnect the bref in the parent, remove the chain, and
2108 * disconnect in-memory fields from the parent.
2110 KKASSERT(chain->index >= 0 && chain->index < count);
/* zero the media bref, then detach the in-memory chain */
2112 bzero(&base[chain->index], sizeof(*base));
2114 SPLAY_REMOVE(hammer2_chain_splay, &parent->shead, chain);
2115 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2116 atomic_add_int(&parent->refs, -1); /* for splay entry */
2118 chain->parent = NULL;
2121 * Cumulative adjustments must be propagated to the parent inode
2122 * when deleting and synchronized to ip.
2124 * NOTE: We do not propagate ip->delta_*count to the parent because
2125 * these represent adjustments that have not yet been
2126 * propagated upward, so we don't need to remove them from
2129 * Clear the pointer to the parent inode.
2131 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
/* mirror of the += adjustments made in hammer2_chain_create() */
2134 ip->pip->delta_icount -= ip->ip_data.inode_count;
2135 ip->pip->delta_dcount -= ip->ip_data.data_count;
2136 ip->ip_data.inode_count += ip->delta_icount;
2137 ip->ip_data.data_count += ip->delta_dcount;
2138 ip->delta_icount = 0;
2139 ip->delta_dcount = 0;
2140 --ip->pip->delta_icount;
2143 chain->u.ip->depth = 0;
2147 * If retain is 0 the deletion is permanent. Because the chain is
2148 * no longer connected to the topology a flush will have no
2149 * visibility into it. We must dispose of the references related
2150 * to the MODIFIED and MOVED flags, otherwise the ref count will
2151 * never transition to 0.
2153 * If retain is non-zero the deleted element is likely an inode
2154 * which the vnops frontend will mark DESTROYED and flush. In that
2155 * situation we must retain the flags for any open file descriptors
2156 * on the (removed) inode. The final close will destroy the
2157 * disconnected chain.
2160 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2161 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
2162 hammer2_chain_drop(hmp, chain);
2164 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2165 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2166 hammer2_chain_drop(hmp, chain);
2171 * The chain is still likely referenced, possibly even by a vnode
2172 * (if an inode), so defer further action until the chain gets
2178 * Recursively flush the specified chain. The chain is locked and
2179 * referenced by the caller and will remain so on return. The chain
2180 * will remain referenced throughout but can temporarily lose its
2181 * lock during the recursion to avoid unnecessarily stalling user
/* List head type for chains whose flush was deferred (depth limit). */
2186 TAILQ_HEAD(flush_deferral_list, hammer2_chain);
/*
 * Per-flush context threaded through the recursive pass1 flush.
 */
2188 struct hammer2_flush_info {
2189 struct flush_deferral_list flush_list;
2191 hammer2_tid_t modify_tid;
2194 typedef struct hammer2_flush_info hammer2_flush_info_t;
2197 hammer2_chain_flush_pass1(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2198 hammer2_flush_info_t *info)
2200 hammer2_blockref_t *bref;
2201 hammer2_off_t pbase;
2210 * If we hit the stack recursion depth limit defer the operation.
2211 * The controller of the info structure will execute the deferral
2212 * list and then retry.
2214 * This is only applicable if SUBMODIFIED is set. After a reflush
2215 * SUBMODIFIED will probably be cleared and we want to drop through
2216 * to finish processing the current element so our direct parent
2217 * can process the results.
2219 if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT &&
2220 (chain->flags & HAMMER2_CHAIN_SUBMODIFIED)) {
2221 if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
2222 hammer2_chain_ref(hmp, chain);
2223 TAILQ_INSERT_TAIL(&info->flush_list,
2225 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
2230 if (hammer2_debug & 0x0008)
2231 kprintf("%*.*sCHAIN type=%d@%08jx %p/%d %04x {\n",
2232 info->depth, info->depth, "",
2233 chain->bref.type, chain->bref.data_off,
2234 chain, chain->refs, chain->flags);
2237 * If SUBMODIFIED is set we recurse the flush and adjust the
2238 * blockrefs accordingly.
2240 * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
2241 * finishing in the face of filesystem activity.
2243 if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
2244 hammer2_chain_t *child;
2245 hammer2_chain_t *next;
2246 hammer2_blockref_t *base;
2250 * Clear SUBMODIFIED to catch races. Note that if any
2251 * child has to be flushed SUBMODIFIED will wind up being
2252 * set again (for next time), but this does not stop us from
2253 * synchronizing block updates which occurred.
2255 * We don't want to set our chain to MODIFIED gratuitously.
2257 /* XXX SUBMODIFIED not interlocked, can race */
2258 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2261 * Flush the children and update the blockrefs in the chain.
2262 * Be careful of ripouts during the loop.
2264 next = SPLAY_MIN(hammer2_chain_splay, &chain->shead);
2266 hammer2_chain_ref(hmp, next);
2267 while ((child = next) != NULL) {
2268 next = SPLAY_NEXT(hammer2_chain_splay,
2269 &chain->shead, child);
2271 hammer2_chain_ref(hmp, next);
2273 * We only recurse if SUBMODIFIED (internal node)
2274 * or MODIFIED (internal node or leaf) is set.
2275 * However, we must still track whether any MOVED
2276 * entries are present to determine if the chain's
2277 * blockref's need updating or not.
2279 if ((child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2280 HAMMER2_CHAIN_MODIFIED |
2281 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2282 hammer2_chain_drop(hmp, child);
2285 hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_MAYBE);
2286 hammer2_chain_drop(hmp, child);
2287 if (child->parent != chain ||
2288 (child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2289 HAMMER2_CHAIN_MODIFIED |
2290 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2291 hammer2_chain_unlock(hmp, child);
2296 * Propagate the DESTROYED flag if found set, then
2297 * recurse the flush.
2299 if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
2300 (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
2301 atomic_set_int(&child->flags,
2302 HAMMER2_CHAIN_DESTROYED |
2303 HAMMER2_CHAIN_SUBMODIFIED);
2306 hammer2_chain_flush_pass1(hmp, child, info);
2308 hammer2_chain_unlock(hmp, child);
2312 * Now synchronize any block updates.
2314 next = SPLAY_MIN(hammer2_chain_splay, &chain->shead);
2316 hammer2_chain_ref(hmp, next);
2317 while ((child = next) != NULL) {
2318 next = SPLAY_NEXT(hammer2_chain_splay,
2319 &chain->shead, child);
2321 hammer2_chain_ref(hmp, next);
2322 if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
2323 hammer2_chain_drop(hmp, child);
2326 hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_NEVER);
2327 hammer2_chain_drop(hmp, child);
2328 if (child->parent != chain ||
2329 (child->flags & HAMMER2_CHAIN_MOVED) == 0) {
2330 hammer2_chain_unlock(hmp, child);
2334 hammer2_chain_modify(hmp, chain,
2335 HAMMER2_MODIFY_NO_MODIFY_TID);
2337 switch(chain->bref.type) {
2338 case HAMMER2_BREF_TYPE_INODE:
2339 KKASSERT((chain->data->ipdata.op_flags &
2340 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2341 base = &chain->data->ipdata.u.blockset.
2343 count = HAMMER2_SET_COUNT;
2345 case HAMMER2_BREF_TYPE_INDIRECT:
2346 base = &chain->data->npdata.blockref[0];
2347 count = chain->bytes /
2348 sizeof(hammer2_blockref_t);
2350 case HAMMER2_BREF_TYPE_VOLUME:
2351 base = &hmp->voldata.sroot_blockset.blockref[0];
2352 count = HAMMER2_SET_COUNT;
2356 panic("hammer2_chain_get: "
2357 "unrecognized blockref type: %d",
2361 KKASSERT(child->index >= 0);
2362 base[child->index] = child->bref_flush;
2364 if (chain->bref.mirror_tid <
2365 child->bref_flush.mirror_tid) {
2366 chain->bref.mirror_tid =
2367 child->bref_flush.mirror_tid;
2370 if (chain->bref.type == HAMMER2_BREF_TYPE_VOLUME &&
2371 hmp->voldata.mirror_tid <
2372 child->bref_flush.mirror_tid) {
2373 hmp->voldata.mirror_tid =
2374 child->bref_flush.mirror_tid;
2376 atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
2377 hammer2_chain_drop(hmp, child); /* MOVED flag */
2378 hammer2_chain_unlock(hmp, child);
2383 * If destroying the object we unconditonally clear the MODIFIED
2384 * and MOVED bits, and we destroy the buffer without writing it
2387 * We don't bother updating the hash/crc or the chain bref.
2389 * NOTE: The destroyed object's bref has already been updated,
2390 * so we can clear MOVED without propagating mirror_tid
2391 * or modify_tid upward.
2393 * XXX allocations for unflushed data can be returned to the
2396 if (chain->flags & HAMMER2_CHAIN_DESTROYED) {
2397 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2399 chain->bp->b_flags |= B_INVAL|B_RELBUF;
2401 atomic_clear_int(&chain->flags,
2402 HAMMER2_CHAIN_MODIFIED |
2403 HAMMER2_CHAIN_MODIFY_TID);
2404 hammer2_chain_drop(hmp, chain);
2406 if (chain->flags & HAMMER2_CHAIN_MODIFIED_AUX) {
2407 atomic_clear_int(&chain->flags,
2408 HAMMER2_CHAIN_MODIFIED_AUX);
2410 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2411 atomic_clear_int(&chain->flags,
2412 HAMMER2_CHAIN_MOVED);
2413 hammer2_chain_drop(hmp, chain);
2419 * Flush this chain entry only if it is marked modified.
2421 if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2422 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2427 * Synchronize cumulative data and inode count adjustments to
2428 * the inode and propagate the deltas upward to the parent.
2430 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2431 hammer2_inode_t *ip;
2434 ip->ip_data.inode_count += ip->delta_icount;
2435 ip->ip_data.data_count += ip->delta_dcount;
2437 ip->pip->delta_icount += ip->delta_icount;
2438 ip->pip->delta_dcount += ip->delta_dcount;
2440 ip->delta_icount = 0;
2441 ip->delta_dcount = 0;
2445 * Flush if MODIFIED or MODIFIED_AUX is set. MODIFIED_AUX is only
2446 * used by the volume header (&hmp->vchain).
2448 if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2449 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2452 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED_AUX);
2455 * Clear MODIFIED and set HAMMER2_CHAIN_MOVED. The caller
2456 * will re-test the MOVED bit. We must also update the mirror_tid
2457 * and modify_tid fields as appropriate.
2459 * bits own a single chain ref and the MOVED bit owns its own
2462 chain->bref.mirror_tid = info->modify_tid;
2463 if (chain->flags & HAMMER2_CHAIN_MODIFY_TID)
2464 chain->bref.modify_tid = info->modify_tid;
2465 wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
2466 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
2467 HAMMER2_CHAIN_MODIFY_TID);
2469 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2471 * Drop the ref from the MODIFIED bit we cleared.
2474 hammer2_chain_drop(hmp, chain);
2477 * If we were MODIFIED we inherit the ref from clearing
2478 * that bit, otherwise we need another ref.
2480 if (wasmodified == 0)
2481 hammer2_chain_ref(hmp, chain);
2482 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2484 chain->bref_flush = chain->bref;
2487 * If this is part of a recursive flush we can go ahead and write
2488 * out the buffer cache buffer and pass a new bref back up the chain.
2490 * This will never be a volume header.
2492 switch(chain->bref.type) {
2493 case HAMMER2_BREF_TYPE_VOLUME:
2495 * The volume header is flushed manually by the syncer, not
2499 case HAMMER2_BREF_TYPE_DATA:
2501 * Data elements have already been flushed via the logical
2502 * file buffer cache. Their hash was set in the bref by
2503 * the vop_write code.
2505 * Make sure the buffer(s) have been flushed out here.
2507 bbytes = chain->bytes;
2508 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
2509 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2511 bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
2513 if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
2514 (B_CACHE | B_DIRTY)) {
2518 bp->b_flags |= B_RELBUF;
2523 case HAMMER2_BREF_TYPE_INDIRECT:
2525 * Indirect blocks may be in an INITIAL state. Use the
2526 * chain_lock() call to ensure that the buffer has been
2527 * instantiated (even though it is already locked the buffer
2528 * might not have been instantiated).
2530 * Only write the buffer out if it is dirty, it is possible
2531 * the operating system had already written out the buffer.
2533 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_ALWAYS);
2534 KKASSERT(chain->bp != NULL);
2537 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
2538 (bp->b_flags & B_DIRTY)) {
2545 hammer2_chain_unlock(hmp, chain);
2549 * Embedded elements have to be flushed out.
2551 KKASSERT(chain->data != NULL);
2552 KKASSERT(chain->bp == NULL);
2553 bref = &chain->bref;
2555 KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
2557 if (chain->bp == NULL) {
2559 * The data is embedded, we have to acquire the
2560 * buffer cache buffer and copy the data into it.
2562 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
2563 bbytes = HAMMER2_MINIOSIZE;
2564 pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
2565 boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2568 * The getblk() optimization can only be used if the
2569 * physical block size matches the request.
2571 if (chain->bytes == bbytes) {
2572 bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
2575 error = bread(hmp->devvp, pbase, bbytes, &bp);
2576 KKASSERT(error == 0);
2578 bdata = (char *)bp->b_data + boff;
2581 * Copy the data to the buffer, mark the buffer
2582 * dirty, and convert the chain to unmodified.
2584 * We expect we might have to make adjustments to
2585 * non-data delayed-write buffers when doing an
2586 * actual flush so use bawrite() instead of
2587 * cluster_awrite() here.
2589 bcopy(chain->data, bdata, chain->bytes);
2590 bp->b_flags |= B_CLUSTEROK;
2593 chain->bref.check.iscsi32.value =
2594 hammer2_icrc32(chain->data, chain->bytes);
2595 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
2596 ++hammer2_iod_meta_write;
2598 ++hammer2_iod_indr_write;
2600 chain->bref.check.iscsi32.value =
2601 hammer2_icrc32(chain->data, chain->bytes);
2606 * Adjustments to the bref. The caller will use this to adjust
2607 * our chain's pointer to this chain element.
2609 bref = &chain->bref;
2611 switch(bref->type) {
2612 case HAMMER2_BREF_TYPE_VOLUME:
2613 KKASSERT(chain->data != NULL);
2614 KKASSERT(chain->bp == NULL);
2616 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
2618 (char *)&hmp->voldata +
2619 HAMMER2_VOLUME_ICRC1_OFF,
2620 HAMMER2_VOLUME_ICRC1_SIZE);
2621 hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
2623 (char *)&hmp->voldata +
2624 HAMMER2_VOLUME_ICRC0_OFF,
2625 HAMMER2_VOLUME_ICRC0_SIZE);
2626 hmp->voldata.icrc_volheader =
2628 (char *)&hmp->voldata +
2629 HAMMER2_VOLUME_ICRCVH_OFF,
2630 HAMMER2_VOLUME_ICRCVH_SIZE);
2637 if (hammer2_debug & 0x0008) {
2638 kprintf("%*.*s} %p/%d %04x ",
2639 info->depth, info->depth, "",
2640 chain, chain->refs, chain->flags);
2646 * PASS2 - not yet implemented (should be called only with the root chain?)
2649 hammer2_chain_flush_pass2(hammer2_mount_t *hmp, hammer2_chain_t *chain)
2655 * Stand-alone flush. If the chain is unable to completely flush we have
2656 * to be sure that SUBMODIFIED propagates up the parent chain. We must not
2657 * clear the MOVED bit after flushing in this situation or our desynchronized
2658 * bref will not properly update in the parent.
2660 * This routine can be called from several places but the most important
2661 * is from the hammer2_vop_reclaim() function. We want to try to completely
2662 * clean out the inode structure to prevent disconnected inodes from
2663 * building up and blowing out the kmalloc pool.
2665 * If modify_tid is 0 (usual case), a new modify_tid is allocated and
2666 * applied to the flush. The depth-limit handling code is the only
2667 * code which passes a non-zero modify_tid to hammer2_chain_flush().
2670 hammer2_chain_flush(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2671 hammer2_tid_t modify_tid)
2673 hammer2_chain_t *parent;
2674 hammer2_chain_t *scan;
2675 hammer2_blockref_t *base;
2676 hammer2_flush_info_t info;
2681 * Execute the recursive flush and handle deferrals.
2683 * Chains can be ridiculously long (thousands deep), so to
2684 * avoid blowing out the kernel stack the recursive flush has a
2685 * depth limit. Elements at the limit are placed on a list
2686 * for re-execution after the stack has been popped.
2688 bzero(&info, sizeof(info));
2689 TAILQ_INIT(&info.flush_list);
2691 if (modify_tid == 0) {
2692 hammer2_voldata_lock(hmp);
2693 info.modify_tid = hmp->voldata.alloc_tid++;
2694 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
2695 hammer2_voldata_unlock(hmp);
2697 info.modify_tid = modify_tid;
2705 hammer2_chain_flush_pass1(hmp, chain, &info);
2708 while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
2710 * Secondary recursion. Note that a reference is
2711 * retained from the element's presence on the
2714 KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
2715 TAILQ_REMOVE(&info.flush_list, scan, flush_node);
2716 atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);
2719 * Now that we've popped back up we can do a secondary
2720 * recursion on the deferred elements.
2722 if (hammer2_debug & 0x0040)
2723 kprintf("defered flush %p\n", scan);
2724 hammer2_chain_lock(hmp, scan, HAMMER2_RESOLVE_MAYBE);
2725 hammer2_chain_flush(hmp, scan, info.modify_tid);
2726 hammer2_chain_unlock(hmp, scan);
2729 * Only flag a reflush if SUBMODIFIED is no longer
2730 * set. If SUBMODIFIED is set the element will just
2731 * wind up on our flush_list again.
2733 if ((scan->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2734 HAMMER2_CHAIN_MODIFIED |
2735 HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2738 hammer2_chain_drop(hmp, scan);
2740 if ((hammer2_debug & 0x0040) && reflush)
2741 kprintf("reflush %p\n", chain);
2745 * The SUBMODIFIED bit must propagate upward if the chain could not
2746 * be completely flushed.
2748 if (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2749 HAMMER2_CHAIN_MODIFIED |
2750 HAMMER2_CHAIN_MODIFIED_AUX |
2751 HAMMER2_CHAIN_MOVED)) {
2752 hammer2_chain_parent_setsubmod(hmp, chain);
2756 * If the only thing left is a simple bref update try to
2757 * pro-actively update the parent, otherwise return early.
2759 parent = chain->parent;
2760 if (parent == NULL) {
2763 if (chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
2764 (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2765 HAMMER2_CHAIN_MODIFIED |
2766 HAMMER2_CHAIN_MODIFIED_AUX |
2767 HAMMER2_CHAIN_MOVED)) != HAMMER2_CHAIN_MOVED) {
2772 * We are locking backwards so allow the lock to fail.
2774 if (ccms_thread_lock_nonblock(&parent->cst, CCMS_STATE_EXCLUSIVE))
2778 * We are updating brefs but we have to call chain_modify()
2779 * because our caller is not being run from a recursive flush.
2781 * This will also chain up the parent list and set the SUBMODIFIED
2784 * We do not want to set HAMMER2_CHAIN_MODIFY_TID here because the
2785 * modification is only related to updating a bref in the parent.
2787 * When updating the blockset embedded in the volume header we must
2788 * also update voldata.mirror_tid.
2790 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
2791 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2793 switch(parent->bref.type) {
2794 case HAMMER2_BREF_TYPE_INODE:
2795 base = &parent->data->ipdata.u.blockset.
2797 count = HAMMER2_SET_COUNT;
2799 case HAMMER2_BREF_TYPE_INDIRECT:
2800 base = &parent->data->npdata.blockref[0];
2801 count = parent->bytes /
2802 sizeof(hammer2_blockref_t);
2804 case HAMMER2_BREF_TYPE_VOLUME:
2805 base = &hmp->voldata.sroot_blockset.blockref[0];
2806 count = HAMMER2_SET_COUNT;
2807 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2808 if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
2809 hmp->voldata.mirror_tid =
2810 chain->bref.mirror_tid;
2816 panic("hammer2_chain_flush: "
2817 "unrecognized blockref type: %d",
2822 * Update the blockref in the parent. We do not have to set
2823 * MOVED in the parent because the parent has been marked modified,
2824 * so the flush sequence will pick up the bref change.
2826 * We do have to propagate mirror_tid upward.
2828 KKASSERT(chain->index >= 0 &&
2829 chain->index < count);
2830 KKASSERT(chain->parent == parent);
2831 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2832 base[chain->index] = chain->bref_flush;
2833 if (parent->bref.mirror_tid < chain->bref_flush.mirror_tid)
2834 parent->bref.mirror_tid = chain->bref_flush.mirror_tid;
2835 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2836 hammer2_chain_drop(hmp, chain);
2837 } else if (bcmp(&base[chain->index], &chain->bref_flush,
2838 sizeof(chain->bref)) != 0) {
2839 panic("hammer2: unflagged bref update(2)");
2841 ccms_thread_unlock(&parent->cst); /* release manual op */
2842 hammer2_chain_unlock(hmp, parent);