/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"
/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
TAILQ_HEAD(flush_deferral_list, hammer2_chain);

struct hammer2_flush_info {
	hammer2_mount_t	*hmp;
	hammer2_chain_t	*parent;
	hammer2_trans_t	*trans;
	int		depth;
	int		diddeferral;
	struct flush_deferral_list flush_list;
	hammer2_tid_t	sync_tid;	/* flush synchronization point */
	hammer2_tid_t	mirror_tid;	/* collect mirror TID updates */
};

typedef struct hammer2_flush_info hammer2_flush_info_t;
static void hammer2_chain_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain);
static int hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data);
static int hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data);
/*
 * Transaction support functions for writing to the filesystem.
 *
 * Initializing a new transaction allocates a transaction ID.  We
 * don't bother marking the volume header MODIFIED.  Instead, the volume
 * will be synchronized at a later time as part of a larger flush sequence.
 *
 * Non-flush transactions can typically run concurrently.  However if
 * there are non-flush transactions both before AND after a flush trans,
 * the transactions after the flush stall until the ones before it finish.
 *
 * Non-flush transactions occurring after a flush point can run concurrently
 * with that flush.  They only have to wait for transactions prior to the
 * flush trans to complete before they unstall.
 *
 * WARNING! Modifications to the root volume cannot dup the root volume
 *	    header to handle synchronization points, so alloc_tid can
 *	    wind up (harmlessly) more advanced on flush.
 *
 * WARNING! Operations which might call inode_duplicate()/chain_duplicate()
 *	    depend heavily on having a unique sync_tid to avoid duplication
 *	    collisions (which key off of delete_tid).
 */
void
hammer2_trans_init(hammer2_mount_t *hmp, hammer2_trans_t *trans, int flags)
{
	hammer2_trans_t *scan;

	bzero(trans, sizeof(*trans));
	trans->hmp = hmp;

	hammer2_voldata_lock(hmp);
	trans->sync_tid = hmp->voldata.alloc_tid++;
	trans->flags = flags;
	trans->td = curthread;
	TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);

	if (flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we are a flush we have to wait for all transactions
		 * prior to our flush synchronization point to complete
		 * before we can start our flush.
		 */
		++hmp->flushcnt;
		if (hmp->curflush == NULL) {
			hmp->curflush = trans;
			hmp->flush_tid = trans->sync_tid;
		}
		while (TAILQ_FIRST(&hmp->transq) != trans) {
			lksleep(&trans->sync_tid, &hmp->voldatalk,
				0, "h2syncw", hz);
		}

		/*
		 * Once we become the running flush we can wakeup anyone
		 * else blocked on us, up to the next flush.
		 */
		scan = trans;
		while ((scan = TAILQ_NEXT(scan, entry)) != NULL) {
			if (scan->flags & HAMMER2_TRANS_ISFLUSH)
				break;
			if (scan->blocked == 0)
				break;
			scan->blocked = 0;
			wakeup(&scan->blocked);
		}
	} else {
		/*
		 * If we are not a flush but our sync_tid is after a
		 * stalled flush, we have to wait until that flush unstalls
		 * (that is, all transactions prior to that flush complete),
		 * but then we can run concurrently with that flush.
		 *
		 * (flushcnt check only good as pre-condition, otherwise it
		 *  may represent elements queued after us after we block).
		 */
		if (hmp->flushcnt > 1 ||
		    (hmp->curflush &&
		     TAILQ_FIRST(&hmp->transq) != hmp->curflush)) {
			trans->blocked = 1;
			while (trans->blocked) {
				lksleep(&trans->blocked, &hmp->voldatalk,
					0, "h2trans", hz);
			}
		}
	}
	hammer2_voldata_unlock(hmp, 0);
}
void
hammer2_trans_done(hammer2_trans_t *trans)
{
	hammer2_mount_t *hmp = trans->hmp;
	hammer2_trans_t *scan;

	hammer2_voldata_lock(hmp);
	TAILQ_REMOVE(&hmp->transq, trans, entry);
	if (trans->flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we were a flush we have to adjust curflush to the
		 * next flush (if any).
		 *
		 * flush_tid is used to partition copy-on-write operations
		 * (mostly duplicate-on-modify ops), which is what allows
		 * us to execute a flush concurrent with modifying operations
		 * that have later sync_tids.
		 */
		--hmp->flushcnt;
		TAILQ_FOREACH(scan, &hmp->transq, entry) {
			if (scan->flags & HAMMER2_TRANS_ISFLUSH)
				break;
		}
		if (scan) {
			hmp->curflush = scan;
			hmp->flush_tid = scan->sync_tid;
		} else {
			/*
			 * Theoretically we don't have to clear flush_tid
			 * here since the flush will have synchronized
			 * all operations <= flush_tid already.  But for
			 * now be safe.
			 */
			hmp->curflush = NULL;
		}
	} else {
		/*
		 * If we are not a flush but a flush is now at the head
		 * of the queue and we were previously blocking it,
		 * we can now unblock it.
		 */
		if (hmp->flushcnt &&
		    (scan = TAILQ_FIRST(&hmp->transq)) != NULL &&
		    trans->sync_tid < scan->sync_tid &&
		    (scan->flags & HAMMER2_TRANS_ISFLUSH)) {
			wakeup(&scan->sync_tid);
		}
	}
	hammer2_voldata_unlock(hmp, 0);
}
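
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs hammer2_trans_init() with hammer2_trans_done() around a modifying
 * operation.  The function name and the elided middle are hypothetical;
 * only the init/done calls reflect the API defined above.
 */
#if 0
static int
example_modifying_op(hammer2_mount_t *hmp)
{
	hammer2_trans_t trans;

	/* allocate a sync_tid and queue ourselves on hmp->transq */
	hammer2_trans_init(hmp, &trans, 0);

	/* ... perform chain modifications keyed to trans.sync_tid ... */

	/* dequeue; may unblock a flush waiting at the queue head */
	hammer2_trans_done(&trans);
	return (0);
}
#endif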
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point (sync_tid), propagating parent chain modifications
 * and mirror_tid updates back up as needed.  Since we are recursing downward
 * we do not have to deal with the complexities of multi-homed chains (chains
 * with multiple parents).
 *
 * Caller must have interlocked against any non-flush-related modifying
 * operations in progress whose modify_tid values are less than or equal
 * to the passed sync_tid.
 *
 * Caller must have already vetted synchronization points to ensure they
 * are properly flushed.  Only snapshots and cluster flushes can create
 * these sorts of synchronization points.
 *
 * This routine can be called from several places but the most important
 * is from the hammer2_vop_reclaim() function.  We want to try to completely
 * clean out the inode structure to prevent disconnected inodes from
 * building up and blowing out the kmalloc pool.  However, it is not actually
 * necessary to flush reclaimed inodes to maintain HAMMER2's crash recovery
 * guarantees.
 *
 * The chain is locked on call and will remain locked on return.  If a flush
 * occurred, the chain's MOVED bit will be set indicating that its parent
 * (which is not part of the flush) should be updated.
 */
void
hammer2_chain_flush(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_t *scan;
	hammer2_chain_core_t *core;
	hammer2_flush_info_t info;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flush_list);
	info.hmp = trans->hmp;
	info.trans = trans;
	info.sync_tid = trans->sync_tid;
	info.mirror_tid = 0;

	core = chain->core;

	for (;;) {
		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave MOVED set for these chains, which will be
		 * handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flush_list, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_chain_flush(trans, scan);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}
		/*
		 * Flush pass1 on root.
		 */
		info.diddeferral = 0;
		hammer2_chain_flush_core(&info, chain);
		if (hammer2_debug & 0x0040)
			kprintf("flush_core_done parent=<base> chain=%p.%d %08x\n",
				chain, chain->bref.type, chain->flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flush_list))
			break;
	}
}
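
/*
 * Illustrative sketch of the deferral pattern used above, with an assumed
 * simplified node type (example_node, example_flush, and the depth limit
 * are hypothetical, not part of this file): recursion stops at a fixed
 * depth and parks the node on a list, and the caller re-runs the flush
 * until the list drains.
 */
#if 0
TAILQ_HEAD(example_list, example_node);
struct example_node {
	TAILQ_ENTRY(example_node) entry;
	struct example_node *child;
};

#define EXAMPLE_DEPTH_LIMIT	10

static void
example_flush(struct example_node *node, struct example_list *list,
	      int depth)
{
	if (depth == EXAMPLE_DEPTH_LIMIT) {
		/* too deep: defer to a later top-level pass */
		TAILQ_INSERT_TAIL(list, node, entry);
		return;
	}
	if (node->child)
		example_flush(node->child, list, depth + 1);
	/* ... flush node itself here, after its subtree ... */
}
#endif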
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and remains locked on return.  This function is keyed off of
 * the SUBMODIFIED bit but must make fine-grained choices based on the
 * synchronization point we are flushing to.
 *
 * If the flush accomplished any work the chain will be flagged MOVED,
 * indicating that a copy-on-write propagation back up is required.
 * Deep sub-nodes may also have been entered onto the deferral list.
 * MOVED is never set on the volume root.
 *
 * NOTE: modify_tid is different from MODIFIED.  modify_tid is updated
 *	 only when a chain is specifically modified, and not updated
 *	 for copy-on-write propagations.  MODIFIED is set on any modification
 *	 including copy-on-write propagations.
 */
static void
hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain)
{
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;
	hammer2_tid_t saved_sync;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_core_t *core;
	size_t bbytes;
	size_t boff;
	char *bdata;
	struct buf *bp;
	int error;
	int wasmodified;
	int diddeferral = 0;

	hmp = info->hmp;
	core = chain->core;

	if (info->parent) {
		kprintf("flush_core %p->%p.%d %08x (%s)\n",
			info->parent, chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
			 (char *)chain->data->ipdata.filename : "?"));
	} else {
		kprintf("flush_core NULL->%p.%d %08x (%s)\n",
			chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
			 (char *)chain->data->ipdata.filename : "?"));
	}
	/*
	 * Ignore chains modified beyond the current flush point.  These
	 * will be treated as if they did not exist.
	 */
	if (chain->modify_tid > info->sync_tid)
		return;

	/*
	 * Deleted chains which have not been destroyed must be retained,
	 * and we probably have to recurse to clean-up any sub-trees.
	 * However, restricted flushes can stop processing here because
	 * the chain cleanup will be handled by a later normal flush.
	 *
	 * The MODIFIED bit can likely be cleared in this situation and we
	 * will do so later on in this procedure.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (trans->flags & HAMMER2_TRANS_RESTRICTED)
			return;
	}

	saved_sync = info->sync_tid;
	/*
	 * If SUBMODIFIED is set we recurse the flush and adjust the
	 * blockrefs accordingly.
	 *
	 * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
	 *	 finishing in the face of filesystem activity.
	 */
	if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
		hammer2_chain_t *saved_parent;
		hammer2_tid_t saved_mirror;

		/*
		 * Clear SUBMODIFIED to catch races.  Note that any child
		 * with MODIFIED, DELETED, or MOVED set during Scan2, after
		 * it processes the child, will cause SUBMODIFIED to be
		 * set again in the parent.  If a deferral occurs and the
		 * child has to be flushed SUBMODIFIED will wind up being
		 * set again (for next time), but this does not stop us from
		 * synchronizing block updates which occurred.
		 *
		 * We don't want to set our chain to MODIFIED gratuitously.
		 *
		 * We need an extra ref on chain because we are going to
		 * release its lock temporarily in our child loop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
		hammer2_chain_ref(chain);

		/*
		 * Run two passes.  The first pass handles MODIFIED and
		 * SUBMODIFIED chains and recurses while the second pass
		 * handles MOVED chains on the way back up.
		 *
		 * If the stack gets too deep we defer scan1, but must
		 * be sure to still run scan2 if on the next loop the
		 * deferred chain has been flushed and now needs MOVED
		 * handling on the way back up.
		 *
		 * Scan1 is recursive.
		 *
		 * NOTE: The act of handling a modified/submodified chain can
		 *	 cause the MOVED flag to be set.  It can also be set
		 *	 via hammer2_chain_delete() and in other situations.
		 *
		 * NOTE: RB_SCAN() must be used instead of RB_FOREACH()
		 *	 because children can be physically removed during
		 *	 the scan.
		 */
		saved_parent = info->parent;
		saved_mirror = info->mirror_tid;
		info->parent = chain;
		info->mirror_tid = chain->bref.mirror_tid;

		if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
			if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
				hammer2_chain_ref(chain);
				TAILQ_INSERT_TAIL(&info->flush_list,
						  chain, flush_node);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_DEFERRED);
			}
			diddeferral = 1;
		} else {
			info->diddeferral = 0;
			spin_lock(&core->cst.spin);
			RB_SCAN(hammer2_chain_tree, &chain->core->rbtree,
				NULL, hammer2_chain_flush_scan1, info);
			spin_unlock(&core->cst.spin);
			diddeferral += info->diddeferral;
		}
		/*
		 * Handle successfully flushed children who are in the MOVED
		 * state on the way back up the recursion.  This can have
		 * the side-effect of clearing MOVED.
		 *
		 * We execute this even if there were deferrals to try to
		 * keep the chain topology cleaner.
		 *
		 * Scan2 is non-recursive.
		 */
		if (diddeferral) {
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		if (hammer2_debug & 0x0008)
			kprintf("scan2_start parent %p %08x\n",
				chain, chain->flags);
		spin_lock(&core->cst.spin);
		RB_SCAN(hammer2_chain_tree, &core->rbtree,
			NULL, hammer2_chain_flush_scan2, info);
		spin_unlock(&core->cst.spin);
		if (hammer2_debug & 0x0008)
			kprintf("scan2_stop  parent %p %08x\n",
				chain, chain->flags);
		chain->bref.mirror_tid = info->mirror_tid;
		info->mirror_tid = saved_mirror;
		info->parent = saved_parent;
		hammer2_chain_drop(chain);
	}

	/*
	 * Restore sync_tid in case it was restricted by a delete/duplicate.
	 */
	info->sync_tid = saved_sync;

	/*
	 * Rollup diddeferral for caller.  Note direct assignment, not +=.
	 */
	info->diddeferral = diddeferral;
	/*
	 * Do not flush chain if there were any deferrals.  It will be
	 * retried later after the deferrals are independently handled.
	 */
	if (diddeferral) {
		if (hammer2_debug & 0x0008) {
			kprintf("%*.*s} %p/%d %04x (deferred)",
				info->depth, info->depth, "",
				chain, chain->refs, chain->flags);
		}
		return;
	}
	/*
	 * If we encounter a deleted chain within our flush we can clear
	 * the MODIFIED bit and avoid flushing it whether it has been
	 * destroyed or not.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp)
				chain->bp->b_flags |= B_INVAL|B_RELBUF;
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
	if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    (trans->flags & HAMMER2_TRANS_RESTRICTED) == 0) {
		/*
		 * Throw-away the MODIFIED flag.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp)
				chain->bp->b_flags |= B_INVAL|B_RELBUF;
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
	/*
	 * A degenerate flush might not have flushed anything and thus not
	 * processed modified blocks on the way back up.  Detect the case.
	 *
	 * Note that MOVED can be set without MODIFIED being set due to
	 * a deletion, in which case it is handled by Scan2 later on.
	 *
	 * Both bits can be set along with DELETED due to a deletion if
	 * the chain was modified within the synchronization zone and
	 * was then deleted beyond the zone, in which case we still have
	 * to flush for synchronization point consistency.  Otherwise though
	 * DELETED and MODIFIED are treated as separate flags.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
		return;
	/*
	 * A DESTROYED node that reaches this point must be flushed for
	 * synchronization point consistency.
	 *
	 * Update mirror_tid, clear MODIFIED, and set MOVED.
	 *
	 * The caller will update the parent's reference to this chain
	 * by testing MOVED as long as the modification was in-bounds.
	 *
	 * MOVED is never set on the volume root as there is no parent
	 * to adjust.
	 */
	if (chain->bref.mirror_tid < info->sync_tid)
		chain->bref.mirror_tid = info->sync_tid;
	wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
	if (chain == &hmp->vchain)
		kprintf("(FLUSHED VOLUME HEADER)\n");

	if ((chain->flags & HAMMER2_CHAIN_MOVED) ||
	    chain == &hmp->vchain) {
		/*
		 * Drop the ref from the MODIFIED bit we cleared.
		 */
		if (wasmodified)
			hammer2_chain_drop(chain);
	} else {
		/*
		 * If we were MODIFIED we inherit the ref from clearing
		 * that bit, otherwise we need another ref.
		 */
		if (wasmodified == 0)
			hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
	/*
	 * If this is part of a recursive flush we can go ahead and write
	 * out the buffer cache buffer and pass a new bref back up the chain.
	 *
	 * Volume headers are NOT flushed here as they require special
	 * processing.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/*
		 * The volume header is flushed manually by the syncer, not
		 * here.  All we do is adjust the crc's.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		kprintf("volume header mirror_tid %jd\n",
			(intmax_t)hmp->voldata.mirror_tid);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC1_OFF,
				HAMMER2_VOLUME_ICRC1_SIZE);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC0_OFF,
				HAMMER2_VOLUME_ICRC0_SIZE);
		hmp->voldata.icrc_volheader =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRCVH_OFF,
				HAMMER2_VOLUME_ICRCVH_SIZE);
		hmp->volsync = hmp->voldata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
		break;
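		/*
		 * (Sketch of the crc layering implied above: the two
		 * icrc_sects[] crc's each cover a sub-range of the volume
		 * header, while icrc_volheader covers the ICRCVH range,
		 * which includes the icrc_sects[] array itself.  That is
		 * why the sector crc's must be computed before the
		 * covering volume-header crc.)
		 */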
	case HAMMER2_BREF_TYPE_DATA:
		/*
		 * Data elements have already been flushed via the logical
		 * file buffer cache.  Their hash was set in the bref by
		 * the vop_write code.
		 *
		 * Make sure any device buffer(s) have been flushed out here
		 * (there aren't usually any to flush).
		 */
		bbytes = chain->bytes;
		pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
		boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);

		bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
		if (bp) {
			if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
			    (B_CACHE | B_DIRTY)) {
				bawrite(bp);
			} else {
				bp->b_flags |= B_RELBUF;
				brelse(bp);
			}
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Indirect blocks may be in an INITIAL state.  Use the
		 * chain_lock() call to ensure that the buffer has been
		 * instantiated (even though it is already locked the buffer
		 * might not have been instantiated).
		 *
		 * Only write the buffer out if it is dirty, it is possible
		 * the operating system had already written out the buffer.
		 */
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		KKASSERT(chain->bp != NULL);

		bp = chain->bp;
		if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
		    (bp->b_flags & B_DIRTY)) {
			atomic_clear_int(&chain->flags,
					 HAMMER2_CHAIN_DIRTYBP);
			bdwrite(bp);
		} else {
			brelse(bp);
		}
		chain->bp = NULL;
		chain->data = NULL;
		hammer2_chain_unlock(chain);
		break;
	default:
		/*
		 * Embedded elements have to be flushed out.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		bref = &chain->bref;

		KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
		KKASSERT(HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_ISCSI32);

		if (chain->bp == NULL) {
			/*
			 * The data is embedded, we have to acquire the
			 * buffer cache buffer and copy the data into it.
			 */
			if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
				bbytes = HAMMER2_MINIOSIZE;
			pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
			boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);

			/*
			 * The getblk() optimization can only be used if the
			 * physical block size matches the request.
			 */
			if (chain->bytes == bbytes) {
				bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
				error = 0;
			} else {
				error = bread(hmp->devvp, pbase, bbytes, &bp);
				KKASSERT(error == 0);
			}
			bdata = (char *)bp->b_data + boff;

			/*
			 * Copy the data to the buffer, mark the buffer
			 * dirty, and convert the chain to unmodified.
			 */
			bcopy(chain->data, bdata, chain->bytes);
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);

			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
			if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
				++hammer2_iod_meta_write;
			else
				++hammer2_iod_indr_write;
		} else {
			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
		}
	}
}
/*
 * Flush helper scan1 (recursive)
 *
 * Flushes the children of the caller's chain (parent) and updates
 * the blockref, restricted by sync_tid.
 *
 * Ripouts during the loop should not cause any problems.  Because we are
 * flushing to a synchronization point, modification races will occur after
 * sync_tid and do not have to be flushed anyway.
 *
 * It is also ok if the parent is chain_duplicate()'d while unlocked because
 * the delete/duplication will install a delete_tid that is still larger than
 * our current sync_tid.
 */
static int
hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_t *parent = info->parent;
	/*hammer2_mount_t *hmp = info->hmp;*/
	int diddeferral;
	/*
	 * We should only need to recurse if SUBMODIFIED is set, but as
	 * a safety also recurse if MODIFIED is also set.
	 *
	 * Return early if neither bit is set.  We must re-assert the
	 * SUBMODIFIED flag in the parent if any child covered by the
	 * parent (via delete_tid) is skipped.
	 */
	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}
	hammer2_chain_ref(child);
	spin_unlock(&parent->core->cst.spin);

	/*
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  Re-check the flags before
	 * continuing.
	 */
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}
	/*
	 * The DESTROYED flag can only be initially set on an unreferenced
	 * deleted inode and will propagate downward via the mechanic below.
	 * Such inode chains have been deleted for good and should no longer
	 * be subject to delete/duplication.
	 *
	 * This optimization allows the inode reclaim (destroy unlinked file
	 * on vnode reclamation after last close) to be flagged by just
	 * setting HAMMER2_CHAIN_DESTROYED at the top level.  It will then
	 * cause the chains to be terminated and related buffers to be
	 * invalidated and not flushed out.
	 *
	 * We have to be careful not to propagate the DESTROYED flag if
	 * the destruction occurred after our flush sync_tid.
	 */
	if ((parent->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (child->flags & HAMMER2_CHAIN_DELETED) &&
	    (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROYED |
					      HAMMER2_CHAIN_SUBMODIFIED);
	}
	/*
	 * Recurse and collect deferral data.
	 */
	diddeferral = info->diddeferral;
	++info->depth;
	hammer2_chain_flush_core(info, child);
	if (hammer2_debug & 0x0040)
		kprintf("flush_core_done parent=%p flags=%08x child=%p.%d %08x\n",
			parent, parent->flags, child, child->bref.type,
			child->flags);
	--info->depth;
	info->diddeferral += diddeferral;

	if (child->flags & HAMMER2_CHAIN_SUBMODIFIED)
		atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);

	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);

	spin_lock(&parent->core->cst.spin);
	return (0);
}
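
/*
 * Illustrative sketch (simplified, with the revalidation and error paths
 * elided) of the lock-order dance scan1 uses above: the child cannot be
 * locked while the parent's chain lock and core spinlock are held, so the
 * child is referenced first, the parent is unlocked, the child is locked
 * and its flags re-checked, and the parent is re-locked before returning
 * to the RB_SCAN().
 */
#if 0
	hammer2_chain_ref(child);			/* pin child */
	spin_unlock(&parent->core->cst.spin);		/* exit scan spinlock */
	hammer2_chain_unlock(parent);			/* avoid inversion */
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	/* ... re-check child->flags / modify_tid, then flush child ... */
	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	spin_lock(&parent->core->cst.spin);		/* resume scan */
#endif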
/*
 * Flush helper scan2 (non-recursive)
 *
 * This pass on a chain's children propagates any MOVED or DELETED
 * elements back up the chain towards the root after those elements have
 * been fully flushed.  Unlike scan1, this function is NOT recursive and
 * the parent remains locked across the entire scan.
 *
 * NOTE! We must re-set SUBMODIFIED on the parent(s) as appropriate, and
 *	 due to the above conditions it is possible to do this and still
 *	 have some children flagged MOVED depending on the synchronization.
 *
 * NOTE! A deletion is a visibility issue, there can still be references to
 *	 deleted elements (for example, to an unlinked file which is still
 *	 open), and there can also be multiple chains pointing to the same
 *	 bref where some are deleted and some are not (for example due to
 *	 a rename).  So a chain marked for deletion is basically considered
 *	 to be live until it is explicitly destroyed or until its ref-count
 *	 reaches zero (also implying that MOVED and MODIFIED are clear).
 */
static int
hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;
	hammer2_chain_core_t *above = child->above;
	hammer2_mount_t *hmp = info->hmp;
	hammer2_trans_t *trans = info->trans;
	hammer2_blockref_t *base;
	int count;
	/*
	 * Inodes with stale children that have been converted to DIRECTDATA
	 * mode (file extension or hardlink conversion typically) need to
	 * be skipped right now before we start messing with a non-existent
	 * blockref array.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)) {
		goto finish;
	}

	/*
	 * Ignore children created after our flush point, treating them as
	 * if they did not exist.  These children will not cause the parent
	 * to be updated.
	 *
	 * When we encounter such children and the parent chain has not been
	 * deleted, delete/duplicated, or delete/duplicated-for-move, then
	 * the parent may be used to funnel through several flush points.
	 * We must re-set the SUBMODIFIED flag in the parent to ensure that
	 * those flushes have visibility.  A simple test of delete_tid suffices
	 * to determine if the parent spans beyond our current flush.
	 */
	if (child->modify_tid > trans->sync_tid) {
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		goto finish;
	}
	/*
	 * Ignore children which have not changed.  The parent's block table
	 * is already correct.
	 */
	if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
		goto finish;
	}

	hammer2_chain_ref(child);
	spin_unlock(&above->cst.spin);
	/*
	 * The MOVED bit implies an additional reference which prevents
	 * the child from being destroyed out from under our operation
	 * so we can lock the child safely without worrying about it
	 * getting ripped up (?).
	 *
	 * We can only update parents where child->parent matches.  The
	 * child->parent link will migrate along the chain but the flush
	 * order must be enforced absolutely.  Parents reflushed after the
	 * child has passed them by should skip due to the modify_tid test.
	 */
	hammer2_chain_lock(child, HAMMER2_RESOLVE_NEVER);
	/*
	 * The parent's blockref to the child must be deleted or updated.
	 *
	 * This point is not reached on successful DESTROYED optimizations
	 * but can be reached on recursive deletions and restricted flushes.
	 *
	 * Because flushes are ordered we do not have to make a
	 * modify/duplicate of indirect blocks.  That is, the flush
	 * code does not have to kmalloc or duplicate anything.  We
	 * can adjust the indirect block table in-place and reuse the
	 * chain.  It IS possible that the chain has already been duplicated
	 * or may wind up being duplicated on-the-fly by modifying code
	 * on the frontend.  We simply use the original and ignore such
	 * chains.  However, it does mean we can't clear the MOVED bit.
	 *
	 * XXX recursive deletions not optimized.
	 */
	hammer2_chain_modify(trans, &parent,
			     HAMMER2_MODIFY_NO_MODIFY_TID |
			     HAMMER2_MODIFY_ASSERTNOCOPY);
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * XXX Should assert that OPFLAG_DIRECTDATA is 0 once we
		 * properly duplicate the inode headers and do proper flush
		 * range checks (all the children should be beyond the flush
		 * point).  For now just don't sync the non-applicable
		 * children.
		 *
		 * XXX Can also occur due to hardlink consolidation.  We
		 * set OPFLAG_DIRECTDATA to prevent the indirect and data
		 * blocks from syncing to the hardlink pointer.
		 */
#if 0
		KKASSERT((parent->data->ipdata.op_flags &
			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
#endif
		if (parent->data->ipdata.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			base = NULL;
		} else {
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		if (parent->data) {
			base = &parent->data->npdata.blockref[0];
		} else {
			base = NULL;
			KKASSERT(child->flags & HAMMER2_CHAIN_DELETED);
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		base = NULL;
		count = 0;
		panic("hammer2_chain_get: "
		      "unrecognized blockref type: %d",
		      parent->bref.type);
	}
	/*
	 * Update the parent's blockref table and propagate mirror_tid.
	 *
	 * NOTE! Children with modify_tid's beyond our flush point are
	 *	 considered to not exist for the purposes of updating the
	 *	 parent's blockref array.
	 *
	 * NOTE! Updates to a parent's blockref table do not adjust the
	 *	 parent's bref.modify_tid, only its bref.mirror_tid.
	 */
	KKASSERT(child->index >= 0);
	if (child->delete_tid <= trans->sync_tid) {
		if (base) {
			KKASSERT(child->index < count);
			bzero(&base[child->index], sizeof(child->bref));
		}
		if (info->mirror_tid < child->delete_tid)
			info->mirror_tid = child->delete_tid;
	} else {
		if (base) {
			KKASSERT(child->index < count);
			base[child->index] = child->bref;
		}
		if (info->mirror_tid < child->modify_tid)
			info->mirror_tid = child->modify_tid;
	}

	if (info->mirror_tid < child->bref.mirror_tid) {
		info->mirror_tid = child->bref.mirror_tid;
	}
	if (parent->bref.type == HAMMER2_BREF_TYPE_VOLUME &&
	    hmp->voldata.mirror_tid < child->bref.mirror_tid) {
		hmp->voldata.mirror_tid = child->bref.mirror_tid;
	}
	/*
	 * When can we safely clear the MOVED flag?  Flushes down duplicate
	 * paths can occur out of order, for example if an inode is moved
	 * as part of a hardlink consolidation or if an inode is moved into
	 * an indirect block indexed before the inode.
	 *
	 * Only clear MOVED once all possible parents have been flushed.
	 */
	if (child->flags & HAMMER2_CHAIN_MOVED) {
		hammer2_chain_t *scan;
		int ok = 1;

		spin_lock(&above->cst.spin);
		for (scan = above->first_parent;
		     scan;
		     scan = scan->next_parent) {
			if (scan->flags & HAMMER2_CHAIN_SUBMODIFIED) {
				ok = 0;
				break;
			}
		}
		spin_unlock(&above->cst.spin);
		if (ok) {
			atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
			hammer2_chain_drop(child);	/* flag */
		}
	}
	/*
	 * Unlock the child.  This can wind up dropping the child's
	 * last ref, removing it from the parent's RB tree, and deallocating
	 * the structure.  The RB_SCAN() our caller is doing handles the
	 * situation.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);
	spin_lock(&above->cst.spin);
	/*
	 * The parent cleared SUBMODIFIED prior to the scan.  If the child
	 * still requires a flush (possibly due to being outside the current
	 * synchronization zone), we must re-set SUBMODIFIED on the way back
	 * up.
	 */
finish:
	if (hammer2_debug & 0x0008)
		kprintf("G child %p %08x\n", child, child->flags);
	if (child->flags & HAMMER2_CHAIN_SUBMODIFIED)
		atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);
	return (0);
}
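
/*
 * Illustrative sketch (assumed caller, not part of this file): a flush
 * transaction driving hammer2_chain_flush() on the volume root chain.
 * The function name is hypothetical; the ISFLUSH flag, vchain, and the
 * lock/flush/unlock sequence follow the conventions defined above, with
 * error handling omitted.
 */
#if 0
static void
example_sync_volume(hammer2_mount_t *hmp)
{
	hammer2_trans_t trans;

	/* serialize against prior transactions per trans_init rules */
	hammer2_trans_init(hmp, &trans, HAMMER2_TRANS_ISFLUSH);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_flush(&trans, &hmp->vchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_trans_done(&trans);
}
#endif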