/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells the scan how to recurse downward to find these chains.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#define HAMMER2_FLUSH_DEPTH_LIMIT	60	/* stack recursion limit */
/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t	*parent;
	int		depth;
	int		diddeferral;
	int		error;			/* cumulative error */
	int		flags;
#ifdef HAMMER2_SCAN_DEBUG
	long		scan_count;
	long		scan_mod_count;
	long		scan_upd_count;
	long		scan_onf_count;
	long		scan_del_count;
	long		scan_btype[7];
	long		flushq_count;
#endif
	struct h2_flush_list flushq;
	hammer2_chain_t	*debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;
static void hammer2_flush_core(hammer2_flush_info_t *info,
			hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);
/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}
/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transactions.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrently with the
 * flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 * Afterwards, normal transactions will be
			 * interlocked.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction from buffer-cache,
			 * or a VM getpages/putpages through the buffer cache.
			 * We must allow such transactions in all situations
			 * to avoid deadlocks.
			 */
			nflags = (oflags | flags) + 1;
#if 0
			/*
			 * (old) previous code interlocked against the main
			 *	 flush.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
			} else {
				nflags = (oflags | flags) + 1;
			}
#endif
		} else {
			/*
			 * Requesting normal modifying transaction (read-only
			 * operations do not use transactions).  Waits for
			 * any flush to finish before allowing.  Multiple
			 * modifying transactions can run concurrently.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}

		tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			/* sleep if we could not acquire, then retry */
			if ((nflags & HAMMER2_TRANS_MASK) ==
			    (oflags & HAMMER2_TRANS_MASK)) {
				tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
				       "h2trans", hz);
			} else {
				break;
			}
		}
		/* retry */
	}
}

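/*
 * Illustrative sketch (not part of the original source): how a modifying
 * frontend operation would bracket its work using the transaction API
 * described above.  The helper below is hypothetical; only
 * hammer2_trans_init()/hammer2_trans_done() are real entry points.
 */
#if 0
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
	/* flags == 0 requests a normal transaction, interlocked vs flush */
	hammer2_trans_init(pmp, 0);
	/* ... perform modifying chain/inode operations here ... */
	hammer2_trans_done(pmp);
}
#endif
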
/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

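/*
 * Illustrative sketch (not part of the original source): running several
 * XOPs in sequence within one transaction, pulling a fresh CLC mtid for
 * each via hammer2_trans_sub() as required above.  The helper and the
 * commented-out XOP steps are hypothetical.
 */
#if 0
static void
example_xop_sequence(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	hammer2_trans_init(pmp, 0);
	mtid = hammer2_trans_sub(pmp);	/* mtid for the first XOP */
	/* ... run first XOP, typically recording mtid in the inode ... */
	mtid = hammer2_trans_sub(pmp);	/* fresh mtid for the second XOP */
	/* ... run second XOP ... */
	hammer2_trans_done(pmp);
}
#endif
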
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pmp->trans.flags;
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction.
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending.
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		}
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return (tid);
}

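/*
 * Illustrative sketch (not part of the original source): a create-style
 * operation allocating a unique inode number.  The helper is hypothetical.
 */
#if 0
static hammer2_tid_t
example_alloc_inum(hammer2_pfs_t *pmp)
{
	/* unique across the pfs, not serialized by the caller */
	return (hammer2_trans_newinum(pmp));
}
#endif
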
/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *	(old) (1) In a normal transaction.
 *	(old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}

/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 *
 * NOTE: The flush code tests HAMMER2_CHAIN_DESTROY to differentiate
 *	 between these chains and the deep-recursion requeue.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *	HAMMER2_FLUSH_TOP	Indicates that this is the top of the flush.
 *				Is cleared for the recursion.
 *
 *	HAMMER2_FLUSH_ALL	Recurse everything
 *
 *	HAMMER2_FLUSH_INODE_RECURSE
 *				Recurse one inode level, flush includes
 *				sub-inodes but does not go deeper (thus UPDATE
 *				can wind up remaining set).
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed on this iteration.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
#ifdef HAMMER2_SCAN_DEBUG
			++info.flushq_count;
#endif
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hmp->flushq chains (marked DESTROY) must be
			 *	 handled unconditionally so they can be
			 *	 cleaned out.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			if (scan->error == 0) {
				if (scan->flags & HAMMER2_CHAIN_DESTROY) {
					hammer2_flush(scan,
						      flags |
						      HAMMER2_FLUSH_TOP);
				} else {
					hammer2_flush(scan,
						      flags & ~HAMMER2_FLUSH_TOP);
				}
			}
			info.error |= scan->error;
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from defer */
		}

		/*
		 * [re]flush chain as the deep recursion may have generated
		 * additional modifications.
		 */
		info.diddeferral = 0;
		if (info.parent != chain->parent) {
			kprintf("LOST CHILD4 %p->%p (actual parent %p)\n",
				info.parent, chain, chain->parent);
			hammer2_chain_drop(info.parent);
			info.parent = chain->parent;
			hammer2_chain_ref(info.parent);
		}
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
#ifdef HAMMER2_SCAN_DEBUG
	if (info.scan_count >= 10)
		kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
			"bt(%ld,%ld,%ld,%ld,%ld,%ld) flushq %ld\n",
			info.scan_count,
			info.scan_mod_count,
			info.scan_upd_count,
			info.scan_onf_count,
			info.scan_del_count,
			info.scan_btype[1],
			info.scan_btype[2],
			info.scan_btype[3],
			info.scan_btype[4],
			info.scan_btype[5],
			info.scan_btype[6],
			info.flushq_count);
#endif
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);

	return (info.error);
}

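/*
 * Illustrative sketch (not part of the original source): a typical backend
 * caller of hammer2_flush().  The chain must be locked and referenced by
 * the caller; on return UPDATE may still be set, indicating that the
 * parent's block table (not part of this flush) needs updating.  The
 * helper and its flag choice are hypothetical.
 */
#if 0
static int
example_flush_one_chain(hammer2_chain_t *chain)
{
	int error;

	error = hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_INODE_STOP);
	return (error);
}
#endif
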
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * by the caller as well.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	hammer2_io_t *dio;
	int save_error;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		}
		return;
	}

	hmp = chain->hmp;

	/*
	 * NOTE: parent can be NULL, usually due to destroy races.
	 */
	parent = info->parent;
	KKASSERT(chain->parent == parent);

	/*
	 * Downward search recursion
	 *
	 * We must be careful on cold stops.  If CHAIN_UPDATE is set and
	 * we stop cold (versus a deferral which will re-run the chain later),
	 * the update can wind up never being applied.  This situation most
	 * typically occurs on inode boundaries due to the way
	 * hammer2_vfs_sync() breaks up the flush.  As a safety, we
	 * flush-through such situations.
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_ALL is not specified the caller does not want
		 * to recurse through PFS roots that have been mounted.
		 *
		 * (If the PFS has not been mounted there may not be
		 *  anything monitoring its chains and it's up to us
		 *  to flush them.)
		 *
		 * The typical sequence is to flush dirty PFS's starting at
		 * their root downward, then flush the device root (vchain).
		 * It is this second flush that typically leaves out the
		 * ALL flag.
		 *
		 * However we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
		 *
		 * NOTE: This test must be done before the depth-limit test,
		 *	 else it might become the top on a flushq iteration.
		 *
		 * NOTE: We must re-set ONFLUSH in the parent to retain if
		 *	 this chain (that we are skipping) requires work.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			hammer2_chain_setflush(parent);
		}
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		   (flags & HAMMER2_FLUSH_INODE_STOP) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0 &&
		   chain->pmp && chain->pmp->mp) {
		/*
		 * If FLUSH_INODE_STOP is specified and both ALL and TOP
		 * are clear, we must not flush the chain.  The chain should
		 * have already been flushed and any further ONFLUSH/UPDATE
		 * setting will be related to the next flush.
		 *
		 * This feature allows us to flush inodes independently of
		 * each other and meta-data above the inodes separately.
		 */
		if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				    HAMMER2_CHAIN_DESTROY |
				    HAMMER2_CHAIN_MODIFIED)) {
			if (parent)
				hammer2_chain_setflush(parent);
		}
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races or
		 * flush errors, which we want so the scan finds us again in
		 * the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 *
		 * WARNING! The recursion will unlock/relock info->parent
		 *	    (which is 'chain'), potentially allowing it
		 *	    to be ripped up.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		save_error = info->error;
		info->error = 0;
		info->parent = chain;

		/*
		 * We may have to do this twice to catch any indirect
		 * block maintenance that occurs.  Other conditions which
		 * can keep setting ONFLUSH (such as deferrals) ought to
		 * be handled by the flushq code.  XXX needs more help
		 */
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
			RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
				NULL, hammer2_flush_recurse, info);
		}
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;

		/*
		 * Re-set the flush bits if the flush was incomplete or
		 * an error occurred.  If an error occurs it is typically
		 * an allocation error.  Errors do not cause deferrals.
		 */
		if (info->error)
			hammer2_chain_setflush(chain);
		info->error |= save_error;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);

		/*
		 * If we lost the parent->chain association we have to
		 * stop processing this chain because it is no longer
		 * in this recursion.  If it moved, it will be handled
		 * by the ONFLUSH flag elsewhere.
		 */
		if (chain->parent != parent) {
			kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
				parent, chain, chain->parent);
			goto done;
		}
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.  We continue
	 * to try to update the chain on lower-level errors, but the flush
	 * code may decide not to flush the volume root.
	 *
	 * XXX should we continue to try to update the chain if an error
	 *     occurred?
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Both parent and chain must be locked in order to flush chain,
	 * in order to properly update the parent under certain conditions.
	 *
	 * In addition, we can't safely unlock/relock the chain once we
	 * start flushing the chain itself, which we would have to do later
	 * on in order to lock the parent if we didn't do that now.
	 */
	hammer2_chain_ref_hold(chain);
	hammer2_chain_unlock(chain);
	if (parent)
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(chain);

	/*
	 * Can't process if we can't access their content.
	 */
	if ((parent && parent->error) || chain->error) {
		kprintf("hammer2: chain error during flush\n");
		info->error |= chain->error;
		if (parent) {
			info->error |= parent->error;
			hammer2_chain_unlock(parent);
		}
		goto done;
	}

	if (chain->parent != parent) {
		kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
			parent, chain, chain->parent);
		KKASSERT(parent != NULL);
		hammer2_chain_unlock(parent);
		if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
			hammer2_chain_ref(chain);
			TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
			++info->diddeferral;
		}
		goto done;
	}

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Dispose of the modified bit.
	 *
	 * If parent is present, the UPDATE bit should already be set.
	 * bref.mirror_tid should already be set.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
	}

	/*
	 * Issue the flush.  This is indirect via the DIO.
	 *
	 * NOTE: A DELETED node that reaches this point must be
	 *	 flushed for synchronization point consistency.
	 *
	 * NOTE: Even though MODIFIED was already set, the related DIO
	 *	 might not be dirty due to a system buffer cache
	 *	 flush and must be set dirty if we are going to make
	 *	 further modifications to the buffer.  Chains with
	 *	 embedded data don't need this.
	 */
	if (hammer2_debug & 0x1000) {
		kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
			chain, chain->bref.type,
			(uintmax_t)chain->bref.key,
			chain->bref.keybits,
			(uintmax_t)chain->bref.data_off);
	}
	if (hammer2_debug & 0x2000) {
		Debugger("Flush hell");
	}

	/*
	 * Update chain CRCs for flush.
	 *
	 * NOTE: Volume headers are NOT flushed here as they require
	 *	 special processing.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * Update the volume header's freemap_tid to the
		 * freemap's flushing mirror_tid.
		 *
		 * (note: embedded data, do not call setdirty)
		 */
		KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
		KKASSERT(chain == &hmp->fchain);
		hmp->voldata.freemap_tid = chain->bref.mirror_tid;
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync freemap mirror_tid %08jx\n",
				(intmax_t)chain->bref.mirror_tid);
		}

		/*
		 * The freemap can be flushed independently of the
		 * main topology, but for the case where it is
		 * flushed in the same transaction, and flushed
		 * before vchain (a case we want to allow for
		 * performance reasons), make sure modifications
		 * made during the flush under vchain use a new
		 * transaction id.
		 *
		 * Otherwise the mount recovery code will get confused.
		 */
		++hmp->voldata.mirror_tid;
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		/*
		 * The free block table is flushed by
		 * hammer2_vfs_sync() before it flushes vchain.
		 * We must still hold fchain locked while copying
		 * voldata to volsync, however.
		 *
		 * These do not error per se since their data does
		 * not need to be re-read from media on lock.
		 *
		 * (note: embedded data, do not call setdirty)
		 */
		hammer2_chain_lock(&hmp->fchain,
				   HAMMER2_RESOLVE_ALWAYS);
		hammer2_voldata_lock(hmp);
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volume mirror_tid %08jx\n",
				(intmax_t)chain->bref.mirror_tid);
		}

		/*
		 * Update the volume header's mirror_tid to the
		 * main topology's flushing mirror_tid.  It is
		 * possible that voldata.mirror_tid is already
		 * beyond bref.mirror_tid due to the bump we made
		 * above in BREF_TYPE_FREEMAP.
		 */
		if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
			hmp->voldata.mirror_tid =
				chain->bref.mirror_tid;
		}

		/*
		 * The volume header is flushed manually by the
		 * syncer, not here.  All we do here is adjust the
		 * crc's.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->dio == NULL);

		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC1_OFF,
				HAMMER2_VOLUME_ICRC1_SIZE);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC0_OFF,
				HAMMER2_VOLUME_ICRC0_SIZE);
		hmp->voldata.icrc_volheader =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRCVH_OFF,
				HAMMER2_VOLUME_ICRCVH_SIZE);

		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("syncvolhdr %016jx %016jx\n",
				hmp->voldata.mirror_tid,
				hmp->vchain.bref.mirror_tid);
		}
		hmp->volsync = hmp->voldata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
		hammer2_voldata_unlock(hmp);
		hammer2_chain_unlock(&hmp->fchain);
		break;
	case HAMMER2_BREF_TYPE_DATA:
		/*
		 * Data elements have already been flushed via the
		 * logical file buffer cache.  Their hash was set in
		 * the bref by the vop_write code.  Do not re-dirty.
		 *
		 * Make sure any device buffer(s) have been flushed
		 * out here (there aren't usually any to flush) XXX.
		 */
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * Buffer I/O will be cleaned up when the volume is
		 * flushed (but the kernel is free to flush it before
		 * then, as well).
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
		hammer2_chain_setcheck(chain, chain->data);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
		/*
		 * A directory entry can use the check area to store
		 * the filename for filenames <= 64 bytes, don't blow
		 * it up!
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
		if (chain->bytes)
			hammer2_chain_setcheck(chain, chain->data);
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * NOTE: We must call io_setdirty() to make any late
		 *	 changes to the inode data, the system might
		 *	 have already flushed the buffer.
		 */
		if (chain->data->ipdata.meta.op_flags &
		    HAMMER2_OPFLAG_PFSROOT) {
			/*
			 * non-NULL pmp if mounted as a PFS.  We must
			 * sync fields cached in the pmp? XXX
			 */
			hammer2_inode_data_t *ipdata;

			hammer2_io_setdirty(chain->dio);
			ipdata = &chain->data->ipdata;
			if (chain->pmp) {
				ipdata->meta.pfs_inum =
					chain->pmp->inode_tid;
			}
		} else {
			/* can't be mounted as a PFS */
		}

		KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
		hammer2_chain_setcheck(chain, chain->data);
		{
			hammer2_inode_data_t *ipdata;

			ipdata = &chain->data->ipdata;
			(void)ipdata;
		}
		break;
	default:
		KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
		panic("hammer2_flush_core: unsupported "
		      "embedded bref %d",
		      chain->bref.type);
		/* NOT REACHED */
	}

	/*
	 * If the chain was destroyed try to avoid unnecessary I/O
	 * that might not have yet occurred.  Remove the data range
	 * from dedup candidacy and attempt to invalidate the
	 * potentially dirty portion of the I/O buffer.
	 */
	if (chain->flags & HAMMER2_CHAIN_DESTROY) {
		hammer2_io_dedup_delete(hmp,
					chain->bref.type,
					chain->bref.data_off,
					chain->bytes);
		if (chain->dio) {
			hammer2_io_inval(chain->dio,
					 chain->bref.data_off,
					 chain->bytes);
		} else if ((dio = hammer2_io_getquick(hmp,
						      chain->bref.data_off,
						      chain->bytes)) != NULL) {
			hammer2_io_inval(dio,
					 chain->bref.data_off,
					 chain->bytes);
			hammer2_io_putblk(&dio);
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 * This can fail if the hammer2_chain_modify() fails.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			goto skipupdate;
		}

		/*
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it messy and inefficient.
		 *
		 * The flusher is also responsible for collapsing the
		 * content of an indirect block into its parent whenever
		 * possible (with some hysteresis).  Not doing this will also
		 * not harm the topology, but would make it messy and
		 * inefficient.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
			if (hammer2_chain_indirect_maintenance(parent, chain))
				goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.  If this fails we re-set the UPDATE flag
		 * in the child.
		 *
		 * NOTE! A modification error can be ENOSPC.  We still want
		 *	 to flush modified chains recursively, not break out,
		 *	 so we just skip the update in this situation and
		 *	 continue.  That is, we still need to try to clean
		 *	 out dirty chains and buffers.
		 *
		 *	 This may not help bulkfree though. XXX
		 */
		save_error = hammer2_chain_modify(parent, 0, 0, 0);
		if (save_error) {
			info->error |= save_error;
			kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
				parent->bref.data_off, parent->bref.type,
				save_error);
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
			goto skipupdate;
		}
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    chain, &chain->bref);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
	}
skipupdate:
	if (parent)
		hammer2_chain_unlock(parent);

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * This function may set info->error as a side effect.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * processed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

#ifdef HAMMER2_SCAN_DEBUG
	++info->scan_count;
	if (child->flags & HAMMER2_CHAIN_MODIFIED)
		++info->scan_mod_count;
	if (child->flags & HAMMER2_CHAIN_UPDATE)
		++info->scan_upd_count;
	if (child->flags & HAMMER2_CHAIN_ONFLUSH)
		++info->scan_onf_count;
#endif

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 *  needed)
	 *
	 * We must ref the child before unlocking the spinlock.
	 *
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  However, if it no longer
	 * winds up being the child of the parent we must skip this child.
	 *
	 * NOTE! chain locking errors are fatal.  They are never out-of-space
	 *	 errors.
	 */
	hammer2_chain_ref(child);
	hammer2_spin_unex(&parent->core.spin);

	hammer2_chain_ref_hold(parent);
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
			parent, child, child->parent);
		goto done;
	}
	if (child->error) {
		kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= child->error;
	}

	/*
	 * Must propagate the DESTROY flag downwards, otherwise the
	 * parent could end up never being removed because it will
	 * be requeued to the flusher if it survives this run due to
	 * the flag.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
#ifdef HAMMER2_SCAN_DEBUG
	if (child->flags & HAMMER2_CHAIN_DESTROY)
		++info->scan_del_count;
#endif

	/*
	 * Recurse and collect deferral data.  We're in the media flush,
	 * this can cross PFS boundaries.
	 */
	if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
#ifdef HAMMER2_SCAN_DEBUG
		if (child->bref.type < 7)
			++info->scan_btype[child->bref.type];
#endif
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
	} else if (hammer2_debug & 0x200) {
		if (info->debug == NULL)
			info->debug = child;
		++info->depth;
		hammer2_flush_core(info, child, info->flags);
		--info->depth;
		if (info->debug == child)
			info->debug = NULL;
	}

done:
	/*
	 * Relock to continue the loop.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(parent);
	if (parent->error) {
		kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
			parent, child);
		info->error |= parent->error;
	}
	hammer2_chain_drop(child);
	KKASSERT(info->parent == parent);
	hammer2_spin_ex(&parent->core.spin);

	return (0);
}

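/*
 * Illustrative sketch (not part of the original source): the ref/hold
 * idiom used by hammer2_flush_recurse() above, distilled.  The parent's
 * lock is temporarily released so the child can be locked, while
 * ref_hold/drop_unhold keep the parent from being torn down in the
 * interim.  The helper is hypothetical.
 */
#if 0
static void
example_lock_child(hammer2_chain_t *parent, hammer2_chain_t *child)
{
	hammer2_chain_ref(child);		/* pin the child */
	hammer2_chain_ref_hold(parent);		/* ref parent, hold data */
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
	if (child->parent != parent) {
		/* raced a ripout; caller must skip this child */
	}
	/* ... operate on the locked child ... */
	hammer2_chain_unlock(child);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop_unhold(parent);	/* undo the ref_hold */
	hammer2_chain_drop(child);
}
#endif
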
/*
 * flush helper (backend threaded)
 *
 * Flushes chain topology for the specified inode.
 *
 * If HAMMER2_XOP_FLUSH is set we flush all chains from the current inode
 * through but stop at sub-inodes (we flush the inode chains for sub-inodes,
 * but do not go further as deeper modifications do not belong to the current
 * flush).
 *
 * If HAMMER2_XOP_FLUSH is not set we flush the current inode's chains only
 * and do not recurse through sub-inodes, not even the inode chains for
 * those sub-inodes.
 *
 * Remember that HAMMER2 is currently using a flat inode model, so directory
 * hierarchies do not translate to inode hierarchies.  PFS ROOTs, however,
 * do.
 *
 * chain->parent can be NULL, usually due to destroy races.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int flush_error = 0;
	int fsync_error = 0;
	int total_error = 0;
	int j;
	int xflags;
	int ispfsroot = 0;

	xflags = HAMMER2_FLUSH_TOP;
	if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
		xflags |= HAMMER2_FLUSH_INODE_STOP;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, xflags);
			parent = chain->parent;
			if (parent)
				hammer2_chain_setflush(parent);
		}
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			ispfsroot = 1;
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Don't flush from the volume root to the PFSROOT unless ip was
	 * a PFSROOT.  If it isn't then this flush is probably related to
	 * a mid-topology element and the volume root does not need to be
	 * flushed.
	 */
	if (ispfsroot == 0)
		goto skip;

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = thr->clindex - 1; j >= 0; --j) {
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 *
	 * vchain and fchain do not error on-lock since their data does
	 * not have to be re-read from media.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental.
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	fsync_error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);
	if (fsync_error || flush_error) {
		kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
			fsync_error, flush_error, hmp->devrepname);
	}

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (fsync_error == 0 && flush_error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;
		int vol_error = 0;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		fsync_error = biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 */
		j = hmp->volhdrno + 1;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, GETBLK_KVABIO, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		vol_error = bwrite(bp);
		hmp->volhdrno = j;
		if (vol_error)
			fsync_error = vol_error;
	}

	if (flush_error)
		total_error = flush_error;
	if (fsync_error)
		total_error = hammer2_errno_to_error(fsync_error);

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
skip:
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, total_error);
}

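/*
 * Illustrative sketch (not part of the original source): roughly how the
 * frontend (e.g. the VFS_SYNC path) might dispatch this backend helper.
 * The XOP helper names and signatures (hammer2_xop_alloc/start/collect/
 * retire, HAMMER2_XOP_COLLECT_WAITALL, HAMMER2_XOPMASK_VOP) are assumed
 * from the rest of HAMMER2 and may differ between versions.
 */
#if 0
static int
example_frontend_flush(hammer2_inode_t *ip)
{
	hammer2_xop_flush_t *xop;
	int error;

	/* inode locking elided; HAMMER2_XOP_INODE_STOP limits the flush */
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_INODE_STOP);
	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	return (error);
}
#endif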