/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * and by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells the scan how to recurse downward to find these chains.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"

#define HAMMER2_FLUSH_DEPTH_LIMIT       10      /* stack recursion limit */

/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
        hammer2_chain_t *parent;
        int             depth;
        int             diddeferral;
        int             cache_index;
        int             flags;
        struct h2_flush_list flushq;
        hammer2_chain_t *debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
                                hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0                    - Normal transaction, interlocked against flush
 *                        transaction.
 *
 * TRANS_ISFLUSH        - Flush transaction, interlocked against normal
 *                        transaction.
 *
 * TRANS_BUFCACHE       - Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;
        int dowait;

        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                dowait = 0;

                if (flags & HAMMER2_TRANS_ISFLUSH) {
                        /*
                         * Requesting flush transaction.  Wait for all
                         * currently running transactions to finish.
                         */
                        if (oflags & HAMMER2_TRANS_MASK) {
                                nflags = oflags | HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
                } else if (flags & HAMMER2_TRANS_BUFCACHE) {
                        /*
                         * Requesting strategy transaction.  Generally
                         * allowed in all situations unless a flush
                         * is running without the preflush flag.
                         */
                        if ((oflags & (HAMMER2_TRANS_ISFLUSH |
                                       HAMMER2_TRANS_PREFLUSH)) ==
                            HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
                } else {
                        /*
                         * Requesting normal transaction.  Wait for any
                         * flush to finish before allowing.
                         */
                        if (oflags & HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
                }
                if (dowait)
                        tsleep_interlock(&pmp->trans.sync_wait, 0);
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if (dowait == 0)
                                break;
                        tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
                               "h2trans", hz);
                        /* retry */
                } else {
                        cpu_pause();
                        /* retry */
                }
        }
}

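/*
 * Illustrative sketch (an assumption, not part of the original source):
 * pmp->trans.flags packs a live-transaction count in its low bits
 * (covered by HAMMER2_TRANS_MASK) together with state flags in the high
 * bits, which is why the CAS loop above can atomically enter a
 * transaction with "(oflags | flags) + 1".  The helper below is
 * hypothetical and only demonstrates the encoding.
 */
#if 0
static __inline int
h2_example_trans_count(uint32_t transflags)
{
        /* number of transactions currently running on the pmp */
        return (transflags & HAMMER2_TRANS_MASK);
}
#endif
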
/*
 * Start a sub-transaction; there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
        hammer2_tid_t mtid;

        mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

        return (mtid);
}

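/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a frontend running two XOPs in sequence inside one transaction pulls
 * a fresh mtid for each so the cluster-level changes stay ordered.
 * Everything here except hammer2_trans_sub() is hypothetical.
 */
#if 0
static void
h2_example_sequenced_xops(hammer2_pfs_t *pmp, hammer2_inode_t *ip)
{
        hammer2_tid_t mtid;

        mtid = hammer2_trans_sub(pmp);  /* CLC id for the first XOP */
        /* ... issue first XOP, stamp ip's meta with mtid ... */
        mtid = hammer2_trans_sub(pmp);  /* new CLC id for the second XOP */
        /* ... issue second XOP, stamp ip's meta with mtid ... */
}
#endif
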
/*
 * Clears the PREFLUSH stage, called during a flush transaction after all
 * logical buffer I/O has completed.
 */
void
hammer2_trans_clear_preflush(hammer2_pfs_t *pmp)
{
        atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH);
}

void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                KKASSERT(oflags & HAMMER2_TRANS_MASK);
                if ((oflags & HAMMER2_TRANS_MASK) == 1) {
                        /*
                         * This was the last transaction
                         */
                        nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
                                                  HAMMER2_TRANS_BUFCACHE |
                                                  HAMMER2_TRANS_PREFLUSH |
                                                  HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING);
                } else {
                        /*
                         * Still transactions pending
                         */
                        nflags = oflags - 1;
                }
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
                            (oflags & HAMMER2_TRANS_WAITING)) {
                                wakeup(&pmp->trans.sync_wait);
                        }
                        break;
                } else {
                        cpu_pause();
                }
                /* retry */
        }
}

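/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a typical frontend brackets a modifying operation with trans_init /
 * trans_done and pulls an mtid per XOP in between.  The calls shown are
 * the real API; the surrounding function is hypothetical.
 */
#if 0
static void
h2_example_modifying_op(hammer2_pfs_t *pmp)
{
        hammer2_tid_t mtid;

        hammer2_trans_init(pmp, 0);             /* normal transaction */
        mtid = hammer2_trans_sub(pmp);          /* CLC id for the op */
        /* ... run the modifying XOP(s) using mtid ... */
        hammer2_trans_done(pmp);
}
#endif
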
/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
        hammer2_tid_t tid;

        tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

        return (tid);
}

/*
 * Assert that a strategy call is ok here.  Strategy calls are legal:
 *
 * (1) In a normal transaction.
 * (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
        KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
                 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
}

/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
        if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
                hammer2_spin_ex(&chain->hmp->list_spin);
                if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
                                     HAMMER2_CHAIN_DEFERRED)) == 0) {
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
                                                      HAMMER2_CHAIN_DEFERRED);
                        TAILQ_INSERT_TAIL(&chain->hmp->flushq,
                                          chain, flush_node);
                        hammer2_chain_ref(chain);
                }
                hammer2_spin_unex(&chain->hmp->list_spin);
                hammer2_voldata_modify(chain->hmp);
        }
}

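/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a chain queued here carries an extra ref plus the DEFERRED and DELAYED
 * flags; the next top-level hammer2_flush() moves hmp->flushq into its
 * local info.flushq, clears the flags, and flushes/drops each entry.
 * Typical use from a hypothetical destroy path:
 */
#if 0
static void
h2_example_destroy_path(hammer2_chain_t *chain)
{
        /* chain is being ripped out of the topology */
        if (chain->flags & HAMMER2_CHAIN_DESTROY)
                hammer2_delayed_flush(chain);   /* disposed on next flush */
}
#endif
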
/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.  The chain may be replaced by
 * the call if it was modified.
 */
void
hammer2_flush(hammer2_chain_t *chain, int flags)
{
        hammer2_chain_t *scan;
        hammer2_flush_info_t info;
        hammer2_dev_t *hmp;
        int loops;

        /*
         * Execute the recursive flush and handle deferrals.
         *
         * Chains can be ridiculously long (thousands deep), so to
         * avoid blowing out the kernel stack the recursive flush has a
         * depth limit.  Elements at the limit are placed on a list
         * for re-execution after the stack has been popped.
         */
        bzero(&info, sizeof(info));
        TAILQ_INIT(&info.flushq);
        info.cache_index = -1;
        info.flags = flags & ~HAMMER2_FLUSH_TOP;

        /*
         * Calculate parent (can be NULL), if not NULL the flush core
         * expects the parent to be referenced so it can easily lock/unlock
         * it without it getting ripped up.
         */
        if ((info.parent = chain->parent) != NULL)
                hammer2_chain_ref(info.parent);

        /*
         * Extra ref needed because flush_core expects it when replacing
         * chain.
         */
        hammer2_chain_ref(chain);
        hmp = chain->hmp;
        loops = 0;

        for (;;) {
                /*
                 * Move hmp->flushq to info.flushq if non-empty so it can
                 * be processed.
                 */
                if (TAILQ_FIRST(&hmp->flushq) != NULL) {
                        hammer2_spin_ex(&chain->hmp->list_spin);
                        TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
                        hammer2_spin_unex(&chain->hmp->list_spin);
                }

                /*
                 * Unwind deep recursions which had been deferred.  This
                 * can leave the FLUSH_* bits set for these chains, which
                 * will be handled when we [re]flush chain after the unwind.
                 */
                while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
                        KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
                        TAILQ_REMOVE(&info.flushq, scan, flush_node);
                        atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
                                                       HAMMER2_CHAIN_DELAYED);

                        /*
                         * Now that we've popped back up we can do a secondary
                         * recursion on the deferred elements.
                         *
                         * NOTE: hammer2_flush() may replace scan.
                         */
                        if (hammer2_debug & 0x0040)
                                kprintf("deferred flush %p\n", scan);
                        hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
                        hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
                        hammer2_chain_unlock(scan);
                        hammer2_chain_drop(scan);       /* ref from deferral */
                }

                /*
                 * [re]flush chain.
                 */
                info.diddeferral = 0;
                hammer2_flush_core(&info, chain, flags);

                /*
                 * Only loop if deep recursions have been deferred.
                 */
                if (TAILQ_EMPTY(&info.flushq))
                        break;

                if (++loops % 1000 == 0) {
                        kprintf("hammer2_flush: excessive loops on %p\n",
                                chain);
                        if (hammer2_debug & 0x100000)
                                Debugger("hell4");
                }
        }
        hammer2_chain_drop(chain);
        if (info.parent)
                hammer2_chain_drop(info.parent);
}

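/*
 * Illustrative sketch (an assumption, not part of the original source):
 * with HAMMER2_FLUSH_DEPTH_LIMIT at 10, a dirty chain sitting N levels
 * below the flush top is deferred roughly every 10 levels, so the
 * top-level loop above reaches it after a bounded number of passes and
 * kernel stack use stays bounded even for chains thousands deep.  The
 * helper is hypothetical and only approximates the pass count.
 */
#if 0
static __inline int
h2_example_flush_passes(int levels_deep)
{
        /* approximate number of top-level loop iterations required */
        return (levels_deep / HAMMER2_FLUSH_DEPTH_LIMIT + 1);
}
#endif
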
/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
                   int flags)
{
        hammer2_chain_t *parent;
        hammer2_dev_t *hmp;
        int diddeferral;

        /*
         * (1) Optimize downward recursion to locate nodes needing action.
         *     Nothing to do if none of these flags are set.
         */
        if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
                if (hammer2_debug & 0x200) {
                        if (info->debug == NULL)
                                info->debug = chain;
                } else {
                        return;
                }
        }

        hmp = chain->hmp;
        diddeferral = info->diddeferral;
        parent = info->parent;          /* can be NULL */
        /*
         * Downward search recursion
         */
        if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
                /*
                 * Already deferred.
                 */
                ++info->diddeferral;
        } else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
                   (flags & HAMMER2_FLUSH_ALL) == 0 &&
                   (flags & HAMMER2_FLUSH_TOP) == 0) {
                /*
                 * We do not recurse through PFSROOTs.  PFSROOT flushes are
                 * handled by the related pmp's (whether mounted or not,
                 * including during recovery).
                 *
                 * But we must still process the PFSROOT chains for block
                 * table updates in their parent (which IS part of our flush).
                 *
                 * Note that the volume root, vchain, does not set this flag.
                 * Note the logic here requires that this test be done before
                 * the depth-limit test, else it might become the top on a
                 * flushq iteration.
                 */
        } else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
                /*
                 * Recursion depth reached.
                 */
                KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
                hammer2_chain_ref(chain);
                TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
                ++info->diddeferral;
        } else if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
                /*
                 * Downward recursion search (actual flush occurs bottom-up).
                 * pre-clear ONFLUSH.  It can get set again due to races,
                 * which we want so the scan finds us again in the next flush.
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
                info->parent = chain;
                hammer2_spin_ex(&chain->core.spin);
                RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
                        NULL, hammer2_flush_recurse, info);
                hammer2_spin_unex(&chain->core.spin);
                info->parent = parent;
                if (info->diddeferral)
                        hammer2_chain_setflush(chain);
        }
        /*
         * Now we are in the bottom-up part of the recursion.
         *
         * Do not update chain if lower layers were deferred.
         */
        if (info->diddeferral)
                goto done;

        /*
         * Propagate the DESTROY flag downwards.  This dummies up the flush
         * code and tries to invalidate related buffer cache buffers to
         * avoid the disk write.
         */
        if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

        /*
         * Chain was already modified or has become modified, flush it out.
         */
again:
        if ((hammer2_debug & 0x200) &&
            info->debug &&
            (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
                hammer2_chain_t *scan = chain;

                kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
                while (scan) {
                        kprintf("    chain %p [%08x] bref=%016jx:%02x\n",
                                scan, scan->flags,
                                scan->bref.key, scan->bref.type);
                        if (scan == info->debug)
                                break;
                        scan = scan->parent;
                }
        }
        if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                /*
                 * Dispose of the modified bit.
                 *
                 * If parent is present, the UPDATE bit should already be set.
                 * bref.mirror_tid should already be set.
                 */
                KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
                         chain->parent == NULL);
                if (hammer2_debug & 0x800000) {
                        hammer2_chain_t *pp;

                        for (pp = chain; pp->parent; pp = pp->parent)
                                ;
                        kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d FLAGS %08x (%s)\n",
                                chain, chain->parent, pp, pp->bref.type,
                                chain->bref.type, chain->flags,
                                (chain->bref.type == HAMMER2_BREF_TYPE_INODE ?
                                 (const char *)chain->data->ipdata.filename :
                                 "?"));
                }
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
                atomic_add_long(&hammer2_count_modified_chains, -1);

                /*
                 * Manage threads waiting for excessive dirty memory to
                 * be retired.
                 */
                if (chain->pmp)
                        hammer2_pfs_memory_wakeup(chain->pmp);

                if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
                    chain != &hmp->vchain &&
                    chain != &hmp->fchain) {
                        /*
                         * Set UPDATE bit indicating that the parent block
                         * table requires updating.
                         */
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
                }
                /*
                 * Issue the flush.  This is indirect via the DIO.
                 *
                 * NOTE: A DELETED node that reaches this point must be
                 *       flushed for synchronization point consistency.
                 *
                 * NOTE: Even though MODIFIED was already set, the related DIO
                 *       might not be dirty due to a system buffer cache
                 *       flush and must be set dirty if we are going to make
                 *       further modifications to the buffer.  Chains with
                 *       embedded data don't need this.
                 */
                if (hammer2_debug & 0x1000) {
                        kprintf("Flush %p.%d %016jx/%d data=%016jx",
                                chain, chain->bref.type,
                                (uintmax_t)chain->bref.key,
                                chain->bref.keybits,
                                (uintmax_t)chain->bref.data_off);
                }
                if (hammer2_debug & 0x2000) {
                        Debugger("Flush hell");
                }

                /*
                 * Update chain CRCs for flush.
                 *
                 * NOTE: Volume headers are NOT flushed here as they require
                 *       special processing.
                 */
                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_FREEMAP:
                        /*
                         * Update the volume header's freemap_tid to the
                         * freemap's flushing mirror_tid.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
                        KKASSERT(chain == &hmp->fchain);
                        hmp->voldata.freemap_tid = chain->bref.mirror_tid;
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync freemap mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * The freemap can be flushed independently of the
                         * main topology, but for the case where it is
                         * flushed in the same transaction, and flushed
                         * before vchain (a case we want to allow for
                         * performance reasons), make sure modifications
                         * made during the flush under vchain use a new
                         * transaction id.
                         *
                         * Otherwise the mount recovery code will get confused.
                         */
                        ++hmp->voldata.mirror_tid;
                        break;
                case HAMMER2_BREF_TYPE_VOLUME:
                        /*
                         * The free block table is flushed by
                         * hammer2_vfs_sync() before it flushes vchain.
                         * We must still hold fchain locked while copying
                         * voldata to volsync, however.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        hammer2_chain_lock(&hmp->fchain,
                                           HAMMER2_RESOLVE_ALWAYS);
                        hammer2_voldata_lock(hmp);
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync volume mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * Update the volume header's mirror_tid to the
                         * main topology's flushing mirror_tid.  It is
                         * possible that voldata.mirror_tid is already
                         * beyond bref.mirror_tid due to the bump we made
                         * above in BREF_TYPE_FREEMAP.
                         */
                        if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
                                hmp->voldata.mirror_tid =
                                        chain->bref.mirror_tid;
                        }

                        /*
                         * The volume header is flushed manually by the
                         * syncer, not here.  All we do here is adjust the
                         * CRCs.
                         */
                        KKASSERT(chain->data != NULL);
                        KKASSERT(chain->dio == NULL);

                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC1_OFF,
                                        HAMMER2_VOLUME_ICRC1_SIZE);
                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC0_OFF,
                                        HAMMER2_VOLUME_ICRC0_SIZE);
                        hmp->voldata.icrc_volheader =
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRCVH_OFF,
                                        HAMMER2_VOLUME_ICRCVH_SIZE);

                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("syncvolhdr %016jx %016jx\n",
                                        hmp->voldata.mirror_tid,
                                        hmp->vchain.bref.mirror_tid);
                        }
                        hmp->volsync = hmp->voldata;
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
                        hammer2_voldata_unlock(hmp);
                        hammer2_chain_unlock(&hmp->fchain);
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        /*
                         * Data elements have already been flushed via the
                         * logical file buffer cache.  Their hash was set in
                         * the bref by the vop_write code.  Do not re-dirty.
                         *
                         * Make sure any device buffer(s) have been flushed
                         * out here (there aren't usually any to flush) XXX.
                         */
                        break;
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
                        /*
                         * Buffer I/O will be cleaned up when the volume is
                         * flushed (but the kernel is free to flush it before
                         * then, as well).
                         */
                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * NOTE: We must call io_setdirty() to make any late
                         *       changes to the inode data, the system might
                         *       have already flushed the buffer.
                         */
                        if (chain->data->ipdata.meta.op_flags &
                            HAMMER2_OPFLAG_PFSROOT) {
                                /*
                                 * non-NULL pmp if mounted as a PFS.  We must
                                 * sync fields cached in the pmp? XXX
                                 */
                                hammer2_inode_data_t *ipdata;

                                hammer2_io_setdirty(chain->dio);
                                ipdata = &chain->data->ipdata;
                                if (chain->pmp) {
                                        ipdata->meta.pfs_inum =
                                                chain->pmp->inode_tid;
                                }
                        } else {
                                /* can't be mounted as a PFS */
                        }

                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                default:
                        KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
                        panic("hammer2_flush_core: unsupported "
                              "embedded bref %d",
                              chain->bref.type);
                        /* NOT REACHED */
                }
                /*
                 * If the chain was destroyed try to avoid unnecessary I/O.
                 * (this only really works if the DIO system buffer is the
                 * same size as chain->bytes).
                 */
                if ((chain->flags & HAMMER2_CHAIN_DESTROY) &&
                    (chain->flags & HAMMER2_CHAIN_DEDUP) == 0 &&
                    chain->dio) {
                        hammer2_io_setinval(chain->dio, chain->bytes);
                }
        }
        /*
         * If UPDATE is set the parent block table may need to be updated.
         *
         * NOTE: UPDATE may be set on vchain or fchain in which case
         *       parent could be NULL.  It's easiest to allow the case
         *       and test for NULL.  parent can also wind up being NULL
         *       due to a deletion so we need to handle the case anyway.
         *
         * If no parent exists we can just clear the UPDATE bit.  If the
         * chain gets reattached later on the bit will simply get set
         * again.
         */
        if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
        /*
         * The chain may need its blockrefs updated in the parent.  This
         * requires some fancy footwork.
         */
        if (chain->flags & HAMMER2_CHAIN_UPDATE) {
                hammer2_blockref_t *base;
                int count;

                /*
                 * Both parent and chain must be locked.  This requires
                 * temporarily unlocking the chain.  We have to deal with
                 * the case where the chain might be reparented or modified
                 * while it was unlocked.
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
                hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
                if (chain->parent != parent) {
                        kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
                                chain, chain->parent, parent);
                        hammer2_chain_unlock(parent);
                        goto done;
                }

                /*
                 * Check race condition.  If someone got in and modified
                 * it again while it was unlocked, we have to loop up.
                 */
                if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                        hammer2_chain_unlock(parent);
                        kprintf("hammer2_flush: chain %p flush-mod race\n",
                                chain);
                        goto again;
                }

                /*
                 * Clear UPDATE flag, mark parent modified, update its
                 * modify_tid if necessary, and adjust the parent blockmap.
                 */
                if (chain->flags & HAMMER2_CHAIN_UPDATE)
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
                /*
                 * Avoid actually modifying and updating the parent if it
                 * was flagged for destruction.  This can greatly reduce
                 * disk I/O in large tree removals because the
                 * hammer2_io_setinval() call in the upward recursion
                 * (see MODIFIED code above) can only handle a few cases.
                 */
                if (parent->flags & HAMMER2_CHAIN_DESTROY) {
                        if (parent->bref.modify_tid < chain->bref.modify_tid) {
                                parent->bref.modify_tid =
                                        chain->bref.modify_tid;
                        }
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
                                                        HAMMER2_CHAIN_BMAPUPD);
                        hammer2_chain_unlock(parent);
                        goto skipupdate;
                }
                /*
                 * We are updating the parent's blockmap, the parent must
                 * be set modified.
                 */
                hammer2_chain_modify(parent, 0, 0, 0);
                if (parent->bref.modify_tid < chain->bref.modify_tid)
                        parent->bref.modify_tid = chain->bref.modify_tid;
                /*
                 * Calculate blockmap pointer
                 */
                switch(parent->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * Access the inode's block array.  However, there is
                         * no block array if the inode is flagged DIRECTDATA.
                         */
                        if (parent->data &&
                            (parent->data->ipdata.meta.op_flags &
                             HAMMER2_OPFLAG_DIRECTDATA) == 0) {
                                base = &parent->data->
                                        ipdata.u.blockset.blockref[0];
                        } else {
                                base = NULL;
                        }
                        count = HAMMER2_SET_COUNT;
                        break;
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                        if (parent->data)
                                base = &parent->data->npdata[0];
                        else
                                base = NULL;
                        count = parent->bytes / sizeof(hammer2_blockref_t);
                        break;
                case HAMMER2_BREF_TYPE_VOLUME:
                        base = &chain->hmp->voldata.sroot_blockset.blockref[0];
                        count = HAMMER2_SET_COUNT;
                        break;
                case HAMMER2_BREF_TYPE_FREEMAP:
                        base = &parent->data->npdata[0];
                        count = HAMMER2_SET_COUNT;
                        break;
                default:
                        base = NULL;
                        count = 0;
                        panic("hammer2_flush_core: "
                              "unrecognized blockref type: %d",
                              parent->bref.type);
                }
                /*
                 * Blocktable updates
                 *
                 * We synchronize pending statistics at this time.  Delta
                 * adjustments designated for the current and upper level
                 * are synchronized.
                 */
                if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
                        if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
                                hammer2_spin_ex(&parent->core.spin);
                                hammer2_base_delete(parent, base, count,
                                                    &info->cache_index, chain);
                                hammer2_spin_unex(&parent->core.spin);
                                /* base_delete clears both bits */
                        } else {
                                atomic_clear_int(&chain->flags,
                                                 HAMMER2_CHAIN_BMAPUPD);
                        }
                }
                if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
                        hammer2_spin_ex(&parent->core.spin);
                        hammer2_base_insert(parent, base, count,
                                            &info->cache_index, chain);
                        hammer2_spin_unex(&parent->core.spin);
                        /* base_insert sets BMAPPED */
                }
                hammer2_chain_unlock(parent);
        }
skipupdate:
        ;
        /*
         * Final cleanup after flush
         */
done:
        KKASSERT(chain->refs > 0);
        if (hammer2_debug & 0x200) {
                if (info->debug == chain)
                        info->debug = NULL;
        }
}

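/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a reader validating the volume header recomputes the same
 * icrc_volheader CRC that the BREF_TYPE_VOLUME case above just stored.
 * The helper name is hypothetical; hammer2_icrc32() and the ICRCVH
 * offsets are the real ones used above.
 */
#if 0
static __inline int
h2_example_volhdr_crc_ok(const hammer2_volume_data_t *voldata)
{
        return (voldata->icrc_volheader ==
                hammer2_icrc32((const char *)voldata +
                               HAMMER2_VOLUME_ICRCVH_OFF,
                               HAMMER2_VOLUME_ICRCVH_SIZE));
}
#endif
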
/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *          bref.mirror_tid ourselves to indicate that the flush has
 *          processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
        hammer2_flush_info_t *info = data;
        hammer2_chain_t *parent = info->parent;

        /*
         * (child can never be fchain or vchain so a special check isn't
         *  needed).
         *
         * We must ref the child before unlocking the spinlock.
         *
         * The caller has added a ref to the parent so we can temporarily
         * unlock it in order to lock the child.
         */
        hammer2_chain_ref(child);
        hammer2_spin_unex(&parent->core.spin);

        hammer2_chain_unlock(parent);
        hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

        /*
         * Recurse and collect deferral data.  We're in the media flush,
         * this can cross PFS boundaries.
         */
        if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
                ++info->depth;
                hammer2_flush_core(info, child, info->flags);
                --info->depth;
        } else if (hammer2_debug & 0x200) {
                if (info->debug == NULL)
                        info->debug = child;
                ++info->depth;
                hammer2_flush_core(info, child, info->flags);
                --info->depth;
                if (info->debug == child)
                        info->debug = NULL;
        }

        /*
         * Relock to continue the loop
         */
        hammer2_chain_unlock(child);
        hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
        hammer2_chain_drop(child);
        KKASSERT(info->parent == parent);
        hammer2_spin_ex(&parent->core.spin);

        return (0);
}

/*
 * flush helper (direct)
 *
 * Quickly flushes any dirty chains for a device.  This will update our
 * concept of the volume root but does NOT flush the actual volume root
 * and does not flush dirty device buffers.
 *
 * This function is primarily used by the bulkfree code to allow it to
 * create a snapshot for the pass.  It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
        hammer2_chain_t *chain;

        hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

        hammer2_chain_ref(&hmp->vchain);
        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                chain = &hmp->vchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP |
                                     HAMMER2_FLUSH_ALL);
                KKASSERT(chain == &hmp->vchain);
        }
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->vchain);

        hammer2_trans_done(hmp->spmp);  /* spmp trans */
}

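/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a bulkfree-style caller simply quiesces dirty chains before scanning;
 * the flush transaction inside hammer2_flush_quick() interlocks against
 * normal frontend transactions for the duration.  The wrapper function
 * is hypothetical.
 */
#if 0
static void
h2_example_bulkfree_prep(hammer2_dev_t *hmp)
{
        hammer2_flush_quick(hmp);       /* settle dirty chains */
        /* ... scan the now-stable topology for the bulkfree pass ... */
}
#endif
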
/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_flush_t *xop = &arg->xop_flush;
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;
        hammer2_dev_t *hmp;
        int error = 0;
        int total_error = 0;
        int j;
        /*
         * Flush core chains
         */
        chain = hammer2_inode_chain(xop->head.ip1, clindex,
                                    HAMMER2_RESOLVE_ALWAYS);
        if (chain) {
                hmp = chain->hmp;
                if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
                    TAILQ_FIRST(&hmp->flushq) != NULL) {
                        hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                        parent = chain->parent;
                        KKASSERT(chain->pmp != parent->pmp);
                        hammer2_chain_setflush(parent);
                }
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                chain = NULL;
        } else {
                goto skip;
        }

        /*
         * Flush volume roots.  Avoid replication, we only want to
         * flush each hammer2_dev (hmp) once.
         */
        for (j = clindex - 1; j >= 0; --j) {
                if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
                        if (chain->hmp == hmp) {
                                chain = NULL;   /* safety */
                                goto skip;
                        }
                }
        }
        chain = NULL;   /* safety */
        /*
         * spmp transaction.  The super-root is never directly mounted so
         * there shouldn't be any vnodes, let alone any dirty vnodes
         * associated with it, so we shouldn't have to mess around with any
         * vnode flushes here.
         */
        hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

        /*
         * Media mounts have two 'roots', vchain for the topology
         * and fchain for the free block table.  Flush both.
         *
         * Note that the topology and free block table are handled
         * independently, so the free block table can wind up being
         * ahead of the topology.  We depend on the bulk free scan
         * code to deal with any loose ends.
         */
        hammer2_chain_ref(&hmp->vchain);
        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_ref(&hmp->fchain);
        hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                /*
                 * This will also modify vchain as a side effect,
                 * mark vchain as modified now.
                 */
                hammer2_voldata_modify(hmp);
                chain = &hmp->fchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                KKASSERT(chain == &hmp->fchain);
        }
        hammer2_chain_unlock(&hmp->fchain);
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->fchain);
        /* vchain dropped down below */

        hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
        if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
                chain = &hmp->vchain;
                hammer2_flush(chain, HAMMER2_FLUSH_TOP);
                KKASSERT(chain == &hmp->vchain);
        }
        hammer2_chain_unlock(&hmp->vchain);
        hammer2_chain_drop(&hmp->vchain);

        /*
         * We can't safely flush the volume header until we have
         * flushed any device buffers which have built up.
         *
         * XXX this isn't being incremental.
         */
        vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
        vn_unlock(hmp->devvp);

        /*
         * The flush code sets CHAIN_VOLUMESYNC to indicate that the
         * volume header needs synchronization via hmp->volsync.
         *
         * XXX synchronize the flag & data with only this flush XXX
         */
        if (error == 0 &&
            (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
                struct buf *bp;

                /*
                 * Synchronize the disk before flushing the volume
                 * header.
                 */
                bp = getpbuf(NULL);
                bp->b_bio1.bio_offset = 0;
                bp->b_bufsize = 0;
                bp->b_bcount = 0;
                bp->b_cmd = BUF_CMD_FLUSH;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;
                vn_strategy(hmp->devvp, &bp->b_bio1);
                biowait(&bp->b_bio1, "h2vol");
                relpbuf(bp, NULL);

                /*
                 * Then we can safely flush the version of the
                 * volume header synchronized by the flush code.
                 */
                j = hmp->volhdrno + 1;
                if (j >= HAMMER2_NUM_VOLHDRS)
                        j = 0;
                if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
                    hmp->volsync.volu_size) {
                        j = 0;
                }
                if (hammer2_debug & 0x8000) {
                        /* debug only, avoid syslogd loop */
                        kprintf("sync volhdr %d %jd\n",
                                j, (intmax_t)hmp->volsync.volu_size);
                }
                bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
                            HAMMER2_PBUFSIZE, 0, 0);
                atomic_clear_int(&hmp->vchain.flags,
                                 HAMMER2_CHAIN_VOLUMESYNC);
                bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
                bawrite(bp);
                hmp->volhdrno = j;
        }
        if (error)
                total_error = error;

        hammer2_trans_done(hmp->spmp);  /* spmp trans */
skip:
        error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
}
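
/*
 * Illustrative sketch (an assumption, not part of the original source):
 * volume header rotation as performed above.  Successive flushes write
 * successive backup headers, skipping slots that lie beyond the end of
 * a small volume.  The helper is hypothetical.
 */
#if 0
static __inline int
h2_example_next_volhdrno(hammer2_dev_t *hmp)
{
        int j = hmp->volhdrno + 1;

        if (j >= HAMMER2_NUM_VOLHDRS)
                j = 0;
        if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
            hmp->volsync.volu_size)
                j = 0;
        return (j);
}
#endif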