/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>

#include "hammer2.h"

/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_chain_t	*parent;
	hammer2_trans_t	*trans;
	int		depth;
	int		diddeferral;
	int		pass;
	int		cache_index;
	hammer2_off_t	data_count;
	int		inode_count;
	struct h2_flush_deferral_list flush_list;
	hammer2_tid_t	sync_tid;	/* flush synchronization point */
	hammer2_tid_t	mirror_tid;	/* collect mirror TID updates */
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_chain_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t **chainp);
static int hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data);
static int hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data);
static void hammer2_rollup_stats(hammer2_chain_t *parent,
				hammer2_chain_t *child, int how);
static void
hammer2_updatestats(hammer2_flush_info_t *info, hammer2_blockref_t *bref,
		    int how)
{
	hammer2_off_t bytes;

	if (bref->type != 0) {
		bytes = 1 << (bref->data_off & HAMMER2_OFF_MASK_RADIX);
		if (bref->type == HAMMER2_BREF_TYPE_INODE)
			info->inode_count += how;
		if (how < 0)
			info->data_count -= bytes;
		else
			info->data_count += bytes;
	}
}
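
/*
 * Usage sketch (illustrative): 'how' follows the same convention as
 * hammer2_rollup_stats() at the end of this file -- callers pass +1 when
 * a blockref is being added and -1 when one is being removed, e.g.
 *
 *	hammer2_updatestats(info, &child->bref, 1);	(blockref inserted)
 *	hammer2_updatestats(info, &child->bref, -1);	(blockref deleted)
 */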

/*
 * Transaction support functions for writing to the filesystem.
 *
 * Initializing a new transaction allocates a transaction ID.  We
 * don't bother marking the volume header MODIFIED.  Instead, the volume
 * will be synchronized at a later time as part of a larger flush sequence.
 *
 * Non-flush transactions can typically run concurrently.  However, if
 * there are non-flush transactions both before AND after a flush trans,
 * the transactions after the flush stall until the ones before it finish.
 *
 * Non-flush transactions occurring after a flush trans can run concurrently
 * with that flush.  They only have to wait for transactions prior to the
 * flush trans to complete before they unstall.
 *
 * WARNING! Transaction IDs are only allocated when the transaction becomes
 *	    active, which allows other transactions to insert ahead of us
 *	    if we are forced to block (only bioq transactions do that).
 *
 * WARNING! Modifications to the root volume cannot dup the root volume
 *	    header to handle synchronization points, so alloc_tid can
 *	    wind up (harmlessly) more advanced on flush.
 */
void
hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp, int flags)
{
	hammer2_mount_t *hmp;
	hammer2_trans_t *scan;

	bzero(trans, sizeof(*trans));
	trans->pmp = pmp;
	hmp = pmp->cluster.chains[0]->hmp;	/* XXX */

	hammer2_voldata_lock(hmp);
	trans->flags = flags;
	trans->td = curthread;
	/*trans->delete_gen = 0;*/	/* multiple deletions within trans */

	if (flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If multiple flushes are trying to run we have to
		 * wait until it is our turn, then set curflush to
		 * indicate that a flush is now pending (but not
		 * necessarily active yet).
		 *
		 * NOTE: Do not set trans->blocked here.
		 */
		while (hmp->curflush != NULL) {
			lksleep(&hmp->curflush, &hmp->voldatalk,
		}
		hmp->curflush = trans;
		TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);

		/*
		 * If we are a flush we have to wait for all transactions
		 * prior to our flush synchronization point to complete
		 * before we can start our flush.
		 *
		 * Most importantly, this includes bioq flushes.
		 *
		 * NOTE: Do not set trans->blocked here.
		 */
		while (TAILQ_FIRST(&hmp->transq) != trans) {
			lksleep(&trans->sync_tid, &hmp->voldatalk,
		}

		/*
		 * Don't assign sync_tid until we become the running
		 * flush.  last_flush_tid and topo_flush_tid are used
		 * to determine when a copy-on-write (aka delete-duplicate)
		 * is required.
		 */
		trans->sync_tid = hmp->voldata.alloc_tid;
		hmp->voldata.alloc_tid += 2;
		hmp->topo_flush_tid = trans->sync_tid;

		/*
		 * Once we become the running flush we can wakeup anyone
		 * who blocked on us, up to the next flush.  That is,
		 * our flush can run concurrently with frontend operations.
		 */
		scan = trans;
		while ((scan = TAILQ_NEXT(scan, entry)) != NULL) {
			if (scan->flags & HAMMER2_TRANS_ISFLUSH)
				break;
			if (scan->blocked == 0)
				break;
			scan->blocked = 0;
			wakeup(&scan->blocked);
		}
	} else if ((flags & HAMMER2_TRANS_BUFCACHE) && hmp->curflush) {
		/*
		 * We cannot block if we are the bioq thread.
		 *
		 * When possible we steal the flush's TID and flush buffers
		 * as part of the larger filesystem flush.  The flush will
		 * interlock against buffer cache transactions when INVFSYNC
		 * is cleared.
		 *
		 * NOTE: Transactions are not ordered by sync_tid on the
		 *	 transq.  Append to avoid confusion.  Other waiting
		 *	 flushes will not have added themselves to transq
		 *	 yet.
		 */
		TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);
		if ((scan = hmp->curflush) != NULL) {
			if (scan->flags & HAMMER2_TRANS_INVFSYNC) {
				trans->sync_tid = scan->sync_tid;
			} else {
				trans->sync_tid = hmp->voldata.alloc_tid++;
			}
		} else {
			trans->sync_tid = hmp->voldata.alloc_tid++;
		}
	} else {
		/*
		 * If this is a normal transaction and not a flush, or
		 * if this is a bioq transaction and no flush is pending,
		 * we can queue normally.
		 *
		 * Normal transactions must block while a pending flush is
		 * waiting for prior transactions to complete.  Once the
		 * pending flush becomes active we can run concurrently
		 * with it.
		 */
		TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);
		scan = TAILQ_FIRST(&hmp->transq);
		if (hmp->curflush && hmp->curflush != scan) {
			trans->blocked = 1;
			while (trans->blocked) {
				lksleep(&trans->blocked, &hmp->voldatalk,
			}
		}
		trans->sync_tid = hmp->voldata.alloc_tid++;
	}
	hammer2_voldata_unlock(hmp, 0);
}
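
/*
 * Illustrative lifecycle sketch (typical caller shape, not verbatim from
 * the frontend code): modifying paths bracket their work with a
 * transaction, and the flusher passes HAMMER2_TRANS_ISFLUSH:
 *
 *	hammer2_trans_t trans;
 *
 *	hammer2_trans_init(&trans, pmp, 0);
 *	... modify chains under trans.sync_tid ...
 *	hammer2_trans_done(&trans);
 */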

/*
 * Clear the flag that allowed buffer cache flushes to steal the
 * main flush's transaction ID and wait for any in-progress BC flushes
 * to complete.
 */
void
hammer2_trans_clear_invfsync(hammer2_trans_t *trans)
{
	hammer2_mount_t *hmp = trans->pmp->cluster.chains[0]->hmp;

	hammer2_bioq_sync(trans->pmp);
	atomic_clear_int(&trans->flags, HAMMER2_TRANS_INVFSYNC);
	if (TAILQ_FIRST(&hmp->transq) != trans) {
		hammer2_voldata_lock(hmp);
		while (TAILQ_FIRST(&hmp->transq) != trans) {
			tsleep(&trans->sync_tid, 0, "h2flbw", 0);
		}
		hammer2_voldata_unlock(hmp, 0);
	}
	hammer2_bioq_sync(trans->pmp);
	hmp->topo_flush_tid = trans->sync_tid;
}
void
hammer2_trans_done(hammer2_trans_t *trans)
{
	hammer2_mount_t *hmp;
	hammer2_trans_t *scan;
	int wasathead;

	hmp = trans->pmp->cluster.chains[0]->hmp;

	hammer2_voldata_lock(hmp);
	wasathead = (TAILQ_FIRST(&hmp->transq) == trans);
	TAILQ_REMOVE(&hmp->transq, trans, entry);

	if (trans->flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we were a flush then wakeup anyone waiting on
		 * curflush (i.e. other flushes that want to run).
		 */
		hmp->curflush = NULL;
		wakeup(&hmp->curflush);

		/*
		 * Cycle the flush_tid.
		 */
		hmp->curflush = NULL;
		hmp->last_flush_tid = hmp->topo_flush_tid;
		hmp->topo_flush_tid = HAMMER2_MAX_TID;
	} else {
		/*
		 * If we are not a flush but a flush is now at the head
		 * of the queue and we were previously blocking it,
		 * we can now unblock it.
		 *
		 * Special case where sync_tid == scan->sync_tid occurs
		 * when a buffer cache flush is issued while a normal flush
		 * is running (and in the correct stage), which is typically
		 * semi-synchronous but not always.
		 */
		if (wasathead &&
		    (scan = TAILQ_FIRST(&hmp->transq)) != NULL &&
		    (scan->flags & HAMMER2_TRANS_ISFLUSH)) {
			wakeup(&scan->sync_tid);
		}
	}

	hammer2_voldata_unlock(hmp, 0);
}
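
/*
 * Ordering sketch (restating the code above, not additional logic): transq
 * is kept in arrival order, so a pending flush becomes runnable exactly
 * when it reaches TAILQ_FIRST(&hmp->transq).  hammer2_trans_done() wakes
 * &hmp->curflush so the next queued flush can take over, and wakes
 * &scan->sync_tid when a waiting flush newly reaches the head of the queue.
 */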

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point (sync_tid), propagating parent chain modifications
 * and mirror_tid updates back up as needed.  Since we are recursing downward
 * we do not have to deal with the complexities of multi-homed chains (chains
 * with multiple parents).
 *
 * Caller must have interlocked against any non-flush-related modifying
 * operations in progress whose modify_tid values are less than or equal
 * to the passed sync_tid.
 *
 * Caller must have already vetted synchronization points to ensure they
 * are properly flushed.  Only snapshots and cluster flushes can create
 * these sorts of synchronization points.
 *
 * This routine can be called from several places but the most important
 * is from the hammer2_vop_reclaim() function.  We want to try to completely
 * clean out the inode structure to prevent disconnected inodes from
 * building up and blowing out the kmalloc pool.  However, it is not actually
 * necessary to flush reclaimed inodes for HAMMER2's crash recovery.
 *
 * The chain is locked on call and will remain locked on return.  If a flush
 * occurred, the chain's MOVED bit will be set indicating that its parent
 * (which is not part of the flush) should be updated.
 */
void
hammer2_chain_flush(hammer2_trans_t *trans, hammer2_chain_t **chainp)
{
	hammer2_chain_t *chain = *chainp;
	hammer2_chain_t *scan;
	hammer2_chain_core_t *core;
	hammer2_flush_info_t info;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flush_list);
	info.trans = trans;
	info.sync_tid = trans->sync_tid;
	info.cache_index = -1;

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * the chain.
	 */
	hammer2_chain_ref(chain);

	for (;;) {
		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave MOVED set for these chains, which will be
		 * handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flush_list, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_chain_flush(trans, &scan);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}

		/*
		 * Flush pass1 on root.
		 */
		info.diddeferral = 0;
		hammer2_chain_flush_core(&info, &chain);
		kprintf("flush_core_done parent=<base> chain=%p.%d %08x\n",
			chain, chain->bref.type, chain->flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flush_list))
			break;
	}

	hammer2_chain_drop(chain);
}
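
/*
 * Illustrative call pattern (sketch mirroring the deferral loop above):
 * the caller holds the chain locked inside a flush transaction, and the
 * chain pointer may be replaced by the flush, which is why a pointer to
 * the pointer is passed:
 *
 *	hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
 *	hammer2_chain_flush(trans, &chain);
 *	hammer2_chain_unlock(chain);
 */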

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.
 *
 * This function is keyed off of the chain's update_tid but must make
 * fine-grained choices based on the synchronization point we are flushing to.
 *
 * If the flush accomplished any work chain will be flagged MOVED
 * indicating a copy-on-write propagation back up is required.
 * Deep sub-nodes may also have been entered onto the deferral list.
 * MOVED is never set on the volume root.
 *
 * NOTE: modify_tid is different from MODIFIED.  modify_tid is updated
 *	 only when a chain is specifically modified, and not updated
 *	 for copy-on-write propagations.  MODIFIED is set on any modification
 *	 including copy-on-write propagations.
 */
static void
hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t **chainp)
{
	hammer2_chain_t *chain = *chainp;
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase, pmask, psize, boff;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_core_t *core;
	struct buf *bp;
	char *bdata;
	int error, wasmodified, diddeferral;

	hmp = chain->hmp;
	core = chain->core;
	diddeferral = info->diddeferral;

	if (info->parent)
		kprintf("flush_core %p->%p.%d %08x (%s)\n",
			info->parent, chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));
	else
		kprintf("flush_core NULL->%p.%d %08x (%s)\n",
			chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));

	/*
	 * Ignore chains modified beyond the current flush point.  These
	 * will be treated as if they did not exist.
	 */
	if (chain->modify_tid > info->sync_tid)
		return;

	kprintf("PUSH %p.%d %08x mirror=%016jx\n", chain, chain->bref.type, chain->flags, chain->bref.mirror_tid);

	/*
	 * If update_tid triggers we recurse the flush and adjust the
	 * blockrefs accordingly.
	 *
	 * NOTE: Looping on update_tid can prevent a flush from ever
	 *	 finishing in the face of filesystem activity.
	 *
	 * NOTE: We must recurse whether chain is flagged DELETED or not.
	 *	 However, if it is flagged DELETED we limit sync_tid to
	 *	 delete_tid to ensure that the chain's bref.mirror_tid is
	 *	 not fully updated and causes it to miss the non-DELETED
	 *	 portion of the flush.
	 */
	if (chain->bref.mirror_tid < core->update_tid) {
		hammer2_chain_t *saved_parent;
		hammer2_tid_t saved_mirror;
		hammer2_chain_layer_t *layer;

		/*
		 * Races will bump update_tid above trans->sync_tid causing
		 * us to catch the issue in a later flush.  We do not update
		 * update_tid if a deferral (or error XXX) occurs.
		 *
		 * We don't want to set our chain to MODIFIED gratuitously.
		 *
		 * We need an extra ref on chain because we are going to
		 * release its lock temporarily in our child loop.
		 */

		/*
		 * Run two passes.  The first pass handles MODIFIED and
		 * update_tid recursions while the second pass handles
		 * MOVED chains on the way back up.
		 *
		 * If the stack gets too deep we defer scan1, but must
		 * be sure to still run scan2 if on the next loop the
		 * deferred chain has been flushed and now needs MOVED
		 * handling on the way back up.
		 *
		 * Scan1 is recursive.
		 *
		 * NOTE: The act of handling a modified/submodified chain can
		 *	 cause the MOVED flag to be set.  It can also be set
		 *	 via hammer2_chain_delete() and in other situations.
		 *
		 * NOTE: RB_SCAN() must be used instead of RB_FOREACH()
		 *	 because children can be physically removed during
		 *	 the scan.
		 */
		saved_parent = info->parent;
		saved_mirror = info->mirror_tid;
		info->parent = chain;
		info->mirror_tid = chain->bref.mirror_tid;

		if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
			if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
				hammer2_chain_ref(chain);
				TAILQ_INSERT_TAIL(&info->flush_list,
						  chain, flush_node);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_DEFERRED);
			}
			diddeferral = 1;
		} else {
			info->diddeferral = 0;
			spin_lock(&core->cst.spin);
			KKASSERT(core->good == 0x1234 && core->sharecnt > 0);
			TAILQ_FOREACH_REVERSE(layer, &core->layerq,
					      h2_layer_list, entry) {
				KKASSERT(layer->good == 0xABCD);
				RB_SCAN(hammer2_chain_tree, &layer->rbtree,
					NULL, hammer2_chain_flush_scan1, info);
				diddeferral += info->diddeferral;
			}
			spin_unlock(&core->cst.spin);
		}

		KKASSERT(info->parent == chain);

		/*
		 * Handle successfully flushed children who are in the MOVED
		 * state on the way back up the recursion.  This can have
		 * the side-effect of clearing MOVED.
		 *
		 * Scan2 may replace info->parent.  If it does it will also
		 * replace the extra ref we made.
		 *
		 * Scan2 is non-recursive.
		 */
		spin_lock(&core->cst.spin);
		KKASSERT(core->good == 0x1234 && core->sharecnt > 0);
		TAILQ_FOREACH_REVERSE(layer, &core->layerq,
				      h2_layer_list, entry) {
			KKASSERT(layer->good == 0xABCD);
			info->pass = 1;
			RB_SCAN(hammer2_chain_tree, &layer->rbtree,
				NULL, hammer2_chain_flush_scan2, info);
			info->pass = 2;
			RB_SCAN(hammer2_chain_tree, &layer->rbtree,
				NULL, hammer2_chain_flush_scan2, info);
		}
		KKASSERT(info->parent->core == core);

		/*
		 * Mirror_tid propagates all changes.  It is also used
		 * in scan2 to determine when a chain must be applied
		 * to the related block table.
		 */
		kprintf("chainA %p.%d set parent bref mirror_tid %016jx -> %016jx\n",
			info->parent, info->parent->bref.type,
			info->mirror_tid, info->parent->bref.mirror_tid);
		KKASSERT(info->parent->bref.mirror_tid <=
			 info->mirror_tid);
		info->parent->bref.mirror_tid = info->mirror_tid;

		/*
		 * The chain may have been replaced.
		 */
		if (info->parent != *chainp)
			kprintf("SWITCH PARENT %p->%p\n",
				*chainp, info->parent);
		chain = info->parent;
		hammer2_chain_layer_check_locked(chain->hmp, core);
		spin_unlock(&core->cst.spin);

		info->mirror_tid = saved_mirror;
		info->parent = saved_parent;
		KKASSERT(chain->refs > 1);
	}

	kprintf("POP %p.%d\n", chain, chain->bref.type);

	/*
	 * Rollup diddeferral for caller.  Note direct assignment, not +=.
	 */
	info->diddeferral = diddeferral;

	/*
	 * Do not flush chain if there were any deferrals.  It will be
	 * retried later after the deferrals are independently handled.
	 */
	if (diddeferral) {
		if (hammer2_debug & 0x0008) {
			kprintf("%*.*s} %p/%d %04x (deferred)",
				info->depth, info->depth, "",
				chain, chain->refs, chain->flags);
		}
		return;
	}

	/*
	 * If we encounter a deleted chain within our flush we can clear
	 * the MODIFIED bit and avoid flushing it whether it has been
	 * destroyed or not.  We must make sure that the chain is flagged
	 * MOVED in this situation so the parent picks up the deletion.
	 *
	 * Since this chain will now never be written to disk we need to
	 * adjust bref.mirror_tid such that it does not prevent sub-chains
	 * from clearing their MOVED bits.
	 *
	 * NOTE: scan2 has already executed above so statistics have
	 *	 already been rolled up.
	 *
	 * NOTE: Deletions do not prevent flush recursion as a deleted
	 *	 inode (removed file) which is still open may still require
	 *	 on-media storage to be able to clean related pages out of
	 *	 the buffer cache.
	 *
	 * NOTE: Even though this chain will not issue write I/O, we must
	 *	 still update chain->bref.mirror_tid for flush management
	 *	 purposes.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp) {
				if (chain->bytes == chain->bp->b_bufsize)
					chain->bp->b_flags |= B_INVAL|B_RELBUF;
			}
			if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
				hammer2_chain_ref(chain);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_MOVED);
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			if (chain->bref.mirror_tid < info->sync_tid)
				chain->bref.mirror_tid = info->sync_tid;
			hammer2_chain_drop(chain);
		} else {
			if (chain->bref.mirror_tid < info->sync_tid)
				chain->bref.mirror_tid = info->sync_tid;
		}
		return;
	}

	if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    (trans->flags & HAMMER2_TRANS_RESTRICTED) == 0) {
		/*
		 * Throw-away the MODIFIED flag.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp) {
				if (chain->bytes == chain->bp->b_bufsize)
					chain->bp->b_flags |= B_INVAL|B_RELBUF;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
	}

	/*
	 * A degenerate flush might not have flushed anything and thus not
	 * processed modified blocks on the way back up.  Detect the case.
	 *
	 * Note that MOVED can be set without MODIFIED being set due to
	 * a deletion, in which case it is handled by Scan2 later on.
	 *
	 * Both bits can be set along with DELETED due to a deletion if
	 * data was modified within the synchronization zone and the chain
	 * was then deleted beyond the zone, in which case we still have
	 * to flush for synchronization point consistency.  Otherwise though
	 * DELETED and MODIFIED are treated as separate flags.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		if (chain->bref.mirror_tid < info->sync_tid)
			chain->bref.mirror_tid = info->sync_tid;
		return;
	}

	/*
	 * A DESTROYED node that reaches this point must be flushed for
	 * synchronization point consistency.
	 *
	 * Update mirror_tid, clear MODIFIED, and set MOVED.
	 *
	 * The caller will update the parent's reference to this chain
	 * by testing MOVED as long as the modification was in-bounds.
	 *
	 * MOVED is never set on the volume root as there is no parent
	 * to adjust.
	 */
	if (hammer2_debug & 0x1000) {
		kprintf("Flush %p.%d %016jx/%d sync_tid %016jx\n",
			chain, chain->bref.type,
			chain->bref.key, chain->bref.keybits,
			info->sync_tid);
	}
	if (hammer2_debug & 0x2000) {
		Debugger("Flush hell");
	}
	if (chain->bref.mirror_tid < info->sync_tid)
		chain->bref.mirror_tid = info->sync_tid;

	wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
	if (chain == &hmp->vchain)
		kprintf("(FLUSHED VOLUME HEADER)\n");
	if (chain == &hmp->fchain)
		kprintf("(FLUSHED FREEMAP HEADER)\n");

	if ((chain->flags & HAMMER2_CHAIN_MOVED) ||
	    chain == &hmp->vchain ||
	    chain == &hmp->fchain) {
		/*
		 * Drop the ref from the MODIFIED bit we cleared.
		 * Net is -0 or -1 ref depending.
		 */
		if (wasmodified)
			hammer2_chain_drop(chain);
	} else {
		/*
		 * Drop the ref from the MODIFIED bit we cleared and
		 * set a ref for the MOVED bit we are setting.  Net
		 * is +0 or +1 ref depending.
		 */
		if (wasmodified == 0)
			hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
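
	/*
	 * Reference accounting sketch (restating the comments above, no new
	 * logic): MODIFIED and MOVED each hold an implied ref on the chain,
	 * so clearing MODIFIED while MOVED is already set drops a ref, and
	 * setting MOVED on a chain that was not MODIFIED adds one; the other
	 * two combinations simply transfer the existing ref.
	 */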

	/*
	 * If this is part of a recursive flush we can go ahead and write
	 * out the buffer cache buffer and pass a new bref back up the chain
	 * via the MOVED bit.
	 *
	 * Volume headers are NOT flushed here as they require special
	 * processing.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_FREEMAP:
		hammer2_modify_volume(hmp);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		/*
		 * We should flush the free block table before we calculate
		 * CRCs and copy voldata -> volsync.
		 *
		 * To prevent SMP races, fchain must remain locked until
		 * voldata is copied to volsync.
		 */
		hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
		if ((hmp->fchain.flags & HAMMER2_CHAIN_MODIFIED) ||
		    hmp->voldata.mirror_tid < hmp->fchain.core->update_tid) {
			/* this will modify vchain as a side effect */
			hammer2_chain_t *tmp = &hmp->fchain;
			hammer2_chain_flush(info->trans, &tmp);
			KKASSERT(tmp == &hmp->fchain);
		}

		/*
		 * The volume header is flushed manually by the syncer, not
		 * here.  All we do is adjust the CRCs.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		kprintf("volume header mirror_tid %jd\n",
			hmp->voldata.mirror_tid);

		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC1_OFF,
				HAMMER2_VOLUME_ICRC1_SIZE);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC0_OFF,
				HAMMER2_VOLUME_ICRC0_SIZE);
		hmp->voldata.icrc_volheader =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRCVH_OFF,
				HAMMER2_VOLUME_ICRCVH_SIZE);
		hmp->volsync = hmp->voldata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
		hammer2_chain_unlock(&hmp->fchain);
		break;
	case HAMMER2_BREF_TYPE_DATA:
		/*
		 * Data elements have already been flushed via the logical
		 * file buffer cache.  Their hash was set in the bref by
		 * the vop_write code.
		 *
		 * Make sure any device buffer(s) have been flushed out here
		 * (there usually aren't any to flush).
		 */
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = chain->bref.data_off & ~pmask;
		boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);

		bp = getblk(hmp->devvp, pbase, psize, GETBLK_NOWAIT, 0);
		if (bp) {
			if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
			    (B_CACHE | B_DIRTY)) {
				cluster_awrite(bp);
			} else {
				bp->b_flags |= B_RELBUF;
				brelse(bp);
			}
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Indirect blocks may be in an INITIAL state.  Use the
		 * chain_lock() call to ensure that the buffer has been
		 * instantiated (even though it is already locked the buffer
		 * might not have been instantiated).
		 *
		 * Only write the buffer out if it is dirty; it is possible
		 * the operating system had already written out the buffer.
		 */
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		KKASSERT(chain->bp != NULL);

		bp = chain->bp;
		if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
		    (bp->b_flags & B_DIRTY)) {
		hammer2_chain_unlock(chain);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Device-backed.  Buffer will be flushed by the sync
		 * code.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * Embedded elements have to be flushed out.
		 * (Basically just BREF_TYPE_INODE).
		 */
		KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		bref = &chain->bref;

		KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
		KKASSERT(HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_ISCSI32 ||
			 HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_FREEMAP);

		/*
		 * The data is embedded; we have to acquire the
		 * buffer cache buffer and copy the data into it.
		 */
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = bref->data_off & ~pmask;
		boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);

		/*
		 * The getblk() optimization can only be used if the
		 * physical block size matches the request.
		 */
		error = bread(hmp->devvp, pbase, psize, &bp);
		KKASSERT(error == 0);

		bdata = (char *)bp->b_data + boff;

		/*
		 * Copy the data to the buffer, mark the buffer
		 * dirty, and convert the chain to unmodified.
		 */
		bcopy(chain->data, bdata, chain->bytes);
		bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		bp = NULL;

		switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
		case HAMMER2_CHECK_FREEMAP:
			chain->bref.check.freemap.icrc32 =
				hammer2_icrc32(chain->data, chain->bytes);
			break;
		case HAMMER2_CHECK_ISCSI32:
			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
			break;
		default:
			panic("hammer2_flush_core: bad crc type");
			break;	/* NOT REACHED */
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			++hammer2_iod_meta_write;
		else
			++hammer2_iod_indr_write;
		break;
	}
}
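
/*
 * Check-code sketch (restating the embedded-element path above): both
 * supported methods reduce to a CRC32 over the chain's data, differing only
 * in which bref.check field receives the result:
 *
 *	HAMMER2_CHECK_FREEMAP -> bref.check.freemap.icrc32
 *	HAMMER2_CHECK_ISCSI32 -> bref.check.iscsi32.value
 */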

/*
 * Flush helper scan1 (recursive)
 *
 * Flushes the children of the caller's chain (parent) and updates
 * the blockref, restricted by sync_tid.
 *
 * Ripouts during the loop should not cause any problems.  Because we are
 * flushing to a synchronization point, modification races will occur after
 * sync_tid and do not have to be flushed anyway.
 *
 * It is also ok if the parent is chain_duplicate()'d while unlocked because
 * the delete/duplication will install a delete_tid that is still larger than
 * our current sync_tid.
 */
static int
hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_t *parent = info->parent;
	int diddeferral;

	/*
	 * We only need to recurse if MODIFIED is set or
	 * child->bref.mirror_tid has not caught up to update_tid.
	 */
	if ((child->flags & HAMMER2_CHAIN_MODIFIED) == 0 &&
	    child->bref.mirror_tid >= child->core->update_tid) {
		return (0);
	}
	if (child->modify_tid > trans->sync_tid)
		return (0);

	hammer2_chain_ref(child);
	spin_unlock(&parent->core->cst.spin);

	/*
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  Re-check the flags before
	 * continuing.
	 */
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	if ((child->flags & HAMMER2_CHAIN_MODIFIED) == 0 &&
	    child->bref.mirror_tid >= child->core->update_tid) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		return (0);
	}

	/*
	 * The DESTROYED flag can only be initially set on an unreferenced
	 * deleted inode and will propagate downward via the mechanism below.
	 * Such inode chains have been deleted for good and should no longer
	 * be subject to delete/duplication.
	 *
	 * This optimization allows the inode reclaim (destroy unlinked file
	 * on vnode reclamation after last close) to be flagged by just
	 * setting HAMMER2_CHAIN_DESTROYED at the top level, which then
	 * causes the chains to be terminated and related buffers to be
	 * invalidated and not flushed out.
	 *
	 * We have to be careful not to propagate the DESTROYED flag if
	 * the destruction occurred after our flush sync_tid.
	 */
	if ((parent->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (child->flags & HAMMER2_CHAIN_DELETED) &&
	    (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROYED);

		/*
		 * Force downward recursion by bringing update_tid up to
		 * at least sync_tid.  Parent's mirror_tid has not yet
		 * been updated.
		 *
		 * Vnode reclamation may have forced update_tid to MAX_TID.
		 * In this situation bring it down to something reasonable
		 * so the elements being destroyed can be retired.
		 */
		spin_lock(&child->core->cst.spin);
		if (child->core->update_tid < trans->sync_tid ||
		    child->core->update_tid == HAMMER2_MAX_TID) {
			child->core->update_tid = trans->sync_tid;
		}
		spin_unlock(&child->core->cst.spin);
	}

	/*
	 * Recurse and collect deferral data.
	 */
	diddeferral = info->diddeferral;
	++info->depth;
	hammer2_chain_flush_core(info, &child);
	kprintf("flush_core_done parent=%p flags=%08x child=%p.%d %08x\n",
		parent, parent->flags, child, child->bref.type, child->flags);

	/*
	 * NOTE: If child failed to fully synchronize, child's bref.mirror_tid
	 *	 will not have been updated.  Bumping diddeferral prevents
	 *	 the parent chain from updating bref.mirror_tid on the way
	 *	 back up in order to force a retry later.
	 */
	if (child->bref.mirror_tid < child->core->update_tid)
		++diddeferral;
	--info->depth;
	info->diddeferral += diddeferral;

	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);

	spin_lock(&parent->core->cst.spin);

	return (0);
}

/*
 * Flush helper scan2 (non-recursive)
 *
 * This pass on a chain's children propagates any MOVED or DELETED
 * elements back up the chain towards the root after those elements have
 * been fully flushed.  Unlike scan1, this function is NOT recursive and
 * the parent remains locked across the entire scan.
 *
 * SCAN2 is called twice, once with pass set to 1 and once with it set to 2.
 * We have to do this so base[] elements can be deleted in pass 1 to make
 * room for adding new elements in pass 2.
 *
 * This function also rolls up storage statistics.
 *
 * NOTE! A deletion is a visibility issue; there can still be references to
 *	 deleted elements (for example, to an unlinked file which is still
 *	 open), and there can also be multiple chains pointing to the same
 *	 bref where some are deleted and some are not (for example due to
 *	 a rename).  So a chain marked for deletion is basically considered
 *	 to be live until it is explicitly destroyed or until its ref-count
 *	 reaches zero (also implying that MOVED and MODIFIED are clear).
 */
static int
hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;
	hammer2_chain_core_t *above = child->above;
	hammer2_mount_t *hmp = child->hmp;
	hammer2_trans_t *trans = info->trans;
	hammer2_blockref_t *base;
	int count;
	int ok;

	/*
	 * Inodes with stale children that have been converted to DIRECTDATA
	 * mode (file extension or hardlink conversion typically) need to be
	 * skipped right now before we start messing with a nonexistent
	 * inode block array.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)) {
		return (0);
	}

	/*
	 * Ignore children created after our flush point, treating them as
	 * if they did not exist.  These children will not cause the parent's
	 * blockref array to be updated.
	 *
	 * Children deleted after our flush point are treated as having been
	 * created for the purposes of the flush.  The parent's update_tid
	 * will already be higher than our trans->sync_tid so the flush path
	 * will catch them in a later flush.
	 *
	 * When we encounter such children and the parent chain has not been
	 * deleted, delete/duplicated, or delete/duplicated-for-move, then
	 * the parent may be used to funnel through several flush points.
	 * These chains will still be visible to later flushes due to having
	 * a higher update_tid than we can set in the current flush.
	 */
	if (child->modify_tid > trans->sync_tid) {
		return (0);
	}

	/*
	 * Ignore children which have not changed.  The parent's block table
	 * is already correct.
	 *
	 * XXX The MOVED bit is only cleared when all multi-homed parents
	 *     have flushed, creating a situation where a re-flush can occur
	 *     via a parent which has already flushed.  The hammer2_base_*()
	 *     functions currently have a hack to deal with this case but
	 *     we need something better.
	 */
	if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
		return (0);
	}

	/*
	 * Make sure child is referenced before we unlock.
	 */
	hammer2_chain_ref(child);
	spin_unlock(&above->cst.spin);

	/*
	 * A parent reflushed after the child has passed it by should skip
	 * due to the modify_tid test. XXX
	 */
	hammer2_chain_lock(child, HAMMER2_RESOLVE_NEVER);
	KKASSERT(child->above == above);
	KKASSERT(parent->core == above);

	/*
	 * The parent's blockref to the child must be deleted or updated.
	 *
	 * This point is not reached on successful DESTROYED optimizations
	 * but can be reached on recursive deletions and restricted flushes.
	 *
	 * The chain_modify here may delete-duplicate the block.  This can
	 * cause a multitude of issues if the block was already modified
	 * by a later (post-flush) transaction.  Primarily blockrefs in
	 * the later block can be out-of-date, so if the situation occurs
	 * we can't throw away the MOVED bit on the current blocks until
	 * the later blocks are flushed (so as to be able to regenerate all
	 * the changes that were made).
	 *
	 * Because flushes are ordered we do not have to make a
	 * modify/duplicate of indirect blocks.  That is, the flush
	 * code does not have to kmalloc or duplicate anything.  We
	 * can adjust the indirect block table in-place and reuse the
	 * chain.  It IS possible that the chain has already been duplicated
	 * or may wind up being duplicated on-the-fly by modifying code
	 * on the frontend.  We simply use the original and ignore such
	 * chains.  However, it does mean we can't clear the MOVED bit.
	 *
	 * XXX recursive deletions not optimized.
	 */
	hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_NO_MODIFY_TID);
	if (info->parent != parent) {
		/* extra ref from flush_core */
		hammer2_chain_drop(info->parent);
		info->parent = parent;
		hammer2_chain_ref(info->parent);
	}

	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * XXX Should assert that OPFLAG_DIRECTDATA is 0 once we
		 * properly duplicate the inode headers and do proper flush
		 * range checks (all the children should be beyond the flush
		 * point).  For now just don't sync the non-applicable
		 * children.
		 *
		 * XXX Can also occur due to hardlink consolidation.  We
		 * set OPFLAG_DIRECTDATA to prevent the indirect and data
		 * blocks from syncing to the hardlink pointer.
		 */
		KKASSERT((parent->data->ipdata.op_flags &
			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			base = NULL;
		} else {
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		if (parent->data) {
			base = &parent->data->npdata[0];
		} else {
			base = NULL;
			KKASSERT(child->flags & HAMMER2_CHAIN_DELETED);
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &parent->data->npdata[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_flush_scan2: "
		      "unrecognized blockref type: %d",
		      parent->bref.type);
	}

	/*
	 * Don't bother updating a deleted parent's blockrefs (caller will
	 * optimize-out the disk write).  Note that this is not optional:
	 * a deleted parent's blockref array might not be synchronized at
	 * all, so calling hammer2_base*() functions could result in a panic.
	 *
	 * Otherwise, we need to be COUNTEDBREFS synchronized for the
	 * hammer2_base_*() functions.
	 */
	if (parent->delete_tid <= trans->sync_tid)
		base = NULL;
	else if ((parent->core->flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
		hammer2_chain_countbrefs(parent, base, count);

	/*
	 * Update the parent's blockref table and propagate mirror_tid.
	 *
	 * NOTE! Children with modify_tid's beyond our flush point are
	 *	 considered to not exist for the purposes of updating the
	 *	 parent's blockref array.
	 *
	 * NOTE! Updates to a parent's blockref table do not adjust the
	 *	 parent's bref.modify_tid, only its bref.mirror_tid.
	 *
	 * NOTE! chain->modify_tid vs chain->bref.modify_tid.  The chain's
	 *	 internal modify_tid is always updated based on creation
	 *	 or delete-duplicate.  However, the bref.modify_tid is NOT
	 *	 updated due to simple blockref updates.
	 */
	kprintf("chain %p->%p pass %d trans %016jx sync %p.%d %016jx/%d C=%016jx D=%016jx PMIRROR %016jx\n",
		parent, child,
		info->pass, trans->sync_tid,
		child, child->bref.type,
		child->bref.key, child->bref.keybits,
		child->modify_tid, child->delete_tid, parent->bref.mirror_tid);

	if (info->pass == 1 && child->delete_tid <= trans->sync_tid) {
		/*
		 * Deleting.  The block array is expected to contain the
		 * child's entry if:
		 *
		 * (1) The deletion occurred after the parent's block table
		 *     was last synchronized (delete_tid), and
		 *
		 * (2) The creation occurred before or during the parent's
		 *     last block table synchronization.
		 */
		if (base &&
		    child->delete_tid > parent->bref.mirror_tid &&
		    child->modify_tid <= parent->bref.mirror_tid) {
			hammer2_rollup_stats(parent, child, -1);
			spin_lock(&above->cst.spin);
			kprintf("trans %jx parent %p.%d child %p.%d m/d %016jx/%016jx "
				"flg=%08x %016jx/%d delete\n",
				trans->sync_tid,
				parent, parent->bref.type,
				child, child->bref.type,
				child->modify_tid, child->delete_tid,
				child->flags,
				child->bref.key, child->bref.keybits);
			hammer2_base_delete(parent, base, count,
					    &info->cache_index, child);
			spin_unlock(&above->cst.spin);
		}
		if (info->mirror_tid < child->delete_tid)
			info->mirror_tid = child->delete_tid;
	} else if (info->pass == 2 && child->delete_tid > trans->sync_tid) {
		/*
		 * Inserting.  The block array is expected to NOT contain
		 * the child's entry if:
		 *
		 * (1) The creation occurred after the parent's block table
		 *     was last synchronized (modify_tid), and
		 *
		 * (2) The child is not being deleted in the same
		 *     synchronization cycle.
		 */
		if (base &&
		    child->modify_tid > parent->bref.mirror_tid &&
		    child->delete_tid > trans->sync_tid) {
			hammer2_rollup_stats(parent, child, 1);
			spin_lock(&above->cst.spin);
			kprintf("trans %jx parent %p.%d child %p.%d m/d %016jx/%016jx "
				"flg=%08x %016jx/%d insert\n",
				trans->sync_tid,
				parent, parent->bref.type,
				child, child->bref.type,
				child->modify_tid, child->delete_tid,
				child->flags,
				child->bref.key, child->bref.keybits);
			hammer2_base_insert(parent, base, count,
					    &info->cache_index, child);
			spin_unlock(&above->cst.spin);
		}
		if (info->mirror_tid < child->modify_tid)
			info->mirror_tid = child->modify_tid;
	}

	if (info->mirror_tid < child->bref.mirror_tid) {
		info->mirror_tid = child->bref.mirror_tid;
	}
	if ((parent->bref.type == HAMMER2_BREF_TYPE_VOLUME ||
	     parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP) &&
	    hmp->voldata.mirror_tid < child->bref.mirror_tid) {
		hmp->voldata.mirror_tid = child->bref.mirror_tid;
	}

	/*
	 * Only clear MOVED once all possible parents have been flushed.
	 *
	 * When can we safely clear the MOVED flag?  Flushes down duplicate
	 * paths can occur out of order, for example if an inode is moved
	 * as part of a hardlink consolidation or if an inode is moved into
	 * an indirect block indexed before the inode.
	 */
	if (ok && (child->flags & HAMMER2_CHAIN_MOVED)) {
		hammer2_chain_t *scan;

		if (hammer2_debug & 0x4000)
			kprintf("CHECKMOVED %p (parent=%p)", child, parent);

		spin_lock(&above->cst.spin);
		TAILQ_FOREACH(scan, &above->ownerq, core_entry) {
			/*
			 * Can't destroy the child until all parents have
			 * synchronized with its move.
			 *
			 * NOTE: A deleted parent will synchronize with a
			 *	 child's move without bothering to update
			 *	 its block table.
			 */
			if (scan == parent ||
			    scan->delete_tid <= trans->sync_tid)
				continue;
			if (scan->bref.mirror_tid < child->modify_tid) {
				if (hammer2_debug & 0x4000)
					kprintf("(fail scan %p %016jx/%016jx)",
						scan, scan->bref.mirror_tid,
						child->modify_tid);
				ok = 0;
			}
		}
		if (hammer2_debug & 0x4000)
			kprintf("\n");
		spin_unlock(&above->cst.spin);

		if (ok) {
			if (hammer2_debug & 0x4000)
				kprintf("clear moved %p.%d %016jx/%d\n",
					child, child->bref.type,
					child->bref.key, child->bref.keybits);
			atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
			hammer2_chain_drop(child);	/* flag */
		} else {
			if (hammer2_debug & 0x4000)
				kprintf("keep moved %p.%d %016jx/%d\n",
					child, child->bref.type,
					child->bref.key, child->bref.keybits);
		}
	}

	/*
	 * Unlock the child.  This can wind up dropping the child's
	 * last ref, removing it from the parent's RB tree, and deallocating
	 * the structure.  The RB_SCAN() our caller is doing handles the
	 * situation.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);
	spin_lock(&above->cst.spin);

	/*
	 * The parent may have been delete-duplicated.
	 */
	info->parent = parent;

	return (0);
}
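
/*
 * Two-pass summary (restating the contract above): flush_core runs scan2
 * over the children twice, with info->pass set to 1 and then 2.  Pass 1
 * deletes block table entries for children whose delete_tid falls at or
 * before the flush point; pass 2 inserts entries for children that remain
 * live, so deletions free base[] slots before insertions consume them.
 */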

static void
hammer2_rollup_stats(hammer2_chain_t *parent, hammer2_chain_t *child, int how)
{
	hammer2_chain_t *grandp;

	parent->data_count += child->data_count;
	parent->inode_count += child->inode_count;
	child->data_count = 0;
	child->inode_count = 0;
	if (how < 0) {
		parent->data_count -= child->bytes;
		if (child->bref.type == HAMMER2_BREF_TYPE_INODE) {
			parent->inode_count -= 1;
			/* XXX child->data may be NULL atm */
			parent->data_count -= child->data->ipdata.data_count;
			parent->inode_count -= child->data->ipdata.inode_count;
		}
	} else if (how > 0) {
		parent->data_count += child->bytes;
		if (child->bref.type == HAMMER2_BREF_TYPE_INODE) {
			parent->inode_count += 1;
			/* XXX child->data may be NULL atm */
			parent->data_count += child->data->ipdata.data_count;
			parent->inode_count += child->data->ipdata.inode_count;
		}
	}
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
		parent->data->ipdata.data_count += parent->data_count;
		parent->data->ipdata.inode_count += parent->inode_count;
		for (grandp = parent->above->first_parent;
		     grandp;
		     grandp = grandp->next_parent) {
			grandp->data_count += parent->data_count;
			grandp->inode_count += parent->inode_count;
		}
		parent->data_count = 0;
		parent->inode_count = 0;
	}
}
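
/*
 * Worked example (illustrative): removing a 16KB inode chain via
 * hammer2_rollup_stats(parent, child, -1) subtracts child->bytes (16384)
 * from parent->data_count and decrements parent->inode_count by one; if
 * the parent is itself an inode, the accumulated deltas are then folded
 * into its ipdata counters and propagated to its own parents.
 */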