/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 *                      TRANSACTION AND FLUSH HANDLING
 *
 * Deceptively simple but actually fairly difficult to implement properly is
 * how I would describe it.
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells the scan how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT       60      /* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
        hammer2_chain_t *parent;
        int             depth;
        long            diddeferral;
        int             error;                  /* cumulative error */
        int             flags;
#ifdef HAMMER2_SCAN_DEBUG
        long            scan_count;
        long            scan_mod_count;
        long            scan_upd_count;
        long            scan_onf_count;
        long            scan_del_count;
        long            scan_btype[7];
        long            flushq_count;
#endif
        struct h2_flush_list flushq;
        hammer2_chain_t *debug;
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
                                hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0                    - Normal transaction, interlocked against flush
 *                        transaction.
 *
 * TRANS_ISFLUSH        - Flush transaction, interlocked against normal
 *                        transaction.
 *
 * TRANS_BUFCACHE       - Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;
        int dowait;

        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                dowait = 0;

                if (flags & HAMMER2_TRANS_ISFLUSH) {
                        /*
                         * Requesting flush transaction.  This interlocks
                         * only with other flush transactions.  Note that
                         * non-flush modifying transactions can run
                         * concurrently, but will interlock on any inodes
                         * that are on the SYNCQ.
                         */
                        if (oflags & HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
#if 0
                        if (oflags & HAMMER2_TRANS_MASK) {
                                nflags = oflags | HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else {
                                nflags = (oflags | flags) + 1;
                        }
#endif
                } else if (flags & HAMMER2_TRANS_BUFCACHE) {
                        /*
                         * Requesting strategy transaction from buffer-cache,
                         * or a VM getpages/putpages through the buffer cache.
                         * We must allow such transactions in all situations
                         * to avoid deadlocks.
                         */
                        nflags = (oflags | flags) + 1;
                } else {
                        /*
                         * Requesting a normal modifying transaction.
                         * Does not interlock with flushes.  Multiple
                         * modifying transactions can run concurrently.
                         * These do not mess with the on-media topology
                         * above the inode.
                         *
                         * If a flush is pending for more than one second
                         * but can't run because many modifying transactions
                         * are active, we wait for the flush to be granted.
                         *
                         * NOTE: Remember that non-modifying operations
                         *       such as read, stat, readdir, etc, do
                         *       not use transactions.
                         */
#if 0
                        if ((oflags & HAMMER2_TRANS_FPENDING) &&
                            (u_int)(ticks - pmp->trans.fticks) >= (u_int)hz) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else if (oflags & HAMMER2_TRANS_ISFLUSH) {
                                nflags = oflags | HAMMER2_TRANS_WAITING;
                                dowait = 1;
                        } else
#endif
                        {
                                nflags = (oflags | flags) + 1;
                        }
                }
                if (dowait)
                        tsleep_interlock(&pmp->trans.sync_wait, 0);
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if ((oflags & HAMMER2_TRANS_FPENDING) == 0 &&
                            (nflags & HAMMER2_TRANS_FPENDING)) {
                                pmp->trans.fticks = ticks;
                        }
                        if (dowait == 0)
                                break;
                        tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
                               "h2trans", hz);
                } else {
                        cpu_pause();
                }
                /* retry */
        }
}
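
/*
 * Example (sketch only, compiled out): the typical frontend bracket for a
 * modifying operation described above.  The operation in the middle is a
 * hypothetical placeholder; hammer2_trans_init() and hammer2_trans_done()
 * are the real entry points.
 */
#if 0
static void
example_modifying_op(hammer2_pfs_t *pmp)
{
        hammer2_trans_init(pmp, 0);     /* normal, non-flush transaction */
        /* ... modify inodes and chains here ... */
        hammer2_trans_done(pmp, 1);     /* 1 = allow sideq flush speedup */
}
#endif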

/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
        hammer2_tid_t mtid;

        mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

        return (mtid);
}
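
/*
 * Example (sketch only, compiled out): when multiple XOPs run in sequence
 * within one transaction, each XOP obtains its own modify_tid as described
 * above.  The XOP dispatch in the middle is a hypothetical placeholder.
 */
#if 0
static void
example_xop_sequence(hammer2_pfs_t *pmp)
{
        hammer2_tid_t mtid;

        mtid = hammer2_trans_sub(pmp);  /* CLC id for the first XOP */
        /* ... issue first XOP, stamping the inode with mtid ... */
        mtid = hammer2_trans_sub(pmp);  /* fresh CLC id for the next XOP */
        /* ... issue second XOP, stamping the inode with mtid ... */
}
#endif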

void
hammer2_trans_done(hammer2_pfs_t *pmp, int quicksideq)
{
        uint32_t oflags;
        uint32_t nflags;

        /*
         * Modifying ops on the front-end can cause dirty inodes to
         * build up in the sideq.  We don't flush these on inactive/reclaim
         * due to potential deadlocks, so we have to deal with them from
         * inside other nominal modifying front-end transactions.
         */
        if (quicksideq && pmp->sideq_count > (pmp->inum_count >> 3) && pmp->mp)
                speedup_syncer(pmp->mp);
#if 0
                hammer2_inode_run_sideq(pmp, 0);
#endif

        /*
         * Clean-up the transaction
         */
        for (;;) {
                oflags = pmp->trans.flags;
                cpu_ccfence();
                KKASSERT(oflags & HAMMER2_TRANS_MASK);
                if ((oflags & HAMMER2_TRANS_MASK) == 1) {
                        /*
                         * This was the last transaction
                         */
                        nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
                                                  HAMMER2_TRANS_BUFCACHE |
                                                  HAMMER2_TRANS_FPENDING |
                                                  HAMMER2_TRANS_WAITING);
                } else {
                        /*
                         * Still transactions pending
                         */
                        nflags = oflags - 1;
                }
                if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
                        if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
                            (oflags & HAMMER2_TRANS_WAITING)) {
                                wakeup(&pmp->trans.sync_wait);
                        }
                        break;
                } else {
                        cpu_pause();
                }
                /* retry */
        }
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
        hammer2_tid_t tid;

        tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

        return tid;
}

/*
 * Assert that a strategy call is ok here.  Currently we allow strategy
 * calls in all situations, including during flushes.  Previously:
 *      (old) (1) In a normal transaction.
 *      (old) (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
#if 0
        KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
                 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
#endif
}


/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 *
 * NOTE: The flush code tests HAMMER2_CHAIN_DESTROY to differentiate
 *       between these chains and the deep-recursion requeue.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
        if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
                hammer2_spin_ex(&chain->hmp->list_spin);
                if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
                                     HAMMER2_CHAIN_DEFERRED)) == 0) {
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
                                                      HAMMER2_CHAIN_DEFERRED);
                        TAILQ_INSERT_TAIL(&chain->hmp->flushq,
                                          chain, flush_node);
                        hammer2_chain_ref(chain);
                }
                hammer2_spin_unex(&chain->hmp->list_spin);
                hammer2_voldata_modify(chain->hmp);
        }
}
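
/*
 * Example (sketch only, compiled out): a recursive removal would queue
 * each doomed chain rather than flushing it immediately, letting a single
 * later flush dispose of the whole batch.  example_next_doomed_chain() is
 * a hypothetical iterator, not a real API.
 */
#if 0
static void
example_recursive_removal(void)
{
        hammer2_chain_t *chain;

        while ((chain = example_next_doomed_chain()) != NULL)
                hammer2_delayed_flush(chain);
}
#endif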

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagate back upward.
 *
 * Returns a HAMMER2 error code, 0 if no error.  Note that I/O errors from
 * buffers dirtied during the flush operation can occur later.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_xop_inode_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.
 *
 * flags:
 *      HAMMER2_FLUSH_TOP       Indicates that this is the top of the flush.
 *                              Is cleared for the recursion.
 *
 *      HAMMER2_FLUSH_ALL       Recurse everything
 *
 *      HAMMER2_FLUSH_INODE_STOP
 *                              Stop at PFS inode or normal inode boundary
 */
int
hammer2_flush(hammer2_chain_t *chain, int flags)
{
        hammer2_chain_t *scan;
        hammer2_flush_info_t info;
        hammer2_dev_t *hmp;
        int loops;

        /*
         * Execute the recursive flush and handle deferrals.
         *
         * Chains can be ridiculously long (thousands deep), so to
         * avoid blowing out the kernel stack the recursive flush has a
         * depth limit.  Elements at the limit are placed on a list
         * for re-execution after the stack has been popped.
         */
        bzero(&info, sizeof(info));
        TAILQ_INIT(&info.flushq);
        info.flags = flags & ~HAMMER2_FLUSH_TOP;

        /*
         * Calculate parent (can be NULL), if not NULL the flush core
         * expects the parent to be referenced so it can easily lock/unlock
         * it without it getting ripped up.
         */
        if ((info.parent = chain->parent) != NULL)
                hammer2_chain_ref(info.parent);

        /*
         * Extra ref needed because flush_core expects it when replacing
         * chain.
         */
        hammer2_chain_ref(chain);
        hmp = chain->hmp;
        loops = 0;

        for (;;) {
                /*
                 * Move hmp->flushq to info.flushq if non-empty so it can
                 * be processed.
                 */
                if (TAILQ_FIRST(&hmp->flushq) != NULL) {
                        hammer2_spin_ex(&chain->hmp->list_spin);
                        TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
                        hammer2_spin_unex(&chain->hmp->list_spin);
                }

                /*
                 * Unwind deep recursions which had been deferred.  This
                 * can leave the FLUSH_* bits set for these chains, which
                 * will be handled when we [re]flush chain after the unwind.
                 */
                while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
                        KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
                        TAILQ_REMOVE(&info.flushq, scan, flush_node);
#ifdef HAMMER2_SCAN_DEBUG
                        ++info.flushq_count;
#endif
                        atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
                                                       HAMMER2_CHAIN_DELAYED);

                        /*
                         * Now that we've popped back up we can do a secondary
                         * recursion on the deferred elements.
                         *
                         * NOTE: hmp->flushq chains (marked DESTROY) must be
                         *       handled unconditionally so they can be cleaned
                         *       out.
                         *
                         * NOTE: hammer2_flush() may replace scan.
                         */
                        if (hammer2_debug & 0x0040)
                                kprintf("deferred flush %p\n", scan);
                        hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
                        if (scan->error == 0) {
                                if (scan->flags & HAMMER2_CHAIN_DESTROY) {
                                        hammer2_flush(scan,
                                                    flags |
                                                    HAMMER2_FLUSH_TOP |
                                                    HAMMER2_FLUSH_ALL);
                                } else {
                                        hammer2_flush(scan,
                                                    flags & ~HAMMER2_FLUSH_TOP);
                                }
                        } else {
                                info.error |= scan->error;
                        }
                        hammer2_chain_unlock(scan);
                        hammer2_chain_drop(scan);/* ref from defer */
                }

                /*
                 * [re]flush chain as the deep recursion may have generated
                 * additional modifications.
                 */
                info.diddeferral = 0;
                if (info.parent != chain->parent) {
                        if (hammer2_debug & 0x0040) {
                                kprintf("LOST CHILD4 %p->%p "
                                        "(actual parent %p)\n",
                                        info.parent, chain, chain->parent);
                        }
                        hammer2_chain_drop(info.parent);
                        info.parent = chain->parent;
                        hammer2_chain_ref(info.parent);
                }
                hammer2_flush_core(&info, chain, flags);

                /*
                 * Only loop if deep recursions have been deferred.
                 */
                if (TAILQ_EMPTY(&info.flushq))
                        break;

                if (++loops % 1000 == 0) {
                        kprintf("hammer2_flush: excessive loops on %p\n",
                                chain);
                        if (hammer2_debug & 0x100000)
                                Debugger("hell4");
                }
        }
#ifdef HAMMER2_SCAN_DEBUG
        if (info.scan_count >= 10)
        kprintf("hammer2_flush: scan_count %ld (%ld,%ld,%ld,%ld) "
                "bt(%ld,%ld,%ld,%ld,%ld,%ld) flushq %ld\n",
                info.scan_count,
                info.scan_mod_count,
                info.scan_upd_count,
                info.scan_onf_count,
                info.scan_del_count,
                info.scan_btype[1],
                info.scan_btype[2],
                info.scan_btype[3],
                info.scan_btype[4],
                info.scan_btype[5],
                info.scan_btype[6],
                info.flushq_count);
#endif
        hammer2_chain_drop(chain);
        if (info.parent)
                hammer2_chain_drop(info.parent);
        return (info.error);
}
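
/*
 * Example (sketch only, compiled out): a backend inode flush along the
 * lines of what hammer2_xop_inode_flush() performs.  The caller must hold
 * the chain locked across the call; the error handling shown here is an
 * assumption.
 */
#if 0
static int
example_flush_inode_chain(hammer2_chain_t *chain)
{
        int error;

        hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_flush(chain, HAMMER2_FLUSH_TOP |
                                     HAMMER2_FLUSH_INODE_STOP);
        hammer2_chain_unlock(chain);

        return (error);
}
#endif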

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  info.parent is referenced
 * but not locked.
 *
 * Upon return, the caller can test the UPDATE bit on the chain to determine
 * if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whose blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 *                      WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
                   int flags)
{
        hammer2_chain_t *parent;
        hammer2_dev_t *hmp;
        int save_error;

        /*
         * (1) Optimize downward recursion to locate nodes needing action.
         *     Nothing to do if none of these flags are set.
         */
        if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
                if (hammer2_debug & 0x200) {
                        if (info->debug == NULL)
                                info->debug = chain;
                } else {
                        return;
                }
        }

        hmp = chain->hmp;

        /*
         * NOTE: parent can be NULL, usually due to destroy races.
         */
        parent = info->parent;
        KKASSERT(chain->parent == parent);

        /*
         * Downward search recursion
         *
         * We must be careful on cold stops.  If CHAIN_UPDATE is set and
         * we stop cold (versus a deferral, which will re-run the chain
         * later), the update can wind up never being applied.  This
         * situation most typically occurs on inode boundaries due to the
         * way hammer2_vfs_sync() breaks up the flush.  As a safety, we
         * flush-through such situations.
         */
        if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
                /*
                 * Already deferred.
                 */
                ++info->diddeferral;
        } else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
                   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
                   (flags & HAMMER2_FLUSH_ALL) == 0 &&
                   (flags & HAMMER2_FLUSH_TOP) == 0 &&
                   chain->pmp && chain->pmp->mp) {
                /*
                 * If FLUSH_ALL is not specified the caller does not want
                 * to recurse through PFS roots that have been mounted.
                 *
                 * (If the PFS has not been mounted there may not be
                 *  anything monitoring its chains and it's up to us
                 *  to flush it).
                 *
                 * The typical sequence is to flush dirty PFS's starting at
                 * their root downward, then flush the device root (vchain).
                 * It is this second flush that typically leaves out the
                 * ALL flag.
                 *
                 * However we must still process the PFSROOT chains for block
                 * table updates in their parent (which IS part of our flush).
                 *
                 * NOTE: The volume root, vchain, does not set PFSBOUNDARY.
                 *
                 * NOTE: This test must be done before the depth-limit test,
                 *       else it might become the top on a flushq iteration.
                 *
                 * NOTE: We must re-set ONFLUSH in the parent to retain if
                 *       this chain (that we are skipping) requires work.
                 */
                if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
                                    HAMMER2_CHAIN_DESTROY |
                                    HAMMER2_CHAIN_MODIFIED)) {
                        hammer2_chain_setflush(parent);
                }
        } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
                   (chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
                   (flags & HAMMER2_FLUSH_INODE_STOP) &&
                   (flags & HAMMER2_FLUSH_ALL) == 0 &&
                   (flags & HAMMER2_FLUSH_TOP) == 0 &&
                   chain->pmp && chain->pmp->mp) {
                /*
                 * If FLUSH_INODE_STOP is specified and both ALL and TOP
                 * are clear, we must not flush the chain.  The chain should
                 * have already been flushed and any further ONFLUSH/UPDATE
                 * setting will be related to the next flush.
                 *
                 * This feature allows us to flush inodes independently of
                 * each other and meta-data above the inodes separately.
                 */
                if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
                                    HAMMER2_CHAIN_DESTROY |
                                    HAMMER2_CHAIN_MODIFIED)) {
                        if (parent)
                                hammer2_chain_setflush(parent);
                }
        } else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
                /*
                 * Recursion depth reached.
                 */
                KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
                hammer2_chain_ref(chain);
                TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
                ++info->diddeferral;
        } else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
                                   HAMMER2_CHAIN_DESTROY)) {
                /*
                 * Downward recursion search (actual flush occurs bottom-up).
                 * pre-clear ONFLUSH.  It can get set again due to races or
                 * flush errors, which we want so the scan finds us again in
                 * the next flush.
                 *
                 * We must also recurse if DESTROY is set so we can finally
                 * get rid of the related children, otherwise the node will
                 * just get re-flushed on lastdrop.
                 *
                 * WARNING!  The recursion will unlock/relock info->parent
                 *           (which is 'chain'), potentially allowing it
                 *           to be ripped up.
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
                save_error = info->error;
                info->error = 0;
                info->parent = chain;

                /*
                 * We may have to do this twice to catch any indirect
                 * block maintenance that occurs.  Other conditions which
                 * can keep setting ONFLUSH (such as deferrals) ought to
                 * be handled by the flushq code.  XXX needs more help
                 */
                hammer2_spin_ex(&chain->core.spin);
                RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
                        NULL, hammer2_flush_recurse, info);
                if (chain->flags & HAMMER2_CHAIN_ONFLUSH) {
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
                        RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
                                NULL, hammer2_flush_recurse, info);
                }
                hammer2_spin_unex(&chain->core.spin);
                info->parent = parent;

                /*
                 * Re-set the flush bits if the flush was incomplete or
                 * an error occurred.  If an error occurs it is typically
                 * an allocation error.  Errors do not cause deferrals.
                 */
                if (info->error)
                        hammer2_chain_setflush(chain);
                info->error |= save_error;
                if (info->diddeferral)
                        hammer2_chain_setflush(chain);

                /*
                 * If we lost the parent->chain association we have to
                 * stop processing this chain because it is no longer
                 * in this recursion.  If it moved, it will be handled
                 * by the ONFLUSH flag elsewhere.
                 */
                if (chain->parent != parent) {
                        kprintf("LOST CHILD2 %p->%p (actual parent %p)\n",
                                parent, chain, chain->parent);
                        goto done;
                }
        }

        /*
         * Now we are in the bottom-up part of the recursion.
         *
         * Do not update chain if lower layers were deferred.  We continue
         * to try to update the chain on lower-level errors, but the flush
         * code may decide not to flush the volume root.
         *
         * XXX should we continue to try to update the chain if an error
         *     occurred?
         */
        if (info->diddeferral)
                goto done;

        /*
         * Both parent and chain must be locked in order to flush chain,
         * in order to properly update the parent under certain conditions.
         *
         * In addition, we can't safely unlock/relock the chain once we
         * start flushing the chain itself, which we would have to do later
         * on in order to lock the parent if we didn't do that now.
         */
        hammer2_chain_ref_hold(chain);
        hammer2_chain_unlock(chain);
        if (parent)
                hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
        hammer2_chain_drop_unhold(chain);

        /*
         * Can't process if we can't access their content.
         */
        if ((parent && parent->error) || chain->error) {
                kprintf("hammer2: chain error during flush\n");
                info->error |= chain->error;
                if (parent) {
                        info->error |= parent->error;
                        hammer2_chain_unlock(parent);
                }
                goto done;
        }

        if (chain->parent != parent) {
                if (hammer2_debug & 0x0040) {
                        kprintf("LOST CHILD3 %p->%p (actual parent %p)\n",
                                parent, chain, chain->parent);
                }
                KKASSERT(parent != NULL);
                hammer2_chain_unlock(parent);
                if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
                        hammer2_chain_ref(chain);
                        TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
                        ++info->diddeferral;
                }
                goto done;
        }

        /*
         * Propagate the DESTROY flag downwards.  This dummies up the flush
         * code and tries to invalidate related buffer cache buffers to
         * avoid the disk write.
         */
        if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

        /*
         * Dispose of the modified bit.
         *
         * If parent is present, the UPDATE bit should already be set.
         * bref.mirror_tid should already be set.
         */
        if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
                         chain->parent == NULL);
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
                atomic_add_long(&hammer2_count_modified_chains, -1);

                /*
                 * Manage threads waiting for excessive dirty memory to
                 * be retired.
                 */
                if (chain->pmp)
                        hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
                if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
                    chain != &hmp->vchain &&
                    chain != &hmp->fchain) {
                        /*
                         * Set UPDATE bit indicating that the parent block
                         * table requires updating.
                         */
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
                }
#endif

                /*
                 * Issue the flush.  This is indirect via the DIO.
                 *
                 * NOTE: A DELETED node that reaches this point must be
                 *       flushed for synchronization point consistency.
                 *
                 * NOTE: Even though MODIFIED was already set, the related DIO
                 *       might not be dirty due to a system buffer cache
                 *       flush and must be set dirty if we are going to make
                 *       further modifications to the buffer.  Chains with
                 *       embedded data don't need this.
                 */
                if (hammer2_debug & 0x1000) {
                        kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
                                chain, chain->bref.type,
                                (uintmax_t)chain->bref.key,
                                chain->bref.keybits,
                                (uintmax_t)chain->bref.data_off);
                }
                if (hammer2_debug & 0x2000) {
                        Debugger("Flush hell");
                }

                /*
                 * Update chain CRCs for flush.
                 *
                 * NOTE: Volume headers are NOT flushed here as they require
                 *       special processing.
                 */
                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_FREEMAP:
                        /*
                         * Update the volume header's freemap_tid to the
                         * freemap's flushing mirror_tid.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
                        KKASSERT(chain == &hmp->fchain);
                        hmp->voldata.freemap_tid = chain->bref.mirror_tid;
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync freemap mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * The freemap can be flushed independently of the
                         * main topology, but for the case where it is
                         * flushed in the same transaction, and flushed
                         * before vchain (a case we want to allow for
                         * performance reasons), make sure modifications
                         * made during the flush under vchain use a new
                         * transaction id.
                         *
                         * Otherwise the mount recovery code will get confused.
                         */
                        ++hmp->voldata.mirror_tid;
                        break;
                case HAMMER2_BREF_TYPE_VOLUME:
                        /*
                         * The free block table is flushed by
                         * hammer2_vfs_sync() before it flushes vchain.
                         * We must still hold fchain locked while copying
                         * voldata to volsync, however.
                         *
                         * These do not error per se since their data does
                         * not need to be re-read from media on lock.
                         *
                         * (note: embedded data, do not call setdirty)
                         */
                        hammer2_chain_lock(&hmp->fchain,
                                           HAMMER2_RESOLVE_ALWAYS);
                        hammer2_voldata_lock(hmp);
                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("sync volume  mirror_tid %08jx\n",
                                        (intmax_t)chain->bref.mirror_tid);
                        }

                        /*
                         * Update the volume header's mirror_tid to the
                         * main topology's flushing mirror_tid.  It is
                         * possible that voldata.mirror_tid is already
                         * beyond bref.mirror_tid due to the bump we made
                         * above in BREF_TYPE_FREEMAP.
                         */
                        if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
                                hmp->voldata.mirror_tid =
                                        chain->bref.mirror_tid;
                        }

                        /*
                         * The volume header is flushed manually by the
                         * syncer, not here.  All we do here is adjust the
                         * crc's.
                         */
                        KKASSERT(chain->data != NULL);
                        KKASSERT(chain->dio == NULL);

                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC1_OFF,
                                        HAMMER2_VOLUME_ICRC1_SIZE);
                        hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRC0_OFF,
                                        HAMMER2_VOLUME_ICRC0_SIZE);
                        hmp->voldata.icrc_volheader =
                                hammer2_icrc32(
                                        (char *)&hmp->voldata +
                                         HAMMER2_VOLUME_ICRCVH_OFF,
                                        HAMMER2_VOLUME_ICRCVH_SIZE);

                        if (hammer2_debug & 0x8000) {
                                /* debug only, avoid syslogd loop */
                                kprintf("syncvolhdr %016jx %016jx\n",
                                        hmp->voldata.mirror_tid,
                                        hmp->vchain.bref.mirror_tid);
                        }
                        hmp->volsync = hmp->voldata;
                        atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
                        hammer2_voldata_unlock(hmp);
                        hammer2_chain_unlock(&hmp->fchain);
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        /*
                         * Data elements have already been flushed via the
                         * logical file buffer cache.  Their hash was set in
                         * the bref by the vop_write code.  Do not re-dirty.
                         *
                         * Make sure any device buffer(s) have been flushed
                         * out here (there aren't usually any to flush) XXX.
                         */
                        break;
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
                        /*
                         * Buffer I/O will be cleaned up when the volume is
                         * flushed (but the kernel is free to flush it before
                         * then, as well).
                         */
                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                case HAMMER2_BREF_TYPE_DIRENT:
                        /*
                         * A directory entry can use the check area to store
                         * the filename for filenames <= 64 bytes, don't blow
                         * it up!
                         */
                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        if (chain->bytes)
                                hammer2_chain_setcheck(chain, chain->data);
                        break;
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * NOTE: We must call io_setdirty() to make any late
                         *       changes to the inode data, the system might
                         *       have already flushed the buffer.
                         */
                        if (chain->data->ipdata.meta.op_flags &
                            HAMMER2_OPFLAG_PFSROOT) {
                                /*
                                 * non-NULL pmp if mounted as a PFS.  We must
                                 * sync fields cached in the pmp? XXX
                                 */
                                hammer2_inode_data_t *ipdata;

                                hammer2_io_setdirty(chain->dio);
                                ipdata = &chain->data->ipdata;
                                if (chain->pmp) {
                                        ipdata->meta.pfs_inum =
                                                chain->pmp->inode_tid;
                                }
                        } else {
                                /* can't be mounted as a PFS */
                        }

                        KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
                        hammer2_chain_setcheck(chain, chain->data);
                        break;
                default:
                        KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
                        panic("hammer2_flush_core: unsupported "
                              "embedded bref %d",
                              chain->bref.type);
                        /* NOT REACHED */
                }

                /*
                 * If the chain was destroyed try to avoid unnecessary I/O
                 * that might not have yet occurred.  Remove the data range
                 * from dedup candidacy and attempt to invalidate the
                 * potentially dirty portion of the I/O buffer.
                 */
                if (chain->flags & HAMMER2_CHAIN_DESTROY) {
                        hammer2_io_dedup_delete(hmp,
                                                chain->bref.type,
                                                chain->bref.data_off,
                                                chain->bytes);
#if 0
                        hammer2_io_t *dio;
                        if (chain->dio) {
                                hammer2_io_inval(chain->dio,
                                                 chain->bref.data_off,
                                                 chain->bytes);
                        } else if ((dio = hammer2_io_getquick(hmp,
                                                  chain->bref.data_off,
                                                  chain->bytes,
                                                  1)) != NULL) {
                                hammer2_io_inval(dio,
                                                 chain->bref.data_off,
                                                 chain->bytes);
                                hammer2_io_putblk(&dio);
                        }
#endif
                }
        }

        /*
         * If UPDATE is set the parent block table may need to be updated.
         * This can fail if the hammer2_chain_modify() fails.
         *
         * NOTE: UPDATE may be set on vchain or fchain in which case
         *       parent could be NULL.  It's easiest to allow the case
         *       and test for NULL.  parent can also wind up being NULL
         *       due to a deletion so we need to handle the case anyway.
         *
         * If no parent exists we can just clear the UPDATE bit.  If the
         * chain gets reattached later on the bit will simply get set
         * again.
         */
        if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

        /*
         * The chain may need its blockrefs updated in the parent.
         */
        if (chain->flags & HAMMER2_CHAIN_UPDATE) {
                hammer2_blockref_t *base;
                int count;

                /*
                 * Clear UPDATE flag, mark parent modified, update its
                 * modify_tid if necessary, and adjust the parent blockmap.
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

                /*
                 * (optional code)
                 *
                 * Avoid actually modifying and updating the parent if it
                 * was flagged for destruction.  This can greatly reduce
                 * disk I/O in large tree removals because the
                 * hammer2_io_setinval() call in the upward recursion
                 * (see MODIFIED code above) can only handle a few cases.
                 */
                if (parent->flags & HAMMER2_CHAIN_DESTROY) {
                        if (parent->bref.modify_tid < chain->bref.modify_tid) {
                                parent->bref.modify_tid =
                                        chain->bref.modify_tid;
                        }
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
                                                        HAMMER2_CHAIN_BMAPUPD);
                        goto skipupdate;
                }

                /*
                 * The flusher is responsible for deleting empty indirect
                 * blocks at this point.  If we don't do this, no major harm
                 * will be done but the empty indirect blocks will stay in
                 * the topology and make it messy and inefficient.
1114                  *
1115                  * The flusher is also responsible for collapsing the
1116                  * content of an indirect block into its parent whenever
1117                  * possible (with some hysteresis).  Not doing this will also
1118                  * not harm the topology, but would make it messy and
1119                  * inefficient.
1120                  */
1121                 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1122                         if (hammer2_chain_indirect_maintenance(parent, chain))
1123                                 goto skipupdate;
1124                 }
1125
1126                 /*
1127                  * We are updating the parent's blockmap, the parent must
1128                  * be set modified.  If this fails we re-set the UPDATE flag
1129                  * in the child.
1130                  *
1131                  * NOTE! A modification error can be ENOSPC.  We still want
1132                  *       to flush modified chains recursively, not break out,
1133                  *       so we just skip the update in this situation and
1134                  *       continue.  That is, we still need to try to clean
1135                  *       out dirty chains and buffers.
1136                  *
1137                  *       This may not help bulkfree though. XXX
1138                  */
1139                 save_error = hammer2_chain_modify(parent, 0, 0, 0);
1140                 if (save_error) {
1141                         info->error |= save_error;
1142                         kprintf("hammer2_flush: %016jx.%02x error=%08x\n",
1143                                 parent->bref.data_off, parent->bref.type,
1144                                 save_error);
1145                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
1146                         goto skipupdate;
1147                 }
1148                 if (parent->bref.modify_tid < chain->bref.modify_tid)
1149                         parent->bref.modify_tid = chain->bref.modify_tid;
1150
1151                 /*
1152                  * Calculate blockmap pointer
1153                  */
1154                 switch(parent->bref.type) {
1155                 case HAMMER2_BREF_TYPE_INODE:
1156                         /*
1157                          * Access the inode's block array.  However, there is
1158                          * no block array if the inode is flagged DIRECTDATA.
1159                          */
1160                         if (parent->data &&
1161                             (parent->data->ipdata.meta.op_flags &
1162                              HAMMER2_OPFLAG_DIRECTDATA) == 0) {
1163                                 base = &parent->data->
1164                                         ipdata.u.blockset.blockref[0];
1165                         } else {
1166                                 base = NULL;
1167                         }
1168                         count = HAMMER2_SET_COUNT;
1169                         break;
1170                 case HAMMER2_BREF_TYPE_INDIRECT:
1171                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1172                         if (parent->data)
1173                                 base = &parent->data->npdata[0];
1174                         else
1175                                 base = NULL;
1176                         count = parent->bytes / sizeof(hammer2_blockref_t);
1177                         break;
1178                 case HAMMER2_BREF_TYPE_VOLUME:
1179                         base = &chain->hmp->voldata.sroot_blockset.blockref[0];
1180                         count = HAMMER2_SET_COUNT;
1181                         break;
1182                 case HAMMER2_BREF_TYPE_FREEMAP:
1183                         base = &parent->data->npdata[0];
1184                         count = HAMMER2_SET_COUNT;
1185                         break;
1186                 default:
1187                         base = NULL;
1188                         count = 0;
1189                         panic("hammer2_flush_core: "
1190                               "unrecognized blockref type: %d",
1191                               parent->bref.type);
1192                 }
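                     /*
                      * Worked example for the switch above, assuming the
                      * usual on-media sizes (sizeof(hammer2_blockref_t) ==
                      * 128 and HAMMER2_SET_COUNT == 4): an inode embeds a
                      * fixed 4-entry blockset, while a 65536-byte indirect
                      * block yields count = 65536 / 128 = 512 blockrefs.
                      */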
1193
1194                 /*
1195                  * Blocktable updates
1196                  *
1197                  * We synchronize pending statistics at this time.  Delta
1198                  * adjustments designated for the current and upper level
1199                  * are synchronized.
1200                  */
1201                 if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
1202                         if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
1203                                 hammer2_spin_ex(&parent->core.spin);
1204                                 hammer2_base_delete(parent, base, count, chain);
1205                                 hammer2_spin_unex(&parent->core.spin);
1206                                 /* base_delete clears both bits */
1207                         } else {
1208                                 atomic_clear_int(&chain->flags,
1209                                                  HAMMER2_CHAIN_BMAPUPD);
1210                         }
1211                 }
1212                 if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
1213                         hammer2_spin_ex(&parent->core.spin);
1214                         hammer2_base_insert(parent, base, count,
1215                                             chain, &chain->bref);
1216                         hammer2_spin_unex(&parent->core.spin);
1217                         /* base_insert sets BMAPPED */
1218                 }
1219         }
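             /*
              * (Flag summary for the two-step update above: BMAPPED means
              *  the chain is present in the parent's blocktable, BMAPUPD
              *  that it is present but its bref changed.  A chain with
              *  both flags has its stale entry deleted first, after which
              *  the !BMAPPED test re-inserts the current bref, which
              *  re-sets BMAPPED.)
              */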
1220 skipupdate:
1221         if (parent)
1222                 hammer2_chain_unlock(parent);
1223
1224         /*
1225          * Final cleanup after flush
1226          */
1227 done:
1228         KKASSERT(chain->refs > 0);
1229         if (hammer2_debug & 0x200) {
1230                 if (info->debug == chain)
1231                         info->debug = NULL;
1232         }
1233 }
1234
1235 /*
1236  * Flush recursion helper, called from flush_core, calls flush_core.
1237  *
1238  * Flushes the children of the caller's chain (info->parent), restricted
1239  * by sync_tid.  Set info->domodify if the child's blockref must propagate
1240  * back up to the parent.
1241  *
1242  * This function may set info->error as a side effect.
1243  *
1244  * Ripouts can move child from rbtree to dbtree or dbq but the caller's
1245  * flush scan order prevents any chains from being lost.  A child can be
1246  * processed more than once.
1247  *
1248  * WARNING! If we do not call hammer2_flush_core() we must update
1249  *          bref.mirror_tid ourselves to indicate that the flush has
1250  *          processed the child.
1251  *
1252  * WARNING! parent->core spinlock is held on entry and return.
1253  */
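     /*
      * A minimal sketch of the scan that is expected to drive this
      * callback (illustrative only; the actual loop lives in
      * hammer2_flush_core()):
      *
      *      hammer2_spin_ex(&info->parent->core.spin);
      *      RB_SCAN(hammer2_chain_tree, &info->parent->core.rbtree,
      *              NULL, hammer2_flush_recurse, info);
      *      hammer2_spin_unex(&info->parent->core.spin);
      *
      * which is why the parent's core spinlock is held on entry and on
      * return.
      */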
1254 static int
1255 hammer2_flush_recurse(hammer2_chain_t *child, void *data)
1256 {
1257         hammer2_flush_info_t *info = data;
1258         hammer2_chain_t *parent = info->parent;
1259
1260 #ifdef HAMMER2_SCAN_DEBUG
1261         ++info->scan_count;
1262         if (child->flags & HAMMER2_CHAIN_MODIFIED)
1263                 ++info->scan_mod_count;
1264         if (child->flags & HAMMER2_CHAIN_UPDATE)
1265                 ++info->scan_upd_count;
1266         if (child->flags & HAMMER2_CHAIN_ONFLUSH)
1267                 ++info->scan_onf_count;
1268 #endif
1269
1270         /*
1271          * (child can never be fchain or vchain so a special check isn't
1272          *  needed).
1273          *
1274          * We must ref the child before unlocking the spinlock.
1275          *
1276          * The caller has added a ref to the parent so we can temporarily
1277          * unlock it in order to lock the child.  However, if it no longer
1278          * winds up being the child of the parent we must skip this child.
1279          *
1280          * NOTE! chain locking errors are fatal.  They are never out-of-space
1281          *       errors.
1282          */
1283         hammer2_chain_ref(child);
1284         hammer2_spin_unex(&parent->core.spin);
1285
1286         hammer2_chain_ref_hold(parent);
1287         hammer2_chain_unlock(parent);
1288         hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);
1289         if (child->parent != parent) {
1290                 kprintf("LOST CHILD1 %p->%p (actual parent %p)\n",
1291                         parent, child, child->parent);
1292                 goto done;
1293         }
1294         if (child->error) {
1295                 kprintf("CHILD ERROR DURING FLUSH LOCK %p->%p\n",
1296                         parent, child);
1297                 info->error |= child->error;
1298                 goto done;
1299         }
1300
1301         /*
1302          * Must propagate the DESTROY flag downwards, otherwise the
1303          * parent could end up never being removed because it will
1304          * be requeued to the flusher if it survives this run due to
1305          * the flag.
1306          */
1307         if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
1308                 atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY);
1309 #ifdef HAMMER2_SCAN_DEBUG
1310         if (child->flags & HAMMER2_CHAIN_DESTROY)
1311                 ++info->scan_del_count;
1312 #endif
1313
1314         /*
1315          * Recurse and collect deferral data.  We're in the media flush,
1316          * so this can cross PFS boundaries.
1317          */
1318         if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) {
1319 #ifdef HAMMER2_SCAN_DEBUG
1320                 if (child->bref.type < 7)
1321                         ++info->scan_btype[child->bref.type];
1322 #endif
1323                 ++info->depth;
1324                 hammer2_flush_core(info, child, info->flags);
1325                 --info->depth;
1326         } else if (hammer2_debug & 0x200) {
1327                 if (info->debug == NULL)
1328                         info->debug = child;
1329                 ++info->depth;
1330                 hammer2_flush_core(info, child, info->flags);
1331                 --info->depth;
1332                 if (info->debug == child)
1333                         info->debug = NULL;
1334         }
1335
1336 done:
1337         /*
1338          * Relock to continue the loop.
1339          */
1340         hammer2_chain_unlock(child);
1341         hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
1342         hammer2_chain_drop_unhold(parent);
1343         if (parent->error) {
1344                 kprintf("PARENT ERROR DURING FLUSH LOCK %p->%p\n",
1345                         parent, child);
1346                 info->error |= parent->error;
1347         }
1348         hammer2_chain_drop(child);
1349         KKASSERT(info->parent == parent);
1350         hammer2_spin_ex(&parent->core.spin);
1351
1352         return (0);
1353 }
1354
1355 /*
1356  * flush helper (backend threaded)
1357  *
1358  * Flushes chain topology for the specified inode.
1359  *
1360  * If HAMMER2_XOP_FLUSH is set we flush all chains from the current inode
1361  * down, stopping at sub-inode boundaries (we flush the inode chain for
1362  * each sub-inode itself, but do not go further, as deeper modifications
1363  * do not belong to the current flush cycle).
1364  *
1365  * If HAMMER2_XOP_FLUSH is not set we flush the current inode's chains
1366  * only and do not recurse through sub-inodes at all, not even to flush
1367  * their inode chains.
1368  *
1369  * Remember that HAMMER2 is currently using a flat inode model, so directory
1370  * hierarchies do not translate to inode hierarchies.  PFS ROOTs, however,
1371  * do.
1372  *
1373  * chain->parent can be NULL, usually due to destroy races.
1374  *
1375  * Primarily called from vfs_sync().
1376  */
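     /*
      * Hypothetical frontend dispatch, for illustration only (the real
      * call site is in the VFS sync path, not in this file, and the
      * descriptor name below is an assumption):
      *
      *      xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
      *                                  HAMMER2_XOP_INODE_STOP);
      *      hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
      *      error = hammer2_xop_collect(&xop->head,
      *                                  HAMMER2_XOP_COLLECT_WAITALL);
      *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
      */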
1377 void
1378 hammer2_xop_inode_flush(hammer2_xop_t *arg, void *scratch __unused, int clindex)
1379 {
1380         hammer2_xop_flush_t *xop = &arg->xop_flush;
1381         hammer2_chain_t *chain;
1382         hammer2_chain_t *parent;
1383         hammer2_dev_t *hmp;
1384         int flush_error = 0;
1385         int fsync_error = 0;
1386         int total_error = 0;
1387         int j;
1388         int xflags;
1389         int ispfsroot = 0;
1390
1391         xflags = HAMMER2_FLUSH_TOP;
1392         if (xop->head.flags & HAMMER2_XOP_INODE_STOP)
1393                 xflags |= HAMMER2_FLUSH_INODE_STOP;
1394
1395         /*
1396          * Flush core chains
1397          */
1398         chain = hammer2_inode_chain(xop->head.ip1, clindex,
1399                                     HAMMER2_RESOLVE_ALWAYS);
1400         if (chain) {
1401                 hmp = chain->hmp;
1402                 if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
1403                     TAILQ_FIRST(&hmp->flushq) != NULL) {
1404                         hammer2_flush(chain, xflags);
1405                         parent = chain->parent;
1406                         if (parent)
1407                                 hammer2_chain_setflush(parent);
1408                 }
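                     /*
                      * (hammer2_chain_setflush() above re-sets ONFLUSH
                      *  upward from the parent: with FLUSH_INODE_STOP
                      *  the inode's own updated blockref still has to be
                      *  picked up by a later flush of the parent
                      *  topology.)
                      */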
1409                 if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
1410                         ispfsroot = 1;
1411                 hammer2_chain_unlock(chain);
1412                 hammer2_chain_drop(chain);
1413                 chain = NULL;
1414         } else {
1415                 hmp = NULL;
1416         }
1417
1418         /*
1419          * Only flush the volume header if asked to, and then only if the
1420          * inode is also the PFS root.
1421          */
1422         if ((xop->head.flags & HAMMER2_XOP_VOLHDR) == 0)
1423                 goto skip;
1424         if (ispfsroot == 0)
1425                 goto skip;
1426
1427         /*
1428          * Flush volume roots.  Avoid replication; we only want to
1429          * flush each hammer2_dev (hmp) once.
1430          */
1431         for (j = clindex - 1; j >= 0; --j) {
1432                 if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
1433                         if (chain->hmp == hmp) {
1434                                 chain = NULL;   /* safety */
1435                                 goto skip;
1436                         }
1437                 }
1438         }
1439         chain = NULL;   /* safety */
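             /*
              * (Example: in a three-way cluster where elements 0 and 1
              *  sit on the same hammer2_dev, clindex 1 finds element 0
              *  with a matching hmp in the loop above and skips, so only
              *  the lowest-indexed element on each device flushes that
              *  device's volume root.)
              */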
1440
1441         /*
1442          * spmp transaction.  The super-root is never directly mounted so
1443          * there shouldn't be any vnodes, let alone any dirty vnodes
1444          * associated with it, so we shouldn't have to mess around with any
1445          * vnode flushes here.
1446          */
1447         hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
1448
1449         /*
1450          * Media mounts have two 'roots', vchain for the topology
1451          * and fchain for the free block table.  Flush both.
1452          *
1453          * Note that the topology and free block table are handled
1454          * independently, so the free block table can wind up being
1455          * ahead of the topology.  We depend on the bulk free scan
1456          * code to deal with any loose ends.
1457          *
1458          * Locking vchain and fchain cannot return a lock error since
1459          * their data does not have to be re-read from media.
1460          */
1461         hammer2_chain_ref(&hmp->vchain);
1462         hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1463         hammer2_chain_ref(&hmp->fchain);
1464         hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
1465         if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1466                 /*
1467                  * This will also modify vchain as a side effect,
1468                  * mark vchain as modified now.
1469                  */
1470                 hammer2_voldata_modify(hmp);
1471                 chain = &hmp->fchain;
1472                 flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
1473                 KKASSERT(chain == &hmp->fchain);
1474         }
1475         hammer2_chain_unlock(&hmp->fchain);
1476         hammer2_chain_unlock(&hmp->vchain);
1477         hammer2_chain_drop(&hmp->fchain);
1478         /* vchain dropped down below */
1479
1480         hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
1481         if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
1482                 chain = &hmp->vchain;
1483                 flush_error |= hammer2_flush(chain, HAMMER2_FLUSH_TOP);
1484                 KKASSERT(chain == &hmp->vchain);
1485         }
1486         hammer2_chain_unlock(&hmp->vchain);
1487         hammer2_chain_drop(&hmp->vchain);
1488
1489         /*
1490          * We can't safely flush the volume header until we have
1491          * flushed any device buffers which have built up.
1492          *
1493          * XXX this isn't being incremental
1494          */
1495         vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
1496         fsync_error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
1497         vn_unlock(hmp->devvp);
1498         if (fsync_error || flush_error) {
1499                 kprintf("hammer2: sync error fsync=%d h2flush=0x%04x dev=%s\n",
1500                         fsync_error, flush_error, hmp->devrepname);
1501         }
1502
1503         /*
1504          * The flush code sets CHAIN_VOLUMESYNC to indicate that the
1505          * volume header needs synchronization via hmp->volsync.
1506          *
1507          * XXX synchronize the flag & data with only this flush XXX
1508          */
1509         if (fsync_error == 0 && flush_error == 0 &&
1510             (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
1511                 struct buf *bp;
1512                 int vol_error = 0;
1513
1514                 /*
1515                  * Synchronize the disk before flushing the volume
1516                  * header.
1517                  */
1518                 bp = getpbuf(NULL);
1519                 bp->b_bio1.bio_offset = 0;
1520                 bp->b_bufsize = 0;
1521                 bp->b_bcount = 0;
1522                 bp->b_cmd = BUF_CMD_FLUSH;
1523                 bp->b_bio1.bio_done = biodone_sync;
1524                 bp->b_bio1.bio_flags |= BIO_SYNC;
1525                 vn_strategy(hmp->devvp, &bp->b_bio1);
1526                 fsync_error = biowait(&bp->b_bio1, "h2vol");
1527                 relpbuf(bp, NULL);
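                     /*
                      * (The zero-length BUF_CMD_FLUSH bio above is a
                      *  whole-device cache flush; biowait() makes it
                      *  synchronous before the new header copy is
                      *  written below.)
                      */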
1528
1529                 /*
1530                  * Then we can safely flush the version of the
1531                  * volume header synchronized by the flush code.
1532                  */
1533                 j = hmp->volhdrno + 1;
1534                 if (j < 0)
1535                         j = 0;
1536                 if (j >= HAMMER2_NUM_VOLHDRS)
1537                         j = 0;
1538                 if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
1539                     hmp->volsync.volu_size) {
1540                         j = 0;
1541                 }
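                     /*
                      * Illustrative layout, assuming HAMMER2_NUM_VOLHDRS
                      * is 4 and 2GB zones: volume header copies rotate
                      * through media offsets 0, 2G, 4G and 6G, and the
                      * size test above wraps the rotation back to 0 on
                      * volumes too small to hold all four copies.
                      */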
1542                 if (hammer2_debug & 0x8000) {
1543                         /* debug only, avoid syslogd loop */
1544                         kprintf("sync volhdr %d %jd\n",
1545                                 j, (intmax_t)hmp->volsync.volu_size);
1546                 }
1547                 bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
1548                             HAMMER2_PBUFSIZE, GETBLK_KVABIO, 0);
1549                 atomic_clear_int(&hmp->vchain.flags,
1550                                  HAMMER2_CHAIN_VOLUMESYNC);
1551                 bkvasync(bp);
1552                 bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
1553                 vol_error = bwrite(bp);
1554                 hmp->volhdrno = j;
1555                 if (vol_error)
1556                         fsync_error = vol_error;
1557         }
1558         if (flush_error)
1559                 total_error = flush_error;
1560         if (fsync_error)
1561                 total_error = hammer2_errno_to_error(fsync_error);
1562
1563         hammer2_trans_done(hmp->spmp, 0);  /* spmp trans */
1564 skip:
1565         hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
1566 }