HAMMER VFS - The backend flusher now sorts inodes
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
index d2c698e..5991f3d 100644
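For orientation before reading the diff: each flush group now keeps its dirty inodes in an RB tree (flg->flush_tree) sorted by hammer_ino_rb_compare, and a single master thread walks that tree and hands inodes to up to HAMMER_MAX_FLUSHERS slave threads in batches of HAMMER_FLUSH_GROUP_SIZE (128). A rough sketch of the resulting pipeline, distilled from the code below rather than taken from the commit itself:

/*
 * Simplified pipeline (sketch only, not part of the commit):
 *
 *   frontend queues dirty inodes --> flush groups
 *                                    (flg->flush_tree, RB tree sorted by inode)
 *   master:  RB_FIRST/RB_NEXT over flush_tree, filling info->work_array[]
 *            in batches of HAMMER_FLUSH_GROUP_SIZE, then moving the info
 *            from the ready_list to the run_list and waking its slave
 *   slaves:  hammer_flusher_flush_inode() on each assigned inode
 *   master:  hammer_flusher_finalize() once the run_list drains
 */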
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.8 2008/04/29 04:43:08 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
  */
 /*
  * HAMMER dependancy flusher thread
 
 #include "hammer.h"
 
-static void hammer_flusher_thread(void *arg);
-static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
+static void hammer_flusher_master_thread(void *arg);
+static void hammer_flusher_slave_thread(void *arg);
 static void hammer_flusher_flush(hammer_mount_t hmp);
-static int hammer_must_finalize_undo(hammer_mount_t hmp);
-static void hammer_flusher_finalize(hammer_mount_t hmp,
-                   hammer_volume_t root_volume, hammer_off_t start_offset);
+static void hammer_flusher_flush_inode(hammer_inode_t ip,
+                                       hammer_transaction_t trans);
 
+RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
+              hammer_ino_rb_compare);
+
+/*
+ * Inodes are sorted and assigned to slave threads in groups of 128.
+ * We want a flush group size large enough such that the slave threads
+ * are not likely to interfere with each other when accessing the B-Tree,
+ * but not so large that we lose concurrency.
+ */
+#define HAMMER_FLUSH_GROUP_SIZE 128
+
+/*
+ * Support structures for the flusher threads.
+ */
+struct hammer_flusher_info {
+       TAILQ_ENTRY(hammer_flusher_info) entry;
+       struct hammer_mount *hmp;
+       thread_t        td;
+       int             runstate;
+       int             count;
+       hammer_flush_group_t flg;
+       hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
+};
+
+typedef struct hammer_flusher_info *hammer_flusher_info_t;
+
+/*
+ * Sync all inodes pending on the flusher.
+ *
+ * All flush groups will be flushed.  This does not queue dirty inodes
+ * to the flush groups, it just flushes out what has already been queued!
+ */
 void
 hammer_flusher_sync(hammer_mount_t hmp)
 {
        int seq;
 
-       if (hmp->flusher_td) {
-               seq = ++hmp->flusher_seq;
-               wakeup(&hmp->flusher_seq);
-               while ((int)(seq - hmp->flusher_act) > 0)
-                       tsleep(&hmp->flusher_act, 0, "hmrfls", 0);
+       seq = hammer_flusher_async(hmp, NULL);
+       hammer_flusher_wait(hmp, seq);
+}
+
+/*
+ * Sync all inodes pending on the flusher - return immediately.
+ *
+ * All flush groups will be flushed.
+ */
+int
+hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
+{
+       hammer_flush_group_t flg;
+       int seq = hmp->flusher.next;
+
+       TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
+               if (flg->running == 0)
+                       ++seq;
+               flg->closed = 1;
+               if (flg == close_flg)
+                       break;
+       }
+       if (hmp->flusher.td) {
+               if (hmp->flusher.signal++ == 0)
+                       wakeup(&hmp->flusher.signal);
+       } else {
+               seq = hmp->flusher.done;
+       }
+       return(seq);
+}
+
+int
+hammer_flusher_async_one(hammer_mount_t hmp)
+{
+       int seq;
+
+       if (hmp->flusher.td) {
+               seq = hmp->flusher.next;
+               if (hmp->flusher.signal++ == 0)
+                       wakeup(&hmp->flusher.signal);
+       } else {
+               seq = hmp->flusher.done;
        }
+       return(seq);
 }
 
+/*
+ * Wait for the flusher to get to the specified sequence number.
+ * Signal the flusher as often as necessary to keep it going.
+ */
 void
-hammer_flusher_async(hammer_mount_t hmp)
+hammer_flusher_wait(hammer_mount_t hmp, int seq)
 {
-       if (hmp->flusher_td) {
-               ++hmp->flusher_seq;
-               wakeup(&hmp->flusher_seq);
+       while ((int)(seq - hmp->flusher.done) > 0) {
+               if (hmp->flusher.act != seq) {
+                       if (hmp->flusher.signal++ == 0)
+                               wakeup(&hmp->flusher.signal);
+               }
+               tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
 }
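
The (int)(seq - hmp->flusher.done) > 0 test above is a wraparound-safe "sequence A is ahead of sequence B" comparison. A minimal standalone sketch of the idiom (the helper name is hypothetical and not part of this change):

static __inline int
hammer_seq_ahead(int a, int b)
{
	/* non-zero when sequence a is logically newer than b, even after wrap */
	return ((int)(a - b) > 0);
}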
 
+void
+hammer_flusher_wait_next(hammer_mount_t hmp)
+{
+       int seq;
+
+       seq = hammer_flusher_async_one(hmp);
+       hammer_flusher_wait(hmp, seq);
+}
+
 void
 hammer_flusher_create(hammer_mount_t hmp)
 {
-       lwkt_create(hammer_flusher_thread, hmp, &hmp->flusher_td, NULL,
-                   0, -1, "hammer");
+       hammer_flusher_info_t info;
+       int i;
+
+       hmp->flusher.signal = 0;
+       hmp->flusher.act = 0;
+       hmp->flusher.done = 0;
+       hmp->flusher.next = 1;
+       hammer_ref(&hmp->flusher.finalize_lock);
+       TAILQ_INIT(&hmp->flusher.run_list);
+       TAILQ_INIT(&hmp->flusher.ready_list);
+
+       lwkt_create(hammer_flusher_master_thread, hmp,
+                   &hmp->flusher.td, NULL, 0, -1, "hammer-M");
+       for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
+               info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
+               info->hmp = hmp;
+               TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
+               lwkt_create(hammer_flusher_slave_thread, info,
+                           &info->td, NULL, 0, -1, "hammer-S%d", i);
+       }
 }
 
 void
 hammer_flusher_destroy(hammer_mount_t hmp)
 {
-       if (hmp->flusher_td) {
-               hmp->flusher_exiting = 1;
-               ++hmp->flusher_seq;
-               wakeup(&hmp->flusher_seq);
-               while (hmp->flusher_td)
-                       tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
+       hammer_flusher_info_t info;
+
+       /*
+        * Kill the master
+        */
+       hmp->flusher.exiting = 1;
+       while (hmp->flusher.td) {
+               ++hmp->flusher.signal;
+               wakeup(&hmp->flusher.signal);
+               tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
+       }
+
+       /*
+        * Kill the slaves
+        */
+       while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
+               KKASSERT(info->runstate == 0);
+               TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+               info->runstate = -1;
+               wakeup(&info->runstate);
+               while (info->td)
+                       tsleep(&info->td, 0, "hmrwwc", 0);
+               kfree(info, hmp->m_misc);
        }
 }
 
+/*
+ * The master flusher thread manages the flusher sequence id and
+ * synchronization with the slave work threads.
+ */
 static void
-hammer_flusher_thread(void *arg)
+hammer_flusher_master_thread(void *arg)
 {
-       hammer_mount_t hmp = arg;
-       int seq;
+       hammer_flush_group_t flg;
+       hammer_mount_t hmp;
 
-       hmp->flusher_demark = kmalloc(sizeof(struct hammer_inode),
-                                     M_HAMMER, M_WAITOK | M_ZERO);
-       TAILQ_INSERT_TAIL(&hmp->flush_list, hmp->flusher_demark, flush_entry);
+       hmp = arg;
 
        for (;;) {
-               seq = hmp->flusher_seq;
-               hammer_flusher_clean_loose_ios(hmp);
-               hammer_flusher_flush(hmp);
-               hammer_flusher_clean_loose_ios(hmp);
-               hmp->flusher_act = seq;
-               wakeup(&hmp->flusher_act);
-
                /*
-                * Loop if more got queued after our demark.
+                * Do at least one flush cycle.  We may have to update the
+                * UNDO FIFO even if no inodes are queued.
                 */
-               if (TAILQ_NEXT(hmp->flusher_demark, flush_entry))
-                       continue;
+               for (;;) {
+                       while (hmp->flusher.group_lock)
+                               tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
+                       hmp->flusher.act = hmp->flusher.next;
+                       ++hmp->flusher.next;
+                       hammer_flusher_clean_loose_ios(hmp);
+                       hammer_flusher_flush(hmp);
+                       hmp->flusher.done = hmp->flusher.act;
+                       wakeup(&hmp->flusher.done);
+                       flg = TAILQ_FIRST(&hmp->flush_group_list);
+                       if (flg == NULL || flg->closed == 0)
+                               break;
+                       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                               break;
+               }
 
-               if (hmp->flusher_exiting)
+               /*
+                * Wait for activity.
+                */
+               if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
-               while (hmp->flusher_seq == hmp->flusher_act)
-                       tsleep(&hmp->flusher_seq, 0, "hmrwwa", 0);
+               while (hmp->flusher.signal == 0)
+                       tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
+
+               /*
+                * Flush for each count on signal but only allow one extra
+                * flush request to build up.
+                */
+               if (--hmp->flusher.signal != 0)
+                       hmp->flusher.signal = 1;
        }
-       TAILQ_REMOVE(&hmp->flush_list, hmp->flusher_demark, flush_entry);
-       kfree(hmp->flusher_demark, M_HAMMER);
-       hmp->flusher_demark = NULL;
-       hmp->flusher_td = NULL;
-       wakeup(&hmp->flusher_exiting);
+
+       /*
+        * And we are done.
+        */
+       hmp->flusher.td = NULL;
+       wakeup(&hmp->flusher.exiting);
        lwkt_exit();
 }
 
+/*
+ * Flush all inodes in the current flush group.
+ */
 static void
-hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
+hammer_flusher_flush(hammer_mount_t hmp)
 {
-       hammer_buffer_t buffer;
-       hammer_io_t io;
+       hammer_flusher_info_t info;
+       hammer_flush_group_t flg;
+       hammer_reserve_t resv;
+       hammer_inode_t ip;
+       hammer_inode_t next_ip;
+       int slave_index;
+       int count;
 
        /*
-        * loose ends - buffers without bp's aren't tracked by the kernel
-        * and can build up, so clean them out.  This can occur when an
-        * IO completes on a buffer with no references left.
+        * Just in case there's a flush race on mount
         */
-       while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
-               KKASSERT(io->mod_list == &hmp->lose_list);
-               TAILQ_REMOVE(io->mod_list, io, mod_entry);
-               io->mod_list = NULL;
-               hammer_ref(&io->lock);
-               buffer = (void *)io;
-               hammer_rel_buffer(buffer, 0);
+       if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
+               return;
+
+       /*
+        * We only do one flg but we may have to loop/retry.
+        */
+       count = 0;
+       while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
+               ++count;
+               if (hammer_debug_general & 0x0001) {
+                       kprintf("hammer_flush %d ttl=%d recs=%d\n",
+                               hmp->flusher.act,
+                               flg->total_count, flg->refs);
+               }
+               if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                       break;
+               hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
+
+               /*
+                * If the previous flush cycle just about exhausted our
+                * UNDO space we may have to do a dummy cycle to move the
+                * first_offset up before actually digging into a new cycle,
+                * or the new cycle will not have sufficient undo space.
+                */
+               if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
+                       hammer_flusher_finalize(&hmp->flusher.trans, 0);
+
+               /*
+                * Ok, we are running this flush group now (this prevents new
+                * additions to it).
+                */
+               flg->running = 1;
+               if (hmp->next_flush_group == flg)
+                       hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
+
+               /*
+                * Iterate the inodes in the flg's flush_tree and assign
+                * them to slaves.
+                */
+               slave_index = 0;
+               info = TAILQ_FIRST(&hmp->flusher.ready_list);
+               next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
+
+               while ((ip = next_ip) != NULL) {
+                       next_ip = RB_NEXT(hammer_fls_rb_tree,
+                                         &flg->flush_tree, ip);
+
+                       if (++hmp->check_yield > hammer_yield_check) {
+                               hmp->check_yield = 0;
+                               lwkt_user_yield();
+                       }
+
+                       /*
+                        * Add ip to the slave's work array.  The slave is
+                        * not currently running.
+                        */
+                       info->work_array[info->count++] = ip;
+                       if (info->count != HAMMER_FLUSH_GROUP_SIZE)
+                               continue;
+
+                       /*
+                        * Get the slave running
+                        */
+                       TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+                       TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
+                       info->flg = flg;
+                       info->runstate = 1;
+                       wakeup(&info->runstate);
+
+                       /*
+                        * Get a new slave.  We may have to wait for one to
+                        * finish running.
+                        */
+                       while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
+                               tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
+                       }
+               }
+
+               /*
+                * Run the current slave if necessary
+                */
+               if (info->count) {
+                       TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+                       TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
+                       info->flg = flg;
+                       info->runstate = 1;
+                       wakeup(&info->runstate);
+               }
+
+               /*
+                * Wait for all slaves to finish running
+                */
+               while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
+                       tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
+
+               /*
+                * Do the final finalization, clean up
+                */
+               hammer_flusher_finalize(&hmp->flusher.trans, 1);
+               hmp->flusher.tid = hmp->flusher.trans.tid;
+
+               hammer_done_transaction(&hmp->flusher.trans);
+
+               /*
+                * Loop up on the same flg.  If the flg is done, clean it up
+                * and break out.  We only flush one flg.
+                */
+               if (RB_EMPTY(&flg->flush_tree)) {
+                       KKASSERT(flg->refs == 0);
+                       TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
+                       kfree(flg, hmp->m_misc);
+                       break;
+               }
+       }
+
+       /*
+        * We may have pure meta-data to flush, or we may have to finish
+        * cycling the UNDO FIFO, even if there were no flush groups.
+        */
+       if (count == 0 && hammer_flusher_haswork(hmp)) {
+               hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
+               hammer_flusher_finalize(&hmp->flusher.trans, 1);
+               hammer_done_transaction(&hmp->flusher.trans);
+       }
+
+       /*
+        * Clean up any freed big-blocks (typically zone-2). 
+        * resv->flush_group is typically set several flush groups ahead
+        * of the free to ensure that the freed block is not reused
+        * until it is actually safe to do so.
+        */
+       while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
+               if (resv->flush_group != hmp->flusher.act)
+                       break;
+               hammer_reserve_clrdelay(hmp, resv);
        }
 }
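
The five-line hand-off that moves a filled hammer_flusher_info from the ready_list to the run_list appears twice in hammer_flusher_flush() above; factored into a hypothetical helper (a sketch, not code from this commit) it would read:

static void
hammer_flusher_dispatch(hammer_mount_t hmp, hammer_flusher_info_t info,
			hammer_flush_group_t flg)
{
	/* hand the filled work_array to a slave thread and wake it */
	TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
	TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
	info->flg = flg;
	info->runstate = 1;
	wakeup(&info->runstate);
}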
 
+
 /*
- * Flush stuff
+ * The slave flusher thread pulls work off the master flush list until no
+ * work is left.
  */
 static void
-hammer_flusher_flush(hammer_mount_t hmp)
+hammer_flusher_slave_thread(void *arg)
 {
-       hammer_volume_t root_volume;
-       hammer_blockmap_t rootmap;
+       hammer_flush_group_t flg;
+       hammer_flusher_info_t info;
+       hammer_mount_t hmp;
        hammer_inode_t ip;
-       hammer_off_t start_offset;
-       int error;
+       int i;
 
-       root_volume = hammer_get_root_volume(hmp, &error);
-       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-       start_offset = rootmap->next_offset;
+       info = arg;
+       hmp = info->hmp;
 
-       if (hammer_debug_general & 0x00010000)
-               kprintf("x");
+       for (;;) {
+               while (info->runstate == 0)
+                       tsleep(&info->runstate, 0, "hmrssw", 0);
+               if (info->runstate < 0)
+                       break;
+               flg = info->flg;
 
-       TAILQ_REMOVE(&hmp->flush_list, hmp->flusher_demark, flush_entry);
-       TAILQ_INSERT_TAIL(&hmp->flush_list, hmp->flusher_demark, flush_entry);
+               for (i = 0; i < info->count; ++i) {
+                       ip = info->work_array[i];
+                       hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
+                       ++hammer_stats_inode_flushes;
+               }
+               info->count = 0;
+               info->runstate = 0;
+               TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
+               TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
+               wakeup(&hmp->flusher.ready_list);
+       }
+       info->td = NULL;
+       wakeup(&info->td);
+       lwkt_exit();
+}
 
-       while ((ip = TAILQ_FIRST(&hmp->flush_list)) != hmp->flusher_demark) {
-               TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
+void
+hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
+{
+       hammer_buffer_t buffer;
+       hammer_io_t io;
 
-               /*
-                * We inherit the inode ref from the flush list
-                */
-               ip->error = hammer_sync_inode(ip, (ip->vp ? 0 : 1));
-               hammer_flush_inode_done(ip);
-               if (hmp->locked_dirty_count > 64 ||
-                   hammer_must_finalize_undo(hmp)) {
-                       hammer_flusher_finalize(hmp, root_volume, start_offset);
-                       start_offset = rootmap->next_offset;
+       /*
+        * loose ends - buffers without bp's aren't tracked by the kernel
+        * and can build up, so clean them out.  This can occur when an
+        * IO completes on a buffer with no references left.
+        */
+       if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
+               crit_enter();   /* biodone() race */
+               while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
+                       KKASSERT(io->mod_list == &hmp->lose_list);
+                       TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
+                       io->mod_list = NULL;
+                       if (io->lock.refs == 0)
+                               ++hammer_count_refedbufs;
+                       hammer_ref(&io->lock);
+                       buffer = (void *)io;
+                       hammer_rel_buffer(buffer, 0);
                }
+               crit_exit();
        }
-       hammer_flusher_finalize(hmp, root_volume, start_offset);
-       hammer_rel_volume(root_volume, 0);
 }
 
 /*
- * If the UNDO area gets over half full we have to flush it.  We can't
- * afford the UNDO area becoming completely full as that would break
- * the crash recovery atomicy.
+ * Flush a single inode that is part of a flush group.
+ *
+ * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
+ * the front-end should have reserved sufficient space on the media.  Any
+ * error other than EWOULDBLOCK will force the mount to be read-only.
  */
 static
-int
-hammer_must_finalize_undo(hammer_mount_t hmp)
+void
+hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
 {
-       hammer_blockmap_t rootmap;
-       int bytes;
-       int max_bytes;
+       hammer_mount_t hmp = ip->hmp;
+       int error;
+
+       hammer_flusher_clean_loose_ios(hmp);
+       error = hammer_sync_inode(trans, ip);
 
-       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
+       /*
+        * EWOULDBLOCK can happen under normal operation, all other errors
+        * are considered extremely serious.  We must set WOULDBLOCK
+        * mechanics to deal with the mess left over from the abort of the
+        * previous flush.
+        */
+       if (error) {
+               ip->flags |= HAMMER_INODE_WOULDBLOCK;
+               if (error == EWOULDBLOCK)
+                       error = 0;
+       }
+       hammer_flush_inode_done(ip, error);
+       while (hmp->flusher.finalize_want)
+               tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
+       if (hammer_flusher_undo_exhausted(trans, 1)) {
+               kprintf("HAMMER: Warning: UNDO area too small!\n");
+               hammer_flusher_finalize(trans, 1);
+       } else if (hammer_flusher_meta_limit(trans->hmp)) {
+               hammer_flusher_finalize(trans, 0);
+       }
+}
 
-       if (rootmap->first_offset <= rootmap->next_offset) {
-               bytes = (int)(rootmap->next_offset - rootmap->first_offset);
+/*
+ * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
+ * space left.
+ *
+ * 1/4 - Emergency free undo space level.  Below this point the flusher
+ *      will finalize even if directory dependencies have not been resolved.
+ *
+ * 2/4 - Used by the pruning and reblocking code.  These functions may be
+ *      running in parallel with a flush and cannot be allowed to drop
+ *      available undo space to emergency levels.
+ *
+ * 3/4 - Used at the beginning of a flush to force-sync the volume header
+ *      to give the flush plenty of runway to work in.
+ */
+int
+hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
+{
+       if (hammer_undo_space(trans) <
+           hammer_undo_max(trans->hmp) * quarter / 4) {
+               return(1);
        } else {
-               bytes = (int)(rootmap->alloc_offset - rootmap->first_offset +
-                             rootmap->next_offset);
+               return(0);
        }
-       max_bytes = (int)(rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK);
-       if (bytes > max_bytes / 2)
-               kprintf("*");
-       return (bytes > max_bytes / 2);
 }
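
To make the quarter thresholds concrete, assume (for illustration only) an UNDO FIFO whose hammer_undo_max() is 1GB:

/*
 * Hypothetical numbers, assuming hammer_undo_max() == 1GB:
 *   quarter == 1: trips once free undo space drops below 256MB
 *                 (emergency finalize from hammer_flusher_flush_inode)
 *   quarter == 3: trips once free undo space drops below 768MB
 *                 (dummy finalize at the start of a flush cycle)
 */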
 
 /*
- * To finalize the flush we finish flushing all undo and data buffers
- * still present, then we update the volume header and flush it,
- * then we flush out the mata-data (that can now be undone).
+ * Flush all pending UNDOs, wait for write completion, update the volume
+ * header with the new UNDO end position, and flush it.  Then
+ * asynchronously flush the meta-data.
  *
- * Note that as long as the undo fifo's start and end points do not
- * match, we always must at least update the volume header.
+ * If this is the last finalization in a flush group we also synchronize
+ * our cached blockmap and set hmp->flusher_undo_start and our cached undo
+ * fifo first_offset so the next flush resets the FIFO pointers.
  *
- * The sync_lock is used by other threads to issue modifying operations
- * to HAMMER media without crossing a synchronization boundary or messing
- * up the media synchronization operation.  Specifically, the pruning
- * the reblocking ioctls, and allowing the frontend strategy code to
- * allocate media data space.
+ * If this is not final it is being called because too many dirty meta-data
+ * buffers have built up and must be flushed with UNDO synchronization to
+ * avoid a buffer cache deadlock.
  */
-static
 void
-hammer_flusher_finalize(hammer_mount_t hmp, hammer_volume_t root_volume,
-                       hammer_off_t start_offset)
+hammer_flusher_finalize(hammer_transaction_t trans, int final)
 {
-       hammer_blockmap_t rootmap;
+       hammer_volume_t root_volume;
+       hammer_blockmap_t cundomap, dundomap;
+       hammer_mount_t hmp;
        hammer_io_t io;
+       int count;
+       int i;
 
-       hammer_lock_ex(&hmp->sync_lock);
+       hmp = trans->hmp;
+       root_volume = trans->rootvol;
 
        /*
-        * Flush undo bufs
+        * Exclusively lock the flusher.  This guarantees that all dirty
+        * buffers will be idled (have a mod-count of 0).
         */
-       while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
-               KKASSERT(io->modify_refs == 0);
+       ++hmp->flusher.finalize_want;
+       hammer_lock_ex(&hmp->flusher.finalize_lock);
+
+       /*
+        * If this isn't the final sync several threads may have hit the
+        * meta-limit at the same time and raced.  Only sync if we really
+        * have to, after acquiring the lock.
+        */
+       if (final == 0 && !hammer_flusher_meta_limit(hmp))
+               goto done;
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto done;
+
+       /*
+        * Flush data buffers.  This can occur asynchronously and at any
+        * time.  We must interlock against the frontend direct-data write
+        * but do not have to acquire the sync-lock yet.
+        */
+       count = 0;
+       while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
+               if (io->ioerror)
+                       break;
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
+               hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
-               hammer_rel_buffer((hammer_buffer_t)io, 1);
+               hammer_io_flush(io, 0);
+               hammer_io_done_interlock(io);
+               hammer_rel_buffer((hammer_buffer_t)io, 0);
+               ++count;
        }
 
        /*
-        * Flush data bufs
+        * The sync-lock is required for the remaining sequence.  This lock
+        * prevents meta-data from being modified.
         */
-       while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
+       hammer_sync_lock_ex(trans);
+
+       /*
+        * If we have been asked to finalize the volume header sync the
+        * cached blockmap to the on-disk blockmap.  Generate an UNDO
+        * record for the update.
+        */
+       if (final) {
+               cundomap = &hmp->blockmap[0];
+               dundomap = &root_volume->ondisk->vol0_blockmap[0];
+               if (root_volume->io.modified) {
+                       hammer_modify_volume(trans, root_volume,
+                                            dundomap, sizeof(hmp->blockmap));
+                       for (i = 0; i < HAMMER_MAX_ZONES; ++i)
+                               hammer_crc_set_blockmap(&cundomap[i]);
+                       bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
+                       hammer_modify_volume_done(root_volume);
+               }
+       }
+
+       /*
+        * Flush UNDOs
+        */
+       count = 0;
+       while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
-               hammer_rel_buffer((hammer_buffer_t)io, 1);
+               hammer_io_flush(io, hammer_undo_reclaim(io));
+               hammer_rel_buffer((hammer_buffer_t)io, 0);
+               ++count;
        }
 
        /*
-        * Wait for I/O to complete
+        * Wait for I/Os to complete and flush the cache on the target disk.
         */
-       crit_enter();
-       while (hmp->io_running_count) {
-               kprintf("W[%d]", hmp->io_running_count);
-               tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
-       }
-       crit_exit();
+       hammer_flusher_clean_loose_ios(hmp);
+       hammer_io_wait_all(hmp, "hmrfl1");
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
 
        /*
-        * Update the volume header
+        * HAMMER VERSION < 4:
+        *      Update the on-disk volume header with new UNDO FIFO end
+        *      position (do not generate new UNDO records for this change).
+        *      We have to do this for the UNDO FIFO whether (final) is
+        *      set or not in order for the UNDOs to be recognized on
+        *      recovery.
+        *
+        * HAMMER VERSION >= 4:
+        *      The UNDO FIFO data written above will be recognized on
+        *      recovery without us having to sync the volume header.
+        *
+        * Also update the on-disk next_tid field.  This does not require
+        * an UNDO.  However, because our TID is generated before we get
+        * the sync lock, another sync may have beaten us to the punch.
+        *
+        * This also has the side effect of updating first_offset based on
+        * a prior finalization when the first finalization of the next flush
+        * cycle occurs, removing any undo info from the prior finalization
+        * from consideration.
+        *
+        * The volume header will be flushed out synchronously.
         */
-       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-       if (rootmap->first_offset != start_offset) {
-               hammer_modify_volume(NULL, root_volume, NULL, 0);
-               rootmap->first_offset = start_offset;
-               hammer_modify_volume_done(root_volume);
-       }
-       if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
+       dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
+       cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
+
+       if (dundomap->first_offset != cundomap->first_offset ||
+                  dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
-               root_volume->ondisk->vol0_next_tid = hmp->next_tid;
+               dundomap->first_offset = cundomap->first_offset;
+               dundomap->next_offset = cundomap->next_offset;
+               hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }
 
        /*
-        * Sync our cached blockmap array with the one in the root
-        * volume header.
+        * vol0_next_tid is used for TID selection and is updated without
+        * an UNDO so we do not reuse a TID that may have been rolled-back.
+        *
+        * vol0_last_tid is the highest fully-synchronized TID.  It is
+        * set-up when the UNDO fifo is fully synced, later on (not here).
         */
        if (root_volume->io.modified) {
-               bcopy(hmp->blockmap, root_volume->ondisk->vol0_blockmap,
-                     sizeof(hmp->blockmap));
-               hammer_io_flush(&root_volume->io);
+               hammer_modify_volume(NULL, root_volume, NULL, 0);
+               if (root_volume->ondisk->vol0_next_tid < trans->tid)
+                       root_volume->ondisk->vol0_next_tid = trans->tid;
+               hammer_crc_set_volume(root_volume->ondisk);
+               hammer_modify_volume_done(root_volume);
+               hammer_io_flush(&root_volume->io, 0);
        }
 
        /*
-        * Wait for I/O to complete
+        * Wait for I/Os to complete.
+        *
+        * For HAMMER VERSION 4+ filesystems we do not have to wait for
+        * the I/O to complete as the new UNDO FIFO entries are recognized
+        * even without the volume header update.  This allows the volume
+        * header to be flushed along with meta-data, significantly reducing
+        * flush overheads.
         */
-       crit_enter();
-       while (hmp->io_running_count) {
-               tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
-       }
-       crit_exit();
+       hammer_flusher_clean_loose_ios(hmp);
+       if (hmp->version < HAMMER_VOL_VERSION_FOUR)
+               hammer_io_wait_all(hmp, "hmrfl2");
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
 
        /*
-        * Flush meta-data
+        * Flush meta-data.  The meta-data will be undone if we crash
+        * so we can safely flush it asynchronously.  There is no need
+        * to wait for I/O to complete (or issue a synchronous disk flush).
+        *
+        * In fact, even if we did wait the meta-data will still be undone
+        * by a crash up until the next flush cycle due to the first_offset
+        * in the volume header for the UNDO FIFO not being adjusted until
+        * the following flush cycle.
         */
+       count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
-               hammer_rel_buffer((hammer_buffer_t)io, 1);
+               hammer_io_flush(io, 0);
+               hammer_rel_buffer((hammer_buffer_t)io, 0);
+               ++count;
+       }
+
+       /*
+        * If this is the final finalization for the flush group set
+        * up for the next sequence by setting a new first_offset in
+        * our cached blockmap and clearing the undo history.
+        *
+        * Even though we have updated our cached first_offset, the on-disk
+        * first_offset still governs available-undo-space calculations.
+        */
+       if (final) {
+               cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
+               if (cundomap->first_offset == cundomap->next_offset) {
+                       hmp->hflags &= ~HMNT_UNDO_DIRTY;
+               } else {
+                       cundomap->first_offset = cundomap->next_offset;
+                       hmp->hflags |= HMNT_UNDO_DIRTY;
+               }
+               hammer_clear_undo_history(hmp);
+
+               /*
+                * Flush tid sequencing.  flush_tid1 is fully synchronized,
+                * meaning a crash will not roll it back.  flush_tid2 has
+                * been written out asynchronously and a crash will roll
+                * it back.  flush_tid1 is used for all mirroring masters.
+                */
+               if (hmp->flush_tid1 != hmp->flush_tid2) {
+                       hmp->flush_tid1 = hmp->flush_tid2;
+                       wakeup(&hmp->flush_tid1);
+               }
+               hmp->flush_tid2 = trans->tid;
+       }
+
+       /*
+        * Cleanup.  Report any critical errors.
+        */
+failed:
+       hammer_sync_unlock(trans);
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
+               kprintf("HAMMER(%s): Critical write error during flush, "
+                       "refusing to sync UNDO FIFO\n",
+                       root_volume->ondisk->vol_name);
+       }
+
+done:
+       hammer_unlock(&hmp->flusher.finalize_lock);
+
+       if (--hmp->flusher.finalize_want == 0)
+               wakeup(&hmp->flusher.finalize_want);
+       hammer_stats_commits += final;
+}
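
Condensed into one place, the ordering enforced by hammer_flusher_finalize() above is roughly the following (a summary of the code, not a comment from the commit):

/*
 * Finalization order (simplified):
 *   1. flush dirty data buffers (frontend write interlock, no sync_lock yet)
 *   2. take the sync_lock; if final, copy the cached blockmap into the
 *      volume header under UNDO
 *   3. flush the UNDO FIFO buffers and wait for that I/O to complete
 *   4. update the on-disk UNDO first/next offsets and vol0_next_tid,
 *      then flush the volume header (waited on only for version < 4)
 *   5. asynchronously flush the meta-data buffers
 *   6. if final, advance the cached first_offset and the flush TIDs
 */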
+
+/*
+ * Return non-zero if too many dirty meta-data buffers have built up.
+ *
+ * Since we cannot allow such buffers to flush until we have dealt with
+ * the UNDOs, we risk deadlocking the kernel's buffer cache.
+ */
+int
+hammer_flusher_meta_limit(hammer_mount_t hmp)
+{
+       if (hmp->locked_dirty_space + hmp->io_running_space >
+           hammer_limit_dirtybufspace) {
+               return(1);
+       }
+       return(0);
+}
+
+/*
+ * Return non-zero if too many dirty meta-data buffers have built up.
+ *
+ * This version is used by background operations (mirror, prune, reblock)
+ * to leave room for foreground operations.
+ */
+int
+hammer_flusher_meta_halflimit(hammer_mount_t hmp)
+{
+       if (hmp->locked_dirty_space + hmp->io_running_space >
+           hammer_limit_dirtybufspace / 2) {
+               return(1);
+       }
+       return(0);
+}
+
+/*
+ * Return non-zero if the flusher still has something to flush.
+ */
+int
+hammer_flusher_haswork(hammer_mount_t hmp)
+{
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               return(0);
+       if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
+           TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
+           TAILQ_FIRST(&hmp->undo_list) ||
+           TAILQ_FIRST(&hmp->data_list) ||
+           TAILQ_FIRST(&hmp->meta_list) ||
+           (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
+       ) {
+               return(1);
        }
-       hammer_unlock(&hmp->sync_lock);
+       return(0);
 }