HAMMER VFS - The backend flusher now sorts inodes
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
index ad46677..5991f3d 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.24 2008/06/10 22:30:21 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
  */
 /*
  * HAMMER dependancy flusher thread
 
 static void hammer_flusher_master_thread(void *arg);
 static void hammer_flusher_slave_thread(void *arg);
-static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
 static void hammer_flusher_flush(hammer_mount_t hmp);
 static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
-static int hammer_must_finalize_undo(hammer_mount_t hmp);
-static void hammer_flusher_finalize(hammer_transaction_t trans, int final);
+
+RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
+              hammer_ino_rb_compare);
+
+/*
+ * Inodes are sorted and assigned to slave threads in groups of 128.
+ * We want a flush group size large enough such that the slave threads
+ * are not likely to interfere with each other when accessing the B-Tree,
+ * but not so large that we lose concurrency.
+ */
+#define HAMMER_FLUSH_GROUP_SIZE 128
 
 /*
  * Support structures for the flusher threads.
  */
 struct hammer_flusher_info {
+       TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;
-       int             startit;
-       TAILQ_HEAD(,hammer_inode) work_list;
+       int             runstate;
+       int             count;
+       hammer_flush_group_t flg;
+       hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
 };
 
 typedef struct hammer_flusher_info *hammer_flusher_info_t;
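
The flush_tree keeps each flush group's inodes sorted (via hammer_ino_rb_compare) so that slaves working through adjacent inodes tend to hit adjacent parts of the B-Tree, and the master hands them out in work_array batches of HAMMER_FLUSH_GROUP_SIZE.  A rough user-space sketch of that sort-then-batch idea, with qsort() standing in for the red-black tree and ino_cmp()/dispatch_batch() as hypothetical names:

/*
 * Illustrative sketch only (not part of this commit): sort a set of
 * inode numbers, then hand them to workers in batches of 128.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLUSH_GROUP_SIZE        128

static int
ino_cmp(const void *a, const void *b)
{
        uint64_t x = *(const uint64_t *)a;
        uint64_t y = *(const uint64_t *)b;

        return ((x < y) ? -1 : (x > y));
}

static void
dispatch_batch(const uint64_t *inos, int count)
{
        /* a real slave would flush these inodes; just report here */
        printf("batch of %3d inodes starting at %ju\n",
               count, (uintmax_t)inos[0]);
}

int
main(void)
{
        uint64_t inos[1000];
        int i, n = 1000;

        for (i = 0; i < n; ++i)
                inos[i] = (uint64_t)rand();
        qsort(inos, n, sizeof(inos[0]), ino_cmp);

        for (i = 0; i < n; i += FLUSH_GROUP_SIZE) {
                int count = (n - i < FLUSH_GROUP_SIZE) ?
                            (n - i) : FLUSH_GROUP_SIZE;
                dispatch_batch(&inos[i], count);
        }
        return (0);
}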
 
+/*
+ * Sync all inodes pending on the flusher.
+ *
+ * All flush groups will be flushed.  This does not queue dirty inodes
+ * to the flush groups; it only flushes out what has already been queued!
+ */
 void
 hammer_flusher_sync(hammer_mount_t hmp)
 {
        int seq;
 
+       seq = hammer_flusher_async(hmp, NULL);
+       hammer_flusher_wait(hmp, seq);
+}
+
+/*
+ * Sync all inodes pending on the flusher - return immediately.
+ *
+ * All flush groups will be flushed.
+ */
+int
+hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
+{
+       hammer_flush_group_t flg;
+       int seq = hmp->flusher.next;
+
+       TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
+               if (flg->running == 0)
+                       ++seq;
+               flg->closed = 1;
+               if (flg == close_flg)
+                       break;
+       }
        if (hmp->flusher.td) {
-               seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
-               while ((int)(seq - hmp->flusher.done) > 0)
-                       tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
+       } else {
+               seq = hmp->flusher.done;
        }
+       return(seq);
 }
 
-void
-hammer_flusher_async(hammer_mount_t hmp)
+int
+hammer_flusher_async_one(hammer_mount_t hmp)
 {
+       int seq;
+
        if (hmp->flusher.td) {
+               seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
+       } else {
+               seq = hmp->flusher.done;
        }
+       return(seq);
+}
+
+/*
+ * Wait for the flusher to get to the specified sequence number.
+ * Signal the flusher as often as necessary to keep it going.
+ */
+void
+hammer_flusher_wait(hammer_mount_t hmp, int seq)
+{
+       while ((int)(seq - hmp->flusher.done) > 0) {
+               if (hmp->flusher.act != seq) {
+                       if (hmp->flusher.signal++ == 0)
+                               wakeup(&hmp->flusher.signal);
+               }
+               tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
+       }
+}
+
+void
+hammer_flusher_wait_next(hammer_mount_t hmp)
+{
+       int seq;
+
+       seq = hammer_flusher_async_one(hmp);
+       hammer_flusher_wait(hmp, seq);
 }
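
hammer_flusher_wait() tests (int)(seq - hmp->flusher.done) > 0, a comparison that stays correct when the sequence counters wrap.  A minimal stand-alone sketch of the same idea, written in the unsigned-subtraction form (not taken from the commit):

/*
 * Wraparound-safe sequence comparison: the subtraction is done in
 * unsigned arithmetic and the result is reinterpreted as signed, so
 * the test stays correct after the counters wrap past INT_MAX.
 */
#include <assert.h>
#include <limits.h>

static int
seq_after(int a, int b)
{
        /* non-zero if sequence a is "after" sequence b */
        return ((int)((unsigned int)a - (unsigned int)b) > 0);
}

int
main(void)
{
        assert(seq_after(5, 3));
        assert(!seq_after(3, 5));
        /* still correct across the wrap from INT_MAX to INT_MIN */
        assert(seq_after(INT_MIN + 2, INT_MAX - 2));
        return (0);
}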
 
 void
@@ -96,17 +166,16 @@ hammer_flusher_create(hammer_mount_t hmp)
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
-       hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);
+       TAILQ_INIT(&hmp->flusher.run_list);
+       TAILQ_INIT(&hmp->flusher.ready_list);
 
        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
-               info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
+               info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
-               TAILQ_INIT(&info->work_list);
-               ++hmp->flusher.count;
-               hmp->flusher.info[i] = info;
+               TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
@@ -116,7 +185,6 @@ void
 hammer_flusher_destroy(hammer_mount_t hmp)
 {
        hammer_flusher_info_t info;
-       int i;
 
        /*
         * Kill the master
@@ -131,20 +199,15 @@ hammer_flusher_destroy(hammer_mount_t hmp)
        /*
         * Kill the slaves
         */
-       for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
-               if ((info = hmp->flusher.info[i]) != NULL) {
-                       KKASSERT(info->startit == 0);
-                       info->startit = -1;
-                       wakeup(&info->startit);
-                       while (info->td) {
-                               tsleep(&info->td, 0, "hmrwwc", 0);
-                       }
-                       hmp->flusher.info[i] = NULL;
-                       kfree(info, M_HAMMER);
-                       --hmp->flusher.count;
-               }
+       while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
+               KKASSERT(info->runstate == 0);
+               TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+               info->runstate = -1;
+               wakeup(&info->runstate);
+               while (info->td)
+                       tsleep(&info->td, 0, "hmrwwc", 0);
+               kfree(info, hmp->m_misc);
        }
-       KKASSERT(hmp->flusher.count == 0);
 }
 
 /*
@@ -154,33 +217,46 @@ hammer_flusher_destroy(hammer_mount_t hmp)
 static void
 hammer_flusher_master_thread(void *arg)
 {
-       hammer_mount_t hmp = arg;
+       hammer_flush_group_t flg;
+       hammer_mount_t hmp;
+
+       hmp = arg;
 
        for (;;) {
-               while (hmp->flusher.group_lock)
-                       tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
-               kprintf("S");
-               hmp->flusher.act = hmp->flusher.next;
-               ++hmp->flusher.next;
-               hammer_flusher_clean_loose_ios(hmp);
-               hammer_flusher_flush(hmp);
-               hammer_flusher_clean_loose_ios(hmp);
-               hmp->flusher.done = hmp->flusher.act;
-               wakeup(&hmp->flusher.done);
+               /*
+                * Do at least one flush cycle.  We may have to update the
+                * UNDO FIFO even if no inodes are queued.
+                */
+               for (;;) {
+                       while (hmp->flusher.group_lock)
+                               tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
+                       hmp->flusher.act = hmp->flusher.next;
+                       ++hmp->flusher.next;
+                       hammer_flusher_clean_loose_ios(hmp);
+                       hammer_flusher_flush(hmp);
+                       hmp->flusher.done = hmp->flusher.act;
+                       wakeup(&hmp->flusher.done);
+                       flg = TAILQ_FIRST(&hmp->flush_group_list);
+                       if (flg == NULL || flg->closed == 0)
+                               break;
+                       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                               break;
+               }
 
                /*
                 * Wait for activity.
                 */
-               if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
+               if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
+               while (hmp->flusher.signal == 0)
+                       tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
 
                /*
-                * This is a hack until we can dispose of frontend buffer
-                * cache buffers on the frontend.
+                * Flush for each count on signal but only allow one extra
+                * flush request to build up.
                 */
-               while (hmp->flusher.signal == 0)
-                       tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
-               hmp->flusher.signal = 0;
+               if (--hmp->flusher.signal != 0)
+                       hmp->flusher.signal = 1;
        }
 
        /*
@@ -192,40 +268,201 @@ hammer_flusher_master_thread(void *arg)
 }
 
 /*
- * The slave flusher thread pulls work off the master flush_list until no
+ * Flush all inodes in the current flush group.
+ */
+static void
+hammer_flusher_flush(hammer_mount_t hmp)
+{
+       hammer_flusher_info_t info;
+       hammer_flush_group_t flg;
+       hammer_reserve_t resv;
+       hammer_inode_t ip;
+       hammer_inode_t next_ip;
+       int slave_index;
+       int count;
+
+       /*
+        * Just in case there's a flush race on mount
+        */
+       if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
+               return;
+
+       /*
+        * We only do one flg but we may have to loop/retry.
+        */
+       count = 0;
+       while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
+               ++count;
+               if (hammer_debug_general & 0x0001) {
+                       kprintf("hammer_flush %d ttl=%d recs=%d\n",
+                               hmp->flusher.act,
+                               flg->total_count, flg->refs);
+               }
+               if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                       break;
+               hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
+
+               /*
+                * If the previous flush cycle just about exhausted our
+                * UNDO space we may have to do a dummy cycle to move the
+                * first_offset up before actually digging into a new cycle,
+                * or the new cycle will not have sufficient undo space.
+                */
+               if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
+                       hammer_flusher_finalize(&hmp->flusher.trans, 0);
+
+               /*
+                * Ok, we are running this flush group now (this prevents new
+                * additions to it).
+                */
+               flg->running = 1;
+               if (hmp->next_flush_group == flg)
+                       hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
+
+               /*
+                * Iterate the inodes in the flg's flush_tree and assign
+                * them to slaves.
+                */
+               slave_index = 0;
+               info = TAILQ_FIRST(&hmp->flusher.ready_list);
+               next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
+
+               while ((ip = next_ip) != NULL) {
+                       next_ip = RB_NEXT(hammer_fls_rb_tree,
+                                         &flg->flush_tree, ip);
+
+                       if (++hmp->check_yield > hammer_yield_check) {
+                               hmp->check_yield = 0;
+                               lwkt_user_yield();
+                       }
+
+                       /*
+                        * Add ip to the slave's work array.  The slave is
+                        * not currently running.
+                        */
+                       info->work_array[info->count++] = ip;
+                       if (info->count != HAMMER_FLUSH_GROUP_SIZE)
+                               continue;
+
+                       /*
+                        * Get the slave running
+                        */
+                       TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+                       TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
+                       info->flg = flg;
+                       info->runstate = 1;
+                       wakeup(&info->runstate);
+
+                       /*
+                        * Get a new slave.  We may have to wait for one to
+                        * finish running.
+                        */
+                       while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
+                               tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
+                       }
+               }
+
+               /*
+                * Run the current slave if necessary
+                */
+               if (info->count) {
+                       TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
+                       TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
+                       info->flg = flg;
+                       info->runstate = 1;
+                       wakeup(&info->runstate);
+               }
+
+               /*
+                * Wait for all slaves to finish running
+                */
+               while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
+                       tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
+
+               /*
+                * Do the final finalization, clean up
+                */
+               hammer_flusher_finalize(&hmp->flusher.trans, 1);
+               hmp->flusher.tid = hmp->flusher.trans.tid;
+
+               hammer_done_transaction(&hmp->flusher.trans);
+
+               /*
+                * Loop up on the same flg.  If the flg is done, clean it up
+                * and break out.  We only flush one flg.
+                */
+               if (RB_EMPTY(&flg->flush_tree)) {
+                       KKASSERT(flg->refs == 0);
+                       TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
+                       kfree(flg, hmp->m_misc);
+                       break;
+               }
+       }
+
+       /*
+        * We may have pure meta-data to flush, or we may have to finish
+        * cycling the UNDO FIFO, even if there were no flush groups.
+        */
+       if (count == 0 && hammer_flusher_haswork(hmp)) {
+               hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
+               hammer_flusher_finalize(&hmp->flusher.trans, 1);
+               hammer_done_transaction(&hmp->flusher.trans);
+       }
+
+       /*
+        * Clean up any freed big-blocks (typically zone-2). 
+        * resv->flush_group is typically set several flush groups ahead
+        * of the free to ensure that the freed block is not reused until
+        * it is safe to reuse it.
+        */
+       while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
+               if (resv->flush_group != hmp->flusher.act)
+                       break;
+               hammer_reserve_clrdelay(hmp, resv);
+       }
+}
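
The loop above hands work to slaves by pulling an idle hammer_flusher_info off ready_list, filling its work_array, moving it to run_list and waking it; the slave puts itself back on ready_list and wakes the master when it finishes.  A condensed user-space analogue of that handoff for a single slave, with pthread primitives standing in for tsleep()/wakeup() and hypothetical names (worker, submit_batch, worker_main):

/*
 * User-space analogue only (not the kernel code).  Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define GROUP_SIZE      4

struct worker {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        int             runstate;       /* 0=idle, 1=running, -1=exit */
        int             count;
        int             work[GROUP_SIZE];
};

static void *
worker_main(void *arg)
{
        struct worker *w = arg;
        int i;

        pthread_mutex_lock(&w->lock);
        for (;;) {
                while (w->runstate == 0)
                        pthread_cond_wait(&w->cv, &w->lock);
                if (w->runstate < 0)
                        break;
                for (i = 0; i < w->count; ++i)
                        printf("flush item %d\n", w->work[i]);
                w->count = 0;
                w->runstate = 0;                /* back on "ready_list" */
                pthread_cond_broadcast(&w->cv);
        }
        pthread_mutex_unlock(&w->lock);
        return (NULL);
}

static void
submit_batch(struct worker *w, const int *items, int n)
{
        int i;

        pthread_mutex_lock(&w->lock);
        while (w->runstate != 0)                /* wait for an idle slave */
                pthread_cond_wait(&w->cv, &w->lock);
        for (i = 0; i < n; ++i)
                w->work[i] = items[i];
        w->count = n;
        w->runstate = 1;                        /* "move to run_list" */
        pthread_cond_broadcast(&w->cv);
        pthread_mutex_unlock(&w->lock);
}

int
main(void)
{
        struct worker w = { PTHREAD_MUTEX_INITIALIZER,
                            PTHREAD_COND_INITIALIZER, 0, 0, { 0 } };
        int items[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        pthread_t td;

        pthread_create(&td, NULL, worker_main, &w);
        submit_batch(&w, &items[0], GROUP_SIZE);
        submit_batch(&w, &items[GROUP_SIZE], GROUP_SIZE);

        pthread_mutex_lock(&w.lock);
        while (w.runstate != 0)                 /* drain the last batch */
                pthread_cond_wait(&w.cv, &w.lock);
        w.runstate = -1;                        /* ask the slave to exit */
        pthread_cond_broadcast(&w.cv);
        pthread_mutex_unlock(&w.lock);
        pthread_join(td, NULL);
        return (0);
}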
+
+
+/*
+ * The slave flusher thread pulls work off the master flush list until no
  * work is left.
  */
 static void
 hammer_flusher_slave_thread(void *arg)
 {
+       hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
+       int i;
 
        info = arg;
        hmp = info->hmp;
 
        for (;;) {
-               while (info->startit == 0)
-                       tsleep(&info->startit, 0, "hmrssw", 0);
-               if (info->startit < 0)
+               while (info->runstate == 0)
+                       tsleep(&info->runstate, 0, "hmrssw", 0);
+               if (info->runstate < 0)
                        break;
-               info->startit = 0;
-               while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
-                       if (ip->flush_group != hmp->flusher.act)
-                               break;
-                       TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
+               flg = info->flg;
+
+               for (i = 0; i < info->count; ++i) {
+                       ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
+                       ++hammer_stats_inode_flushes;
                }
-               if (--hmp->flusher.running == 0)
-                       wakeup(&hmp->flusher.running);
+               info->count = 0;
+               info->runstate = 0;
+               TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
+               TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
+               wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
 }
 
-static void
+void
 hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
 {
        hammer_buffer_t buffer;
@@ -236,115 +473,80 @@ hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
-       while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
-               KKASSERT(io->mod_list == &hmp->lose_list);
-               TAILQ_REMOVE(io->mod_list, io, mod_entry);
-               io->mod_list = NULL;
-               hammer_ref(&io->lock);
-               buffer = (void *)io;
-               hammer_rel_buffer(buffer, 0);
-       }
-}
-
-/*
- * Flush all inodes in the current flush group.
- */
-static void
-hammer_flusher_flush(hammer_mount_t hmp)
-{
-       hammer_flusher_info_t info;
-       hammer_reserve_t resv;
-       int i;
-       int n;
-
-       hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
-
-       /*
-        * Start work threads.
-        */
-       i = 0;
-       n = hmp->count_iqueued / 64;
-       if (TAILQ_FIRST(&hmp->flush_list)) {
-               for (i = 0; i <= hmp->count_iqueued / 64; ++i) {
-                       if (i == HAMMER_MAX_FLUSHERS ||
-                           hmp->flusher.info[i] == NULL) {
-                               break;
-                       }
-                       info = hmp->flusher.info[i];
-                       if (info->startit == 0) {
-                               ++hmp->flusher.running;
-                               info->startit = 1;
-                               wakeup(&info->startit);
-                       }
+       if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
+               crit_enter();   /* biodone() race */
+               while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
+                       KKASSERT(io->mod_list == &hmp->lose_list);
+                       TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
+                       io->mod_list = NULL;
+                       if (io->lock.refs == 0)
+                               ++hammer_count_refedbufs;
+                       hammer_ref(&io->lock);
+                       buffer = (void *)io;
+                       hammer_rel_buffer(buffer, 0);
                }
+               crit_exit();
        }
-       while (hmp->flusher.running)
-               tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);
-
-       hammer_flusher_finalize(&hmp->flusher.trans, 1);
-       hmp->flusher.tid = hmp->flusher.trans.tid;
-
-       /*
-        * Clean up any freed big-blocks (typically zone-2). 
-        * resv->flush_group is typically set several flush groups ahead
-        * of the free to ensure that the freed block is not reused until
-        * it can no longer be reused.
-        */
-       while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
-               if (resv->flush_group != hmp->flusher.act)
-                       break;
-               TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
-               hammer_blockmap_reserve_complete(hmp, resv);
-       }
-       hammer_done_transaction(&hmp->flusher.trans);
 }
 
 /*
  * Flush a single inode that is part of a flush group.
+ *
+ * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
+ * the front-end should have reserved sufficient space on the media.  Any
+ * error other than EWOULDBLOCK will force the mount to be read-only.
  */
 static
 void
 hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
 {
        hammer_mount_t hmp = ip->hmp;
+       int error;
 
-       hammer_lock_sh(&hmp->flusher.finalize_lock);
-       ip->error = hammer_sync_inode(ip);
-       hammer_flush_inode_done(ip);
-       hammer_unlock(&hmp->flusher.finalize_lock);
+       hammer_flusher_clean_loose_ios(hmp);
+       error = hammer_sync_inode(trans, ip);
+
+       /*
+        * EWOULDBLOCK can happen under normal operation; all other errors
+        * are considered extremely serious.  We must set WOULDBLOCK
+        * mechanics to deal with the mess left over from the abort of the
+        * previous flush.
+        */
+       if (error) {
+               ip->flags |= HAMMER_INODE_WOULDBLOCK;
+               if (error == EWOULDBLOCK)
+                       error = 0;
+       }
+       hammer_flush_inode_done(ip, error);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
-       if (hammer_must_finalize_undo(hmp)) {
-               hmp->flusher.finalize_want = 1;
-               hammer_lock_ex(&hmp->flusher.finalize_lock);
-               kprintf("HAMMER: Warning: UNDO area too small!");
+       if (hammer_flusher_undo_exhausted(trans, 1)) {
+               kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
-               hammer_unlock(&hmp->flusher.finalize_lock);
-               hmp->flusher.finalize_want = 0;
-               wakeup(&hmp->flusher.finalize_want);
-       } else if (trans->hmp->locked_dirty_count +
-                  trans->hmp->io_running_count > hammer_limit_dirtybufs) {
-               hmp->flusher.finalize_want = 1;
-               hammer_lock_ex(&hmp->flusher.finalize_lock);
-               kprintf("t");
+       } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
-               hammer_unlock(&hmp->flusher.finalize_lock);
-               hmp->flusher.finalize_want = 0;
-               wakeup(&hmp->flusher.finalize_want);
        }
 }
 
 /*
- * If the UNDO area gets over half full we have to flush it.  We can't
- * afford the UNDO area becoming completely full as that would break
- * the crash recovery atomicy.
+ * Return non-zero if the UNDO area has less than (quarter / 4) of its
+ * space left.
+ *
+ * 1/4 - Emergency free undo space level.  Below this point the flusher
+ *      will finalize even if directory dependencies have not been resolved.
+ *
+ * 2/4 - Used by the pruning and reblocking code.  These functions may be
+ *      running in parallel with a flush and cannot be allowed to drop
+ *      available undo space to emergency levels.
+ *
+ * 3/4 - Used at the beginning of a flush to force-sync the volume header
+ *      to give the flush plenty of runway to work in.
  */
-static
 int
-hammer_must_finalize_undo(hammer_mount_t hmp)
+hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
 {
-       if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
-               hkprintf("*");
+       if (hammer_undo_space(trans) <
+           hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
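
To make the 1/4, 2/4 and 3/4 levels concrete, a small arithmetic sketch with an assumed 512MB UNDO FIFO (the real size comes from hammer_undo_max()):

/*
 * Hypothetical numbers only: with a 512MB UNDO FIFO the thresholds
 * come out as 128MB (quarter=1), 256MB (quarter=2) and 384MB
 * (quarter=3) of remaining undo space.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        int64_t undo_max = 512LL * 1024 * 1024;         /* assumed size */
        int quarter;

        for (quarter = 1; quarter <= 3; ++quarter) {
                printf("quarter=%d: exhausted when free undo < %jd MB\n",
                       quarter,
                       (intmax_t)(undo_max * quarter / 4 / (1024 * 1024)));
        }
        return (0);
}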
@@ -359,8 +561,11 @@ hammer_must_finalize_undo(hammer_mount_t hmp)
  * If this is the last finalization in a flush group we also synchronize
  * our cached blockmap and set hmp->flusher_undo_start and our cached undo
  * fifo first_offset so the next flush resets the FIFO pointers.
+ *
+ * If this is not final it is being called because too many dirty meta-data
+ * buffers have built up and must be flushed with UNDO synchronization to
+ * avoid a buffer cache deadlock.
  */
-static
 void
 hammer_flusher_finalize(hammer_transaction_t trans, int final)
 {
@@ -374,6 +579,24 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
        hmp = trans->hmp;
        root_volume = trans->rootvol;
 
+       /*
+        * Exclusively lock the flusher.  This guarantees that all dirty
+        * buffers will be idled (have a mod-count of 0).
+        */
+       ++hmp->flusher.finalize_want;
+       hammer_lock_ex(&hmp->flusher.finalize_lock);
+
+       /*
+        * If this isn't the final sync, several threads may have hit the
+        * meta-limit at the same time and raced.  Only sync if we really
+        * have to, after acquiring the lock.
+        */
+       if (final == 0 && !hammer_flusher_meta_limit(hmp))
+               goto done;
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto done;
+
        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
@@ -381,10 +604,14 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
+               if (io->ioerror)
+                       break;
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
@@ -419,68 +646,114 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete and flush the cache on the target disk.
         */
+       hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");
 
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
+
        /*
-        * Update the on-disk volume header with new UNDO FIFO end position
-        * (do not generate new UNDO records for this change).  We have to
-        * do this for the UNDO FIFO whether (final) is set or not.
+        * HAMMER VERSION < 4:
+        *      Update the on-disk volume header with new UNDO FIFO end
+        *      position (do not generate new UNDO records for this change).
+        *      We have to do this for the UNDO FIFO whether (final) is
+        *      set or not in order for the UNDOs to be recognized on
+        *      recovery.
+        *
+        * HAMMER VERSION >= 4:
+        *      The UNDO FIFO data written above will be recognized on
+        *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
+        * This also has the side effect of updating first_offset based on
+        * a prior finalization when the first finalization of the next flush
+        * cycle occurs, removing any undo info from the prior finalization
+        * from consideration.
+        *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
 
        if (dundomap->first_offset != cundomap->first_offset ||
-           dundomap->next_offset != cundomap->next_offset) {
+                  dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
-               hammer_crc_set_volume(root_volume->ondisk);
-               if (root_volume->ondisk->vol0_next_tid < trans->tid)
-                       root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }
 
+       /*
+        * vol0_next_tid is used for TID selection and is updated without
+        * an UNDO so we do not reuse a TID that may have been rolled-back.
+        *
+        * vol0_last_tid is the highest fully-synchronized TID.  It is
+        * set-up when the UNDO fifo is fully synced, later on (not here).
+        */
        if (root_volume->io.modified) {
-               hammer_io_flush(&root_volume->io);
+               hammer_modify_volume(NULL, root_volume, NULL, 0);
+               if (root_volume->ondisk->vol0_next_tid < trans->tid)
+                       root_volume->ondisk->vol0_next_tid = trans->tid;
+               hammer_crc_set_volume(root_volume->ondisk);
+               hammer_modify_volume_done(root_volume);
+               hammer_io_flush(&root_volume->io, 0);
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete.
+        *
+        * For HAMMER VERSION 4+ filesystems we do not have to wait for
+        * the I/O to complete as the new UNDO FIFO entries are recognized
+        * even without the volume header update.  This allows the volume
+        * header to be flushed along with meta-data, significantly reducing
+        * flush overheads.
         */
-       hammer_io_wait_all(hmp, "hmrfl2");
+       hammer_flusher_clean_loose_ios(hmp);
+       if (hmp->version < HAMMER_VOL_VERSION_FOUR)
+               hammer_io_wait_all(hmp, "hmrfl2");
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
 
        /*
         * Flush meta-data.  The meta-data will be undone if we crash
-        * so we can safely flush it asynchronously.
+        * so we can safely flush it asynchronously.  There is no need
+        * to wait for I/O to complete (or issue a synchronous disk flush).
         *
-        * Repeated catchups will wind up flushing this update's meta-data
-        * and the UNDO buffers for the next update simultaniously.  This
-        * is ok.
+        * In fact, even if we did wait the meta-data will still be undone
+        * by a crash up until the next flush cycle due to the first_offset
+        * in the volume header for the UNDO FIFO not being adjusted until
+        * the following flush cycle.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
+               if (io->lock.refs == 0)
+                       ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
@@ -488,15 +761,103 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
-        * our cached blockmap and
-        * clearing the undo history.
+        * our cached blockmap and clearing the undo history.
+        *
+        * Even though we have updated our cached first_offset, the on-disk
+        * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-               cundomap->first_offset = cundomap->next_offset;
+               if (cundomap->first_offset == cundomap->next_offset) {
+                       hmp->hflags &= ~HMNT_UNDO_DIRTY;
+               } else {
+                       cundomap->first_offset = cundomap->next_offset;
+                       hmp->hflags |= HMNT_UNDO_DIRTY;
+               }
                hammer_clear_undo_history(hmp);
+
+               /*
+                * Flush tid sequencing.  flush_tid1 is fully synchronized,
+                * meaning a crash will not roll it back.  flush_tid2 has
+                * been written out asynchronously and a crash will roll
+                * it back.  flush_tid1 is used for all mirroring masters.
+                */
+               if (hmp->flush_tid1 != hmp->flush_tid2) {
+                       hmp->flush_tid1 = hmp->flush_tid2;
+                       wakeup(&hmp->flush_tid1);
+               }
+               hmp->flush_tid2 = trans->tid;
        }
 
+       /*
+        * Cleanup.  Report any critical errors.
+        */
+failed:
        hammer_sync_unlock(trans);
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
+               kprintf("HAMMER(%s): Critical write error during flush, "
+                       "refusing to sync UNDO FIFO\n",
+                       root_volume->ondisk->vol_name);
+       }
+
+done:
+       hammer_unlock(&hmp->flusher.finalize_lock);
+
+       if (--hmp->flusher.finalize_want == 0)
+               wakeup(&hmp->flusher.finalize_want);
+       hammer_stats_commits += final;
+}
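
The finalize_want/finalize_lock sequence above is a check, lock, re-check pattern: several threads can observe the meta-data limit at once, but after taking the exclusive lock only the first still sees it exceeded and performs the sync.  A small user-space analogue with made-up names and numbers (dirty_space, limit, maybe_finalize) and a mutex standing in for the HAMMER lock:

/*
 * User-space analogue of the check/lock/re-check pattern; not the
 * kernel code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t finalize_lock = PTHREAD_MUTEX_INITIALIZER;
static int dirty_space = 150;           /* assumed units */
static const int limit = 100;

static void
maybe_finalize(void)
{
        if (dirty_space <= limit)       /* cheap unlocked check */
                return;
        pthread_mutex_lock(&finalize_lock);
        if (dirty_space > limit) {      /* re-check; we may have raced */
                printf("flushing, dirty=%d\n", dirty_space);
                dirty_space = 0;
        }
        pthread_mutex_unlock(&finalize_lock);
}

int
main(void)
{
        maybe_finalize();               /* first caller does the sync */
        maybe_finalize();               /* re-check finds nothing to do */
        return (0);
}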
+
+/*
+ * Return non-zero if too many dirty meta-data buffers have built up.
+ *
+ * Since we cannot allow such buffers to flush until we have dealt with
+ * the UNDOs, we risk deadlocking the kernel's buffer cache.
+ */
+int
+hammer_flusher_meta_limit(hammer_mount_t hmp)
+{
+       if (hmp->locked_dirty_space + hmp->io_running_space >
+           hammer_limit_dirtybufspace) {
+               return(1);
+       }
+       return(0);
+}
+
+/*
+ * Return non-zero if too many dirty meta-data buffers have built up.
+ *
+ * This version is used by background operations (mirror, prune, reblock)
+ * to leave room for foreground operations.
+ */
+int
+hammer_flusher_meta_halflimit(hammer_mount_t hmp)
+{
+       if (hmp->locked_dirty_space + hmp->io_running_space >
+           hammer_limit_dirtybufspace / 2) {
+               return(1);
+       }
+       return(0);
+}
+
+/*
+ * Return non-zero if the flusher still has something to flush.
+ */
+int
+hammer_flusher_haswork(hammer_mount_t hmp)
+{
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               return(0);
+       if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
+           TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
+           TAILQ_FIRST(&hmp->undo_list) ||
+           TAILQ_FIRST(&hmp->data_list) ||
+           TAILQ_FIRST(&hmp->meta_list) ||
+           (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
+       ) {
+               return(1);
+       }
+       return(0);
 }