HAMMER VFS - The backend flusher now sorts inodes
diff --git a/sys/vfs/hammer/hammer_flusher.c b/sys/vfs/hammer/hammer_flusher.c
index 7209e5c..5991f3d 100644
--- a/sys/vfs/hammer/hammer_flusher.c
+++ b/sys/vfs/hammer/hammer_flusher.c
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.44 2008/07/19 04:49:39 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
  */
 /*
  * HAMMER dependency flusher thread
@@ -48,6 +48,17 @@ static void hammer_flusher_flush(hammer_mount_t hmp);
 static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
 
+RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
+              hammer_ino_rb_compare);
+
+/*
+ * Inodes are sorted and assigned to slave threads in groups of 128.
+ * We want a flush group size large enough such that the slave threads
+ * are not likely to interfere with each other when accessing the B-Tree,
+ * but not so large that we lose concurrency.
+ */
+#define HAMMER_FLUSH_GROUP_SIZE 128
+
 /*
  * Support structures for the flusher threads.
  */
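
For illustration, here is a minimal userland sketch of the <sys/tree.h> red-black
tree pattern the flusher now uses: inodes inserted in arbitrary order come back
out of the tree sorted by key, so each slave's group covers a contiguous slice of
the B-Tree key space.  The demo_inode type, its obj_id key, and the remove-safe
walk are illustrative stand-ins rather than HAMMER code (builds where
<sys/tree.h> is available, e.g. DragonFly or FreeBSD):

#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_inode {
        RB_ENTRY(demo_inode) rb_flsnode;        /* tree linkage */
        long    obj_id;                         /* sort key */
};

static int
demo_ino_cmp(struct demo_inode *a, struct demo_inode *b)
{
        if (a->obj_id < b->obj_id)
                return(-1);
        if (a->obj_id > b->obj_id)
                return(1);
        return(0);
}

RB_HEAD(demo_ino_tree, demo_inode);
RB_PROTOTYPE(demo_ino_tree, demo_inode, rb_flsnode, demo_ino_cmp);
RB_GENERATE(demo_ino_tree, demo_inode, rb_flsnode, demo_ino_cmp);

int
main(void)
{
        struct demo_ino_tree tree = RB_INITIALIZER(&tree);
        struct demo_inode *ip;
        struct demo_inode *next_ip;
        int i;

        /* insert in random order; the tree keeps them sorted by obj_id */
        for (i = 0; i < 8; ++i) {
                ip = calloc(1, sizeof(*ip));
                ip->obj_id = random() % 1000;
                RB_INSERT(demo_ino_tree, &tree, ip);
        }

        /*
         * Remove-safe walk: grab the successor before processing the
         * current node, the same pattern the flush loop below uses
         * because flushing removes an inode from its flush_tree.
         */
        for (ip = RB_MIN(demo_ino_tree, &tree); ip != NULL; ip = next_ip) {
                next_ip = RB_NEXT(demo_ino_tree, &tree, ip);
                printf("flush inode %ld\n", ip->obj_id);
                RB_REMOVE(demo_ino_tree, &tree, ip);
                free(ip);
        }
        return(0);
}
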
@@ -137,6 +148,15 @@ hammer_flusher_wait(hammer_mount_t hmp, int seq)
 }
 
 void
+hammer_flusher_wait_next(hammer_mount_t hmp)
+{
+       int seq;
+
+       seq = hammer_flusher_async_one(hmp);
+       hammer_flusher_wait(hmp, seq);
+}
+
+void
 hammer_flusher_create(hammer_mount_t hmp)
 {
        hammer_flusher_info_t info;
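
hammer_flusher_wait_next() is a small convenience wrapper: it queues exactly one
new flush group via hammer_flusher_async_one() and blocks until that sequence
number has been flushed.  A hypothetical caller, e.g. a frontend path throttling
itself one flush cycle at a time, might look like this (sketch only;
too_much_dirty_data() is a made-up placeholder, not a HAMMER function):

/*
 * Sketch: stall the caller one flush cycle at a time while too much
 * dirty state is pending.  too_much_dirty_data() is a placeholder.
 */
static void
example_throttle(hammer_mount_t hmp)
{
        while (too_much_dirty_data(hmp))
                hammer_flusher_wait_next(hmp);
}
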
@@ -153,7 +173,7 @@ hammer_flusher_create(hammer_mount_t hmp)
        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
-               info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
+               info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
@@ -186,8 +206,7 @@ hammer_flusher_destroy(hammer_mount_t hmp)
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
-               TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
-               kfree(info, M_HAMMER);
+               kfree(info, hmp->m_misc);
        }
 }
 
@@ -231,7 +250,13 @@ hammer_flusher_master_thread(void *arg)
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
-               hmp->flusher.signal = 0;
+
+               /*
+                * Flush once for each count on signal, but only allow one
+                * extra flush request to build up.
+                */
+               if (--hmp->flusher.signal != 0)
+                       hmp->flusher.signal = 1;
        }
 
        /*
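
flusher.signal now behaves as a counter rather than a flag: requests bump it (on
the async side, not shown in this diff), and the master consumes one count per
pass while clamping whatever remains to 1, so a burst of requests arriving
during a flush coalesces into a single follow-up pass.  A toy userland model of
the clamp (illustrative only, not HAMMER code):

#include <stdio.h>

int
main(void)
{
        int signal = 5;         /* five async flush requests queued up */
        int flushes = 0;

        while (signal) {
                /* consume one count; clamp any backlog to one extra pass */
                if (--signal != 0)
                        signal = 1;
                ++flushes;      /* ...one flush pass happens here... */
        }
        printf("5 requests -> %d flush passes\n", flushes);     /* prints 2 */
        return(0);
}
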
@@ -295,15 +320,21 @@ hammer_flusher_flush(hammer_mount_t hmp)
                        hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
 
                /*
-                * Iterate the inodes in the flg's flush_list and assign
+                * Iterate the inodes in the flg's flush_tree and assign
                 * them to slaves.
                 */
                slave_index = 0;
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
-               next_ip = TAILQ_FIRST(&flg->flush_list);
+               next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
 
                while ((ip = next_ip) != NULL) {
-                       next_ip = TAILQ_NEXT(ip, flush_entry);
+                       next_ip = RB_NEXT(hammer_fls_rb_tree,
+                                         &flg->flush_tree, ip);
+
+                       if (++hmp->check_yield > hammer_yield_check) {
+                               hmp->check_yield = 0;
+                               lwkt_user_yield();
+                       }
 
                        /*
                         * Add ip to the slave's work array.  The slave is
@@ -360,11 +391,10 @@ hammer_flusher_flush(hammer_mount_t hmp)
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
-               if (TAILQ_FIRST(&flg->flush_list) == NULL) {
-                       KKASSERT(TAILQ_EMPTY(&flg->flush_list));
+               if (RB_EMPTY(&flg->flush_tree)) {
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
-                       kfree(flg, M_HAMMER);
+                       kfree(flg, hmp->m_misc);
                        break;
                }
        }
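
The sorted tree pays off when inodes are handed to the slaves: consecutive
inodes go to the same slave in groups of HAMMER_FLUSH_GROUP_SIZE (the work-array
hand-off itself is elided from this hunk), so each slave works a localized
region of the B-Tree.  A toy model of that grouping with small numbers for
readability (illustrative only; the real group size is 128 and the real slave
hand-off is more involved):

#include <stdio.h>

#define GROUP_SIZE      4       /* stand-in for HAMMER_FLUSH_GROUP_SIZE (128) */
#define NSLAVES         3       /* stand-in for the number of slave threads */

int
main(void)
{
        int ino;
        int slave = 0;
        int fill = 0;

        /* a sorted inode stream: contiguous runs map to the same slave */
        for (ino = 100; ino < 124; ++ino) {
                printf("inode %d -> slave %d\n", ino, slave);
                if (++fill == GROUP_SIZE) {
                        fill = 0;
                        slave = (slave + 1) % NSLAVES;
                }
        }
        return(0);
}
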
@@ -394,7 +424,7 @@ hammer_flusher_flush(hammer_mount_t hmp)
 
 
 /*
- * The slave flusher thread pulls work off the master flush_list until no
+ * The slave flusher thread pulls work off the master flush list until no
  * work is left.
  */
 static void
@@ -581,7 +611,7 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
@@ -623,13 +653,13 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete and flush the cache on the target disk.
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");
@@ -638,9 +668,16 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                goto failed;
 
        /*
-        * Update the on-disk volume header with new UNDO FIFO end position
-        * (do not generate new UNDO records for this change).  We have to
-        * do this for the UNDO FIFO whether (final) is set or not.
+        * HAMMER VERSION < 4:
+        *      Update the on-disk volume header with new UNDO FIFO end
+        *      position (do not generate new UNDO records for this change).
+        *      We have to do this for the UNDO FIFO whether (final) is
+        *      set or not in order for the UNDOs to be recognized on
+        *      recovery.
+        *
+        * HAMMER VERSION >= 4:
+        *      The UNDO FIFO data written above will be recognized on
+        *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
@@ -665,31 +702,47 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                hammer_modify_volume_done(root_volume);
        }
 
+       /*
+        * vol0_next_tid is used for TID selection and is updated without
+        * an UNDO so we do not reuse a TID that may have been rolled-back.
+        *
+        * vol0_last_tid is the highest fully-synchronized TID.  It is
+        * set up when the UNDO fifo is fully synced, later on (not here).
+        */
        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
-               hammer_io_flush(&root_volume->io);
+               hammer_io_flush(&root_volume->io, 0);
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete.
+        *
+        * For HAMMER VERSION 4+ filesystems we do not have to wait for
+        * the I/O to complete as the new UNDO FIFO entries are recognized
+        * even without the volume header update.  This allows the volume
+        * header to be flushed along with meta-data, significantly reducing
+        * flush overheads.
         */
        hammer_flusher_clean_loose_ios(hmp);
-       hammer_io_wait_all(hmp, "hmrfl2");
+       if (hmp->version < HAMMER_VOL_VERSION_FOUR)
+               hammer_io_wait_all(hmp, "hmrfl2");
 
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;
 
        /*
         * Flush meta-data.  The meta-data will be undone if we crash
-        * so we can safely flush it asynchronously.
+        * so we can safely flush it asynchronously.  There is no need
+        * to wait for I/O to complete (or issue a synchronous disk flush).
         *
-        * Repeated catchups will wind up flushing this update's meta-data
-        * and the UNDO buffers for the next update simultaniously.  This
-        * is ok.
+        * In fact, even if we did wait, the meta-data would still be undone
+        * by a crash up until the next flush cycle, because first_offset in
+        * the volume header for the UNDO FIFO is not adjusted until the
+        * following flush cycle.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
@@ -700,7 +753,7 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
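
The comments above note that the meta-data flushed here can be pushed out
asynchronously because a crash keeps undoing it until the next flush cycle.  An
illustrative timeline of why that holds (a restatement of those comments, not a
description of the exact recovery code):

        flush cycle N:    UNDO records for cycle N are written and synced
                          before cycle N's meta-data is pushed out
                          asynchronously.
        crash window:     recovery replays the active UNDO range, which still
                          begins at the old first_offset and therefore includes
                          cycle N's records, so cycle N's meta-data is rolled
                          back whether or not it reached the disk.
        flush cycle N+1:  only now does the on-disk first_offset advance past
                          cycle N's UNDO records, retiring them and committing
                          cycle N's meta-data for good.
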
@@ -722,6 +775,18 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);
+
+               /*
+                * Flush tid sequencing.  flush_tid1 is fully synchronized,
+                * meaning a crash will not roll it back.  flush_tid2 has
+                * been written out asynchronously and a crash will roll
+                * it back.  flush_tid1 is used for all mirroring masters.
+                */
+               if (hmp->flush_tid1 != hmp->flush_tid2) {
+                       hmp->flush_tid1 = hmp->flush_tid2;
+                       wakeup(&hmp->flush_tid1);
+               }
+               hmp->flush_tid2 = trans->tid;
        }
 
        /*
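
flush_tid1 only advances to a TID once the UNDO data covering it is fully
synchronized, so a consumer that needs hard durability (the comment mentions
mirroring masters) can sleep on &hmp->flush_tid1.  A hypothetical kernel-side
waiter sketch, shown only to illustrate the pairing with the wakeup() above
(not actual HAMMER code; assumes the usual tsleep()/wakeup() convention):

/*
 * Hypothetical sketch: block until 'tid' can no longer be rolled back
 * by a crash.  Pairs with the wakeup(&hmp->flush_tid1) above.
 */
static void
example_wait_fully_synced(hammer_mount_t hmp, hammer_tid_t tid)
{
        while (hmp->flush_tid1 < tid) {
                hammer_flusher_async_one(hmp);  /* kick off another flush */
                tsleep(&hmp->flush_tid1, 0, "hmrtid", hz);
        }
}
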
@@ -738,6 +803,7 @@ failed:
 
 done:
        hammer_unlock(&hmp->flusher.finalize_lock);
+
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
        hammer_stats_commits += final;