HAMMER VFS - The backend flusher now sorts inodes
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
index aad7b3c..5991f3d 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.39 2008/07/13 09:32:48 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
  */
 /*
  * HAMMER dependency flusher thread
@@ -48,6 +48,17 @@ static void hammer_flusher_flush(hammer_mount_t hmp);
 static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
 
+RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
+              hammer_ino_rb_compare);
+
+/*
+ * Inodes are sorted and assigned to slave threads in groups of 128.
+ * We want a flush group size large enough such that the slave threads
+ * are not likely to interfere with each other when accessing the B-Tree,
+ * but not so large that we lose concurrency.
+ */
+#define HAMMER_FLUSH_GROUP_SIZE 128
+
 /*
  * Support structures for the flusher threads.
  */
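
The flush groups now key their inodes in an RB tree (flush_tree, generated by the RB_GENERATE above) instead of a plain list, so the slave threads receive inodes in inode-number order and tend to touch adjacent portions of the B-Tree. A minimal userland sketch of the same <sys/tree.h> pattern follows; the names (demo_inode, demo_ino_cmp) are purely illustrative and it assumes a BSD-style <sys/tree.h>; it is not HAMMER's actual comparator or structure layout.

/*
 * Illustrative only: an RB tree keyed by inode number, walked with
 * RB_FIRST/RB_NEXT the way the flusher walks flush_tree.  Assumes a
 * BSD-style <sys/tree.h>; all names here are hypothetical.
 */
#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_inode {
	RB_ENTRY(demo_inode) rb_flsnode;	/* embedded linkage */
	unsigned long long obj_id;		/* sort key */
};

static int
demo_ino_cmp(struct demo_inode *a, struct demo_inode *b)
{
	if (a->obj_id < b->obj_id)
		return(-1);
	if (a->obj_id > b->obj_id)
		return(1);
	return(0);
}

RB_HEAD(demo_fls_rb_tree, demo_inode);
RB_GENERATE(demo_fls_rb_tree, demo_inode, rb_flsnode, demo_ino_cmp);

int
main(void)
{
	struct demo_fls_rb_tree head = RB_INITIALIZER(&head);
	unsigned long long ids[] = { 42, 7, 1000, 3 };
	struct demo_inode *ip;
	struct demo_inode *next_ip;
	size_t i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); ++i) {
		ip = calloc(1, sizeof(*ip));
		ip->obj_id = ids[i];
		RB_INSERT(demo_fls_rb_tree, &head, ip);
	}

	/* Visits 3, 7, 42, 1000: ascending obj_id, like the flusher. */
	next_ip = RB_FIRST(demo_fls_rb_tree, &head);
	while ((ip = next_ip) != NULL) {
		next_ip = RB_NEXT(demo_fls_rb_tree, &head, ip);
		printf("%llu\n", ip->obj_id);
	}
	return(0);
}
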
@@ -75,8 +86,7 @@ hammer_flusher_sync(hammer_mount_t hmp)
        int seq;
 
        seq = hammer_flusher_async(hmp, NULL);
-       while ((int)(seq - hmp->flusher.done) > 0)
-               tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
+       hammer_flusher_wait(hmp, seq);
 }
 
 /*
@@ -121,11 +131,29 @@ hammer_flusher_async_one(hammer_mount_t hmp)
        return(seq);
 }
 
+/*
+ * Wait for the flusher to get to the specified sequence number.
+ * Signal the flusher as often as necessary to keep it going.
+ */
 void
 hammer_flusher_wait(hammer_mount_t hmp, int seq)
 {
-       while ((int)(seq - hmp->flusher.done) > 0)
+       while ((int)(seq - hmp->flusher.done) > 0) {
+               if (hmp->flusher.act != seq) {
+                       if (hmp->flusher.signal++ == 0)
+                               wakeup(&hmp->flusher.signal);
+               }
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
+       }
+}
+
+void
+hammer_flusher_wait_next(hammer_mount_t hmp)
+{
+       int seq;
+
+       seq = hammer_flusher_async_one(hmp);
+       hammer_flusher_wait(hmp, seq);
 }
 
 void
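
hammer_flusher_wait() now nudges the master thread whenever the sequence it is waiting for has not become the active flush yet, and it keeps testing progress with the signed difference (int)(seq - hmp->flusher.done) rather than a direct comparison, so the test stays correct after the sequence counters wrap. A standalone sketch of that wraparound-safe comparison, under the usual assumption that the two counters never drift more than 2^31 apart:

/*
 * Sketch of the wraparound-safe sequence test used in the wait loop.
 * The signed difference is positive exactly when 'seq' is logically
 * ahead of 'done', even across the unsigned wrap point (this relies
 * on the usual two's-complement conversion behavior).
 */
#include <stdio.h>

static int
seq_ahead(unsigned int seq, unsigned int done)
{
	return ((int)(seq - done) > 0);
}

int
main(void)
{
	printf("%d\n", seq_ahead(11, 10));		/* 1: still waiting */
	printf("%d\n", seq_ahead(10, 12));		/* 0: already done  */
	printf("%d\n", seq_ahead(2, 0xfffffffeU));	/* 1: wrapped, wait */
	return(0);
}
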
@@ -145,7 +173,7 @@ hammer_flusher_create(hammer_mount_t hmp)
        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
-               info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
+               info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
@@ -178,8 +206,7 @@ hammer_flusher_destroy(hammer_mount_t hmp)
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
-               TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
-               kfree(info, M_HAMMER);
+               kfree(info, hmp->m_misc);
        }
 }
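
The flusher's per-thread info structures (and, later in this diff, the flush groups themselves) are now charged to the per-mount hmp->m_misc malloc zone instead of the global M_HAMMER type, which keeps each mount's incidental allocations accounted for separately. A hedged sketch of that pattern, assuming DragonFly's kmalloc_create()/kmalloc_destroy() helpers and a hypothetical mount structure; the real zone setup and teardown happen in the mount code, not in hammer_flusher.c:

/*
 * Hypothetical per-mount malloc zone.  Assumes kmalloc_create() and
 * kmalloc_destroy() as found in the DragonFly kernel; names are
 * illustrative, not HAMMER's actual mount setup.
 */
#include <sys/param.h>
#include <sys/malloc.h>

struct hypo_mount {
	struct malloc_type	*m_misc;	/* per-mount allocations */
};

static void
hypo_mount_init(struct hypo_mount *hmp)
{
	kmalloc_create(&hmp->m_misc, "HYPO-others");
}

static void *
hypo_alloc(struct hypo_mount *hmp, size_t bytes)
{
	/* freed with kfree(ptr, hmp->m_misc), as in the diff above */
	return(kmalloc(bytes, hmp->m_misc, M_WAITOK | M_ZERO));
}

static void
hypo_mount_done(struct hypo_mount *hmp)
{
	kmalloc_destroy(&hmp->m_misc);	/* reclaims anything left */
}
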
 
@@ -212,6 +239,8 @@ hammer_flusher_master_thread(void *arg)
                        flg = TAILQ_FIRST(&hmp->flush_group_list);
                        if (flg == NULL || flg->closed == 0)
                                break;
+                       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                               break;
                }
 
                /*
@@ -221,7 +250,13 @@ hammer_flusher_master_thread(void *arg)
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
-               hmp->flusher.signal = 0;
+
+               /*
+                * Flush once for each count on the signal counter, but only
+                * allow one extra flush request to build up.
+                */
+               if (--hmp->flusher.signal != 0)
+                       hmp->flusher.signal = 1;
        }
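
Instead of zeroing flusher.signal outright, the master now consumes one count per flush cycle and clamps anything that accumulated in the meantime to a single pending request, so a burst of hammer_flusher_async() calls coalesces into at most one extra cycle. A userland sketch of that coalescing rule, with a plain counter standing in for the wakeup()/tsleep() handshake:

/*
 * Sketch of the signal-coalescing rule: requesters bump a counter and
 * only wake the master on the 0 -> 1 transition; the master consumes
 * one count per cycle and caps the remainder at one pending request.
 */
#include <stdio.h>

static int signal_count;		/* stands in for hmp->flusher.signal */

static void
request_flush(void)
{
	if (signal_count++ == 0)
		printf("wakeup master\n");
}

static void
master_cycle(void)
{
	printf("flush cycle\n");
	if (--signal_count != 0)
		signal_count = 1;	/* allow only one extra to build up */
}

int
main(void)
{
	request_flush();
	request_flush();
	request_flush();	/* three requests arrive back to back ... */
	master_cycle();		/* ... first cycle leaves exactly one */
	master_cycle();		/* second cycle drains it */
	printf("pending = %d\n", signal_count);		/* prints 0 */
	return(0);
}
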
 
        /*
@@ -263,9 +298,9 @@ hammer_flusher_flush(hammer_mount_t hmp)
                                hmp->flusher.act,
                                flg->total_count, flg->refs);
                }
+               if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+                       break;
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
-               if (hammer_debug_general & 0x0001)
-                       kprintf("T");
 
                /*
                 * If the previous flush cycle just about exhausted our
@@ -277,16 +312,29 @@ hammer_flusher_flush(hammer_mount_t hmp)
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);
 
                /*
-                * Iterate the inodes in the flg's flush_list and assign
-                * them to slaves.
+                * Ok, we are running this flush group now (this prevents new
+                * additions to it).
                 */
                flg->running = 1;
+               if (hmp->next_flush_group == flg)
+                       hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
+
+               /*
+                * Iterate the inodes in the flg's flush_tree and assign
+                * them to slaves.
+                */
                slave_index = 0;
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
-               next_ip = TAILQ_FIRST(&flg->flush_list);
+               next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
 
                while ((ip = next_ip) != NULL) {
-                       next_ip = TAILQ_NEXT(ip, flush_entry);
+                       next_ip = RB_NEXT(hammer_fls_rb_tree,
+                                         &flg->flush_tree, ip);
+
+                       if (++hmp->check_yield > hammer_yield_check) {
+                               hmp->check_yield = 0;
+                               lwkt_user_yield();
+                       }
 
                        /*
                         * Add ip to the slave's work array.  The slave is
@@ -343,20 +391,19 @@ hammer_flusher_flush(hammer_mount_t hmp)
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
-               if (TAILQ_FIRST(&flg->flush_list) == NULL) {
-                       KKASSERT(TAILQ_EMPTY(&flg->flush_list));
+               if (RB_EMPTY(&flg->flush_tree)) {
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
-                       kfree(flg, M_HAMMER);
+                       kfree(flg, hmp->m_misc);
                        break;
                }
        }
 
        /*
-        * We may have pure meta-data to flush, even if there were no
-        * flush groups.
+        * We may have pure meta-data to flush, or we may have to finish
+        * cycling the UNDO FIFO, even if there were no flush groups.
         */
-       if (count == 0 && hmp->locked_dirty_space) {
+       if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
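
For reference, the dispatch loop shown two hunks up walks the sorted flush_tree and packs inodes into the slaves' work arrays, HAMMER_FLUSH_GROUP_SIZE at a time, so each slave works on a contiguous range of inode numbers. A stripped-down sketch of that batching shape, with arrays and a fixed worker count standing in for the kernel TAILQs and LWKT threads (illustrative only, not the actual dispatch code):

/*
 * Illustrative batching only: sorted items are handed to workers in
 * fixed-size groups, so each worker sees a contiguous key range.
 */
#include <stdio.h>

#define BATCH_SIZE	4	/* HAMMER uses HAMMER_FLUSH_GROUP_SIZE (128) */
#define NWORKERS	3

int
main(void)
{
	int sorted_inodes[10] = { 3, 7, 9, 12, 15, 21, 22, 30, 41, 47 };
	int worker = 0;
	int fill = 0;
	int i;

	for (i = 0; i < 10; ++i) {
		printf("worker %d <- inode %d\n", worker, sorted_inodes[i]);
		if (++fill == BATCH_SIZE) {
			fill = 0;	/* batch full: kick this worker */
			worker = (worker + 1) % NWORKERS;
		}
	}
	return(0);
}
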
@@ -377,7 +424,7 @@ hammer_flusher_flush(hammer_mount_t hmp)
 
 
 /*
- * The slave flusher thread pulls work off the master flush_list until no
+ * The slave flusher thread pulls work off the master flush list until no
  * work is left.
  */
 static void
@@ -402,6 +449,7 @@ hammer_flusher_slave_thread(void *arg)
                for (i = 0; i < info->count; ++i) {
                        ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
+                       ++hammer_stats_inode_flushes;
                }
                info->count = 0;
                info->runstate = 0;
@@ -444,10 +492,9 @@ hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
 /*
  * Flush a single inode that is part of a flush group.
  *
- * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
- * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
- * will re-queue the inode for the next flush sequence and force the
- * flusher to run again if this occurs.
+ * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
+ * the front-end should have reserved sufficient space on the media.  Any
+ * error other than EWOULDBLOCK will force the mount to be read-only.
  */
 static
 void
@@ -458,9 +505,19 @@ hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
 
        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(trans, ip);
-       if (error != EWOULDBLOCK)
-               ip->error = error;
-       hammer_flush_inode_done(ip);
+
+       /*
+        * EWOULDBLOCK can happen under normal operation; all other errors
+        * are considered extremely serious.  We must set the WOULDBLOCK
+        * mechanics to deal with the mess left over from the abort of the
+        * previous flush.
+        */
+       if (error) {
+               ip->flags |= HAMMER_INODE_WOULDBLOCK;
+               if (error == EWOULDBLOCK)
+                       error = 0;
+       }
+       hammer_flush_inode_done(ip, error);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
@@ -537,6 +594,9 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;
 
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto done;
+
        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
@@ -544,12 +604,14 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
@@ -584,26 +646,38 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete and flush the cache on the target disk.
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");
 
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
+
        /*
-        * Update the on-disk volume header with new UNDO FIFO end position
-        * (do not generate new UNDO records for this change).  We have to
-        * do this for the UNDO FIFO whether (final) is set or not.
+        * HAMMER VERSION < 4:
+        *      Update the on-disk volume header with new UNDO FIFO end
+        *      position (do not generate new UNDO records for this change).
+        *      We have to do this for the UNDO FIFO whether (final) is
+        *      set or not in order for the UNDOs to be recognized on
+        *      recovery.
+        *
+        * HAMMER VERSION >= 4:
+        *      The UNDO FIFO data written above will be recognized on
+        *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
@@ -620,7 +694,7 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
 
        if (dundomap->first_offset != cundomap->first_offset ||
-           dundomap->next_offset != cundomap->next_offset) {
+                  dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
@@ -628,37 +702,58 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
                hammer_modify_volume_done(root_volume);
        }
 
+       /*
+        * vol0_next_tid is used for TID selection and is updated without
+        * an UNDO so we do not reuse a TID that may have been rolled-back.
+        *
+        * vol0_last_tid is the highest fully-synchronized TID.  It is
+        * set-up when the UNDO fifo is fully synced, later on (not here).
+        */
        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
-               hammer_io_flush(&root_volume->io);
+               hammer_io_flush(&root_volume->io, 0);
        }
 
        /*
-        * Wait for I/Os to complete
+        * Wait for I/Os to complete.
+        *
+        * For HAMMER VERSION 4+ filesystems we do not have to wait for
+        * the I/O to complete as the new UNDO FIFO entries are recognized
+        * even without the volume header update.  This allows the volume
+        * header to be flushed along with meta-data, significantly reducing
+        * flush overheads.
         */
        hammer_flusher_clean_loose_ios(hmp);
-       hammer_io_wait_all(hmp, "hmrfl2");
+       if (hmp->version < HAMMER_VOL_VERSION_FOUR)
+               hammer_io_wait_all(hmp, "hmrfl2");
+
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               goto failed;
 
        /*
         * Flush meta-data.  The meta-data will be undone if we crash
-        * so we can safely flush it asynchronously.
+        * so we can safely flush it asynchronously.  There is no need
+        * to wait for I/O to complete (or issue a synchronous disk flush).
         *
-        * Repeated catchups will wind up flushing this update's meta-data
-        * and the UNDO buffers for the next update simultaniously.  This
-        * is ok.
+        * In fact, even if we did wait, the meta-data would still be undone
+        * by a crash up until the next flush cycle, because the UNDO FIFO's
+        * first_offset in the volume header is not adjusted until the
+        * following flush cycle.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
+               if (io->ioerror)
+                       break;
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-               hammer_io_flush(io);
+               hammer_io_flush(io, 0);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
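
The final-flush bookkeeping in the next hunk compares the UNDO FIFO's first_offset and next_offset: next_offset advances as undo records are generated and first_offset is pulled up to it when a flush retires them, so equality means no new undo records were written and the volume-header sync (HMNT_UNDO_DIRTY) can be skipped. A toy model of that test, with plain integers in place of the UNDO zone blockmap:

/*
 * Toy model only: undo generation advances next_offset, a completed
 * flush pulls first_offset up to match, and the FIFO needs a header
 * sync only while the two differ.
 */
#include <stdio.h>

struct toy_undomap {
	unsigned long long first_offset;	/* retired up to here */
	unsigned long long next_offset;		/* next record goes here */
};

static int
undo_fifo_dirty(const struct toy_undomap *map)
{
	return(map->first_offset != map->next_offset);
}

int
main(void)
{
	struct toy_undomap map = { 0, 0 };

	printf("dirty=%d\n", undo_fifo_dirty(&map));	/* 0: nothing logged */
	map.next_offset += 4096;			/* generate an undo */
	printf("dirty=%d\n", undo_fifo_dirty(&map));	/* 1: needs retiring */
	map.first_offset = map.next_offset;		/* final flush retires it */
	printf("dirty=%d\n", undo_fifo_dirty(&map));	/* 0: clean again */
	return(0);
}
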
@@ -673,16 +768,45 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-               cundomap->first_offset = cundomap->next_offset;
+               if (cundomap->first_offset == cundomap->next_offset) {
+                       hmp->hflags &= ~HMNT_UNDO_DIRTY;
+               } else {
+                       cundomap->first_offset = cundomap->next_offset;
+                       hmp->hflags |= HMNT_UNDO_DIRTY;
+               }
                hammer_clear_undo_history(hmp);
+
+               /*
+                * Flush tid sequencing.  flush_tid1 is fully synchronized,
+                * meaning a crash will not roll it back.  flush_tid2 has
+                * been written out asynchronously and a crash will roll
+                * it back.  flush_tid1 is used for all mirroring masters.
+                */
+               if (hmp->flush_tid1 != hmp->flush_tid2) {
+                       hmp->flush_tid1 = hmp->flush_tid2;
+                       wakeup(&hmp->flush_tid1);
+               }
+               hmp->flush_tid2 = trans->tid;
        }
 
+       /*
+        * Cleanup.  Report any critical errors.
+        */
+failed:
        hammer_sync_unlock(trans);
 
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
+               kprintf("HAMMER(%s): Critical write error during flush, "
+                       "refusing to sync UNDO FIFO\n",
+                       root_volume->ondisk->vol_name);
+       }
+
 done:
        hammer_unlock(&hmp->flusher.finalize_lock);
+
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
+       hammer_stats_commits += final;
 }
 
 /*
@@ -701,6 +825,12 @@ hammer_flusher_meta_limit(hammer_mount_t hmp)
        return(0);
 }
 
+/*
+ * Return non-zero if too many dirty meta-data buffers have built up.
+ *
+ * This version is used by background operations (mirror, prune, reblock)
+ * to leave room for foreground operations.
+ */
 int
 hammer_flusher_meta_halflimit(hammer_mount_t hmp)
 {
@@ -711,3 +841,23 @@ hammer_flusher_meta_halflimit(hammer_mount_t hmp)
        return(0);
 }
 
+/*
+ * Return non-zero if the flusher still has something to flush.
+ */
+int
+hammer_flusher_haswork(hammer_mount_t hmp)
+{
+       if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
+               return(0);
+       if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
+           TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
+           TAILQ_FIRST(&hmp->undo_list) ||
+           TAILQ_FIRST(&hmp->data_list) ||
+           TAILQ_FIRST(&hmp->meta_list) ||
+           (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
+       ) {
+               return(1);
+       }
+       return(0);
+}
+
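
hammer_flusher_haswork() gives callers a way to keep flushing until every dirty queue is drained and the pending UNDO FIFO header sync has gone out. A hypothetical shutdown-style usage sketch (the loop is illustrative; the actual unmount path is outside this diff and this assumes the declarations from hammer.h):

/*
 * Hypothetical usage only: drive synchronous flush cycles until the
 * flusher reports no remaining work, then tear the threads down.
 * Assumes the hammer.h declarations; not part of this change.
 */
static void
example_drain_and_destroy(hammer_mount_t hmp)
{
	while (hammer_flusher_haswork(hmp))
		hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);
}
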