X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/3cdb9703ecf1c06dbbe87078cd9515f95ad547d1..ff003b11ff22dd10f420eba8a38b7c0838d79acd:/sys/vfs/hammer/hammer_flusher.c

diff --git a/sys/vfs/hammer/hammer_flusher.c b/sys/vfs/hammer/hammer_flusher.c
index d50997cf13..5991f3de40 100644
--- a/sys/vfs/hammer/hammer_flusher.c
+++ b/sys/vfs/hammer/hammer_flusher.c
@@ -48,6 +48,17 @@ static void hammer_flusher_flush(hammer_mount_t hmp);
 static void hammer_flusher_flush_inode(hammer_inode_t ip,
 					hammer_transaction_t trans);
 
+RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
+	     hammer_ino_rb_compare);
+
+/*
+ * Inodes are sorted and assigned to slave threads in groups of 128.
+ * We want a flush group size large enough such that the slave threads
+ * are not likely to interfere with each other when accessing the B-Tree,
+ * but not so large that we lose concurrency.
+ */
+#define HAMMER_FLUSH_GROUP_SIZE	128
+
 /*
  * Support structures for the flusher threads.
  */
@@ -195,7 +206,6 @@ hammer_flusher_destroy(hammer_mount_t hmp)
 			wakeup(&info->runstate);
 			while (info->td)
 				tsleep(&info->td, 0, "hmrwwc", 0);
-			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
 			kfree(info, hmp->m_misc);
 		}
 	}
@@ -310,15 +320,21 @@ hammer_flusher_flush(hammer_mount_t hmp)
 		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
 
 		/*
-		 * Iterate the inodes in the flg's flush_list and assign
+		 * Iterate the inodes in the flg's flush_tree and assign
 		 * them to slaves.
 		 */
 		slave_index = 0;
 		info = TAILQ_FIRST(&hmp->flusher.ready_list);
-		next_ip = TAILQ_FIRST(&flg->flush_list);
+		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
 
 		while ((ip = next_ip) != NULL) {
-			next_ip = TAILQ_NEXT(ip, flush_entry);
+			next_ip = RB_NEXT(hammer_fls_rb_tree,
+					  &flg->flush_tree, ip);
+
+			if (++hmp->check_yield > hammer_yield_check) {
+				hmp->check_yield = 0;
+				lwkt_user_yield();
+			}
 
 			/*
 			 * Add ip to the slave's work array.  The slave is
@@ -375,8 +391,7 @@
 		 * Loop up on the same flg.  If the flg is done clean it up
 		 * and break out.  We only flush one flg.
 		 */
-		if (TAILQ_FIRST(&flg->flush_list) == NULL) {
-			KKASSERT(TAILQ_EMPTY(&flg->flush_list));
+		if (RB_EMPTY(&flg->flush_tree)) {
 			KKASSERT(flg->refs == 0);
 			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
 			kfree(flg, hmp->m_misc);
@@ -409,7 +424,7 @@
 
 /*
- * The slave flusher thread pulls work off the master flush_list until no
+ * The slave flusher thread pulls work off the master flush list until no
  * work is left.
  */
 static void
@@ -596,7 +611,7 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
 		hammer_ref(&io->lock);
 		hammer_io_write_interlock(io);
 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-		hammer_io_flush(io);
+		hammer_io_flush(io, 0);
 		hammer_io_done_interlock(io);
 		hammer_rel_buffer((hammer_buffer_t)io, 0);
 		++count;
@@ -638,13 +653,13 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
 			++hammer_count_refedbufs;
 		hammer_ref(&io->lock);
 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-		hammer_io_flush(io);
+		hammer_io_flush(io, hammer_undo_reclaim(io));
 		hammer_rel_buffer((hammer_buffer_t)io, 0);
 		++count;
 	}
 
 	/*
-	 * Wait for I/Os to complete
+	 * Wait for I/Os to complete and flush the cache on the target disk.
 	 */
 	hammer_flusher_clean_loose_ios(hmp);
 	hammer_io_wait_all(hmp, "hmrfl1");
@@ -653,9 +668,16 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
 		goto failed;
 
 	/*
-	 * Update the on-disk volume header with new UNDO FIFO end position
-	 * (do not generate new UNDO records for this change).  We have to
-	 * do this for the UNDO FIFO whether (final) is set or not.
+	 * HAMMER VERSION < 4:
+	 *	Update the on-disk volume header with new UNDO FIFO end
+	 *	position (do not generate new UNDO records for this change).
+	 *	We have to do this for the UNDO FIFO whether (final) is
+	 *	set or not in order for the UNDOs to be recognized on
+	 *	recovery.
+	 *
+	 * HAMMER VERSION >= 4:
+	 *	The UNDO FIFO data written above will be recognized on
+	 *	recovery without us having to sync the volume header.
 	 *
 	 * Also update the on-disk next_tid field.  This does not require
 	 * an UNDO.  However, because our TID is generated before we get
@@ -693,25 +715,34 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
 		root_volume->ondisk->vol0_next_tid = trans->tid;
 		hammer_crc_set_volume(root_volume->ondisk);
 		hammer_modify_volume_done(root_volume);
-		hammer_io_flush(&root_volume->io);
+		hammer_io_flush(&root_volume->io, 0);
 	}
 
 	/*
-	 * Wait for I/Os to complete
+	 * Wait for I/Os to complete.
+	 *
+	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
+	 * the I/O to complete as the new UNDO FIFO entries are recognized
+	 * even without the volume header update.  This allows the volume
+	 * header to flushed along with meta-data, significantly reducing
+	 * flush overheads.
 	 */
 	hammer_flusher_clean_loose_ios(hmp);
-	hammer_io_wait_all(hmp, "hmrfl2");
+	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
+		hammer_io_wait_all(hmp, "hmrfl2");
 
 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
 		goto failed;
 
 	/*
 	 * Flush meta-data.  The meta-data will be undone if we crash
-	 * so we can safely flush it asynchronously.
+	 * so we can safely flush it asynchronously.  There is no need
+	 * to wait for I/O to complete (or issue a synchronous disk flush).
 	 *
-	 * Repeated catchups will wind up flushing this update's meta-data
-	 * and the UNDO buffers for the next update simultaniously.  This
-	 * is ok.
+	 * In fact, even if we did wait the meta-data will still be undone
+	 * by a crash up until the next flush cycle due to the first_offset
+	 * in the volume header for the UNDO FIFO not being adjusted until
+	 * the following flush cycle.
 	 */
 	count = 0;
 	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
@@ -722,7 +753,7 @@
 			++hammer_count_refedbufs;
 		hammer_ref(&io->lock);
 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
-		hammer_io_flush(io);
+		hammer_io_flush(io, 0);
 		hammer_rel_buffer((hammer_buffer_t)io, 0);
 		++count;
 	}
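
Note on the hunk at -310 above: the flusher master now walks the flush group's RB tree (RB_FIRST/RB_NEXT), hands the sorted inodes to slave threads in groups of HAMMER_FLUSH_GROUP_SIZE, and calls lwkt_user_yield() periodically during the scan. The standalone user-space sketch below illustrates only that batching-plus-yield pattern; it is not DragonFly kernel code, the identifiers (work_item, assign_to_slave, NUM_SLAVES, YIELD_CHECK, NITEMS) are hypothetical, and sched_yield() stands in for lwkt_user_yield().

/*
 * Illustrative sketch only -- not HAMMER kernel code.  Distributes a
 * sorted array of work items to slave slots in batches of GROUP_SIZE
 * and yields the CPU periodically during the long scan, mirroring the
 * check_yield/lwkt_user_yield() logic added in the hunk at -310 above.
 */
#include <sched.h>
#include <stdio.h>

#define GROUP_SIZE	128	/* items handed to one slave before advancing */
#define NUM_SLAVES	4	/* number of slave work slots (hypothetical) */
#define YIELD_CHECK	16	/* yield after scanning this many items */
#define NITEMS		1000

struct work_item {
	int id;			/* stand-in for a sorted inode */
};

static void
assign_to_slave(int slave, const struct work_item *item)
{
	/* A real flusher would append the item to the slave's work array. */
	printf("slave %d <- item %d\n", slave, item->id);
}

int
main(void)
{
	struct work_item items[NITEMS];
	int slave = 0;
	int in_group = 0;
	int check_yield = 0;
	int i;

	for (i = 0; i < NITEMS; ++i)
		items[i].id = i;

	for (i = 0; i < NITEMS; ++i) {
		/*
		 * Yield periodically so a long scan does not hog the CPU
		 * (the kernel code calls lwkt_user_yield() instead).
		 */
		if (++check_yield > YIELD_CHECK) {
			check_yield = 0;
			sched_yield();
		}

		assign_to_slave(slave, &items[i]);

		/* Move to the next slave slot after a full batch. */
		if (++in_group == GROUP_SIZE) {
			in_group = 0;
			slave = (slave + 1) % NUM_SLAVES;
		}
	}
	return (0);
}

In the actual diff the batching constant is HAMMER_FLUSH_GROUP_SIZE and the scan walks the flush group's RB tree rather than an array, so inodes reach the slaves in sorted order and, per the added comment, the group size is chosen so neighbouring slaves are unlikely to contend on the B-Tree while still keeping enough concurrency.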