/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */

/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	      hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE	128

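/*
 * Illustrative arithmetic (an example, not from the original source): a
 * flush group holding, say, 1000 dirty inodes is dispatched as eight work
 * arrays (7 x 128 plus a final array of 104), handed to whichever slave
 * threads are idle at the time.
 */
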
/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

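/*
 * Note: an idle flusher_info sits on flusher.ready_list and is moved to
 * flusher.run_list while it has work.  The master fills work_array[], sets
 * runstate and wakes the slave; the slave flushes its inodes, clears
 * runstate, puts itself back on the ready_list and wakes any waiter.  A
 * negative runstate tells the slave to exit.
 */
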
/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

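/*
 * Queue a flush of whatever is currently pending and return its sequence
 * number immediately; callers that need to block can hand the result to
 * hammer_flusher_wait().  Unlike hammer_flusher_async() this does not
 * close any flush groups.
 */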
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

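/*
 * Note on the wait loop above: flusher sequence numbers eventually wrap,
 * so the condition is written as (int)(seq - done) > 0 rather than
 * (seq > done).  For example, with done == 0x7fffffff and a target seq of
 * 0x80000001 the signed difference is 2, so the caller keeps waiting even
 * though a direct comparison would claim the target had already passed.
 */
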
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, TDF_MPSAFE, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, TDF_MPSAFE, -1, "hammer-S%d", i);
	}
}

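/*
 * Note: all HAMMER_MAX_FLUSHERS slave threads are created up front and
 * parked on flusher.ready_list; they do nothing until the master hands
 * them a work_array during a flush cycle.
 */
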
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

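/*
 * Sketch of one flush cycle as implemented below: the master picks the
 * oldest flush group, partitions its inodes into work_array chunks of up
 * to HAMMER_FLUSH_GROUP_SIZE, wakes idle slaves from the ready_list, waits
 * for the run_list to drain, and finally runs hammer_flusher_finalize()
 * to sync the UNDO FIFO and volume header.
 */
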
/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int count;

	/*
	 * Just in case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);

			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}

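/*
 * Note: hammer_flusher_flush() handles at most one flush group per call.
 * When more closed groups remain the master thread above simply loops and
 * calls it again, so a backlog of groups drains one sequence number at a
 * time.
 */
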
/*
 * The slave flusher thread pulls work off the master flush list until no
 * more work remains.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	lwkt_reltoken(&hmp->fs_token);
	wakeup(&info->td);
	lwkt_exit();
}

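/*
 * Note: the master and slave threads all run under the per-mount fs_token.
 * LWKT tokens are not held across blocking points, so a slave that blocks
 * inside hammer_flusher_flush_inode() does not prevent the other flusher
 * threads from running; the token only serializes them while runnable.
 */
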
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

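/*
 * Worked example (hypothetical sizes, purely illustrative): with a 128MB
 * UNDO FIFO, quarter=3 returns true once less than 96MB is free, quarter=2
 * below 64MB, and quarter=1 below 32MB, matching the three thresholds
 * described above.
 */
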
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_off_t save_undo_next_offset;
	hammer_io_t io;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records.  For example, reblocking,
	 * pruning, REDO mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 *
	 * No io interlock is needed, bioops callbacks will not mess with
	 * meta-data buffers which are undergoing I/O.
	 */
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
	}

	/*
	 * If this is the final finalization for the flush group, set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

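/*
 * Flush dirty UNDO buffers.
 *
 * Summary of the mode handling below: HAMMER_FLUSH_UNDOS_FORCED, and
 * HAMMER_FLUSH_UNDOS_AUTO when UNDO buffers were actually written, wait
 * for the I/O using the "hmrfl1" variant of hammer_io_wait_all(); all
 * other cases use the "hmrfl2" variant.
 */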
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}