/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */

/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
static int hammer_flusher_flush_inode(hammer_inode_t ip, void *data);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
            hammer_ino_rb_compare);

/*
 * Support structures for the flusher threads.
 */
typedef struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        hammer_mount_t hmp;
        thread_t td;
        int     runstate;
        hammer_flush_group_t flg;
        struct hammer_transaction trans;        /* per-slave transaction */
} *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
        hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end if there
 * are no flush groups pending we still cycle the flusher, and
 * must allocate a sequence number to placemark the spot even
 * though no flush group will ever be associated with it.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq;

        /*
         * Already closed
         */
        if (close_flg && close_flg->closed)
                return(close_flg->seq);

        /*
         * Close flush groups until we hit the end of the list
         * or close_flg.
         */
        while ((flg = hmp->next_flush_group) != NULL) {
                KKASSERT(flg->closed == 0 && flg->running == 0);
                flg->closed = 1;
                hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
                if (flg == close_flg)
                        break;
        }

        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                if (flg) {
                        seq = flg->seq;
                } else {
                        seq = hmp->flusher.next;
                        ++hmp->flusher.next;
                }
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}
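
/*
 * Usage sketch: a frontend path that wants a specific inode's flush
 * group pushed out can pair the async/wait calls.  The call site shown
 * here is hypothetical and assumes ip has already been queued to a
 * flush group:
 *
 *      seq = hammer_flusher_async(hmp, ip->flush_group);
 *      hammer_flusher_wait(hmp, seq);
 */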

/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
        hammer_flush_group_t flg;
        int seq;

        if (hmp->flusher.td) {
                flg = TAILQ_FIRST(&hmp->flush_group_list);
                seq = hammer_flusher_async(hmp, flg);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

/*
 * Wait for the flusher to finish flushing the specified sequence
 * number.  The flush is already running and will signal us on
 * each completion.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while (seq - hmp->flusher.done > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}
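
/*
 * Note on the comparison above: sequence numbers are plain ints that
 * increment forever, so ordering is tested with a signed difference
 * rather than a direct compare.  A sketch of why, assuming two's
 * complement wrap:
 *
 *      seq  = 0x80000001       (just past the signed wrap point)
 *      done = 0x7fffffff
 *      seq - done == 2 > 0     (seq is still "after" done)
 *
 * A naive (seq > done) would be false here and the wait would return
 * early.
 */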

/*
 * Returns non-zero if the flusher is currently running.  Used for
 * time-domain multiplexing of frontend operations in order to avoid
 * starving the backend flusher.
 */
int
hammer_flusher_running(hammer_mount_t hmp)
{
        int seq = hmp->flusher.next - 1;

        if (seq - hmp->flusher.done > 0)
                return(1);
        return(0);
}
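
/*
 * Usage sketch (hypothetical call site): a frontend loop can use this
 * to time-domain multiplex against the backend, backing off while a
 * flush is active:
 *
 *      while (hammer_flusher_running(hmp))
 *              tsleep(&hmp->flusher.done, 0, "hmrbak", hz / 10);
 */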

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async_one(hmp);
        hammer_flusher_wait(hmp, seq);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, hmp->m_misc);
        }
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp;
        int seq;
        int nomore;

        hmp = arg;
        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                /*
                 * Flush all sequence numbers up to but not including .next,
                 * or until an open flush group is encountered.
                 */
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0,
                                       "hmrhld", 0);
                        hammer_flusher_clean_loose_ios(hmp);

                        seq = hammer_flusher_flush(hmp, &nomore);
                        hmp->flusher.done = seq;
                        wakeup(&hmp->flusher.done);

                        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                                break;
                        if (nomore)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting &&
                    TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}

/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        int count;
        int seq;

        /*
         * Just in case there's a flush race on mount.  Seq number
         * does not change.
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
                *nomorep = 1;
                return (hmp->flusher.done);
        }
        *nomorep = 0;

        /*
         * Flush the next sequence number.  Sequence numbers can exist
         * without an assigned flush group, indicating that just a FIFO flush
         * should occur.
         */
        seq = hmp->flusher.done + 1;
        flg = TAILQ_FIRST(&hmp->flush_group_list);
        if (flg == NULL) {
                if (seq == hmp->flusher.next) {
                        *nomorep = 1;
                        return (hmp->flusher.done);
                }
        } else if (seq == flg->seq) {
                if (flg->closed) {
                        KKASSERT(flg->running == 0);
                        flg->running = 1;
                        if (hmp->fill_flush_group == flg) {
                                hmp->fill_flush_group =
                                        TAILQ_NEXT(flg, flush_entry);
                        }
                } else {
                        *nomorep = 1;
                        return (hmp->flusher.done);
                }
        } else {
                /*
                 * Sequence number problems can only happen if a critical
                 * filesystem error occurred which forced the filesystem into
                 * read-only mode.
                 */
                KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
                flg = NULL;
        }

        /*
         * We only do one flg but we may have to loop/retry.
         *
         * Due to various races it is possible to come across a flush
         * group which has not yet been closed.
         */
        count = 0;
        while (flg && flg->running) {
                ++count;
                if (hammer_debug_general & 0x0001) {
                        hdkprintf("%d ttl=%d recs=%d\n",
                                flg->seq, flg->total_count, flg->refs);
                }
                if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                        break;
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                KKASSERT(hmp->next_flush_group != flg);

                /*
                 * Place the flg in the flusher structure and start the
                 * slaves running.  The slaves will compete for inodes
                 * to flush.
                 *
                 * Make a per-thread copy of the transaction.
                 */
                while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        info->trans = hmp->flusher.trans;
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (RB_EMPTY(&flg->flush_tree)) {
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, hmp->m_misc);
                        break;
                }
                KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
        }

        /*
         * We may have pure meta-data to flush, or we may have to finish
         * cycling the UNDO FIFO, even if there were no flush groups.
         */
        if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it is safe to do so.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flg_no - seq > 0)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        return (seq);
}

/*
 * The slave flusher thread pulls work off the master flush list until no
 * more can be found.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;

        info = arg;
        hmp = info->hmp;
        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

                RB_SCAN(hammer_fls_rb_tree, &flg->flush_tree, NULL,
                        hammer_flusher_flush_inode, info);

                info->runstate = 0;
                info->flg = NULL;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         *
         * The io_token is needed to protect the list.
         */
        if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
                lwkt_gettoken(&hmp->io_token);
                while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
                        KKASSERT(io->mod_root == &hmp->lose_root);
                        RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
                        io->mod_root = NULL;
                        hammer_ref(&io->lock);
                        buffer = HAMMER_ITOB(io);
                        hammer_rel_buffer(buffer, 0);
                }
                lwkt_reltoken(&hmp->io_token);
        }
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
        hammer_flusher_info_t info = data;
        hammer_mount_t hmp = info->hmp;
        hammer_transaction_t trans = &info->trans;
        int error;

        /*
         * Several slaves are operating on the same flush group concurrently.
         * The SLAVEFLUSH flag prevents them from tripping over each other.
         *
         * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
         *       to be resynced by another, but normally such inodes are not
         *       revisited until the master loop gets to them.
         */
        if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
                return(0);
        ip->flags |= HAMMER_INODE_SLAVEFLUSH;
        ++hammer_stats_inode_flushes;

        hammer_flusher_clean_loose_ios(hmp);

        error = hammer_sync_inode(trans, ip);

        /*
         * EWOULDBLOCK can happen under normal operation, all other errors
         * are considered extremely serious.  We must set WOULDBLOCK
         * mechanics to deal with the mess left over from the abort of the
         * previous flush.
         */
        if (error) {
                ip->flags |= HAMMER_INODE_WOULDBLOCK;
                if (error == EWOULDBLOCK)
                        error = 0;
        }
        hammer_sync_inode_done(ip, error);
        /* ip invalid */

        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                hkprintf("Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
        return (0);
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}
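
/*
 * Usage sketch: the quarter argument selects which of the levels
 * documented above is being tested.  For example, a background
 * reblocker (hypothetical call site) would refuse to drive undo
 * space below the 2/4 level:
 *
 *      if (hammer_flusher_undo_exhausted(trans, 2))
 *              hammer_flusher_wait_next(hmp);
 */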

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        hammer_off_t save_undo_next_offset;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         *
         * These data buffers have already been collected prior to the
         * related inode(s) getting queued to the flush group.
         */
        count = 0;
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
                                hammer_crc_set_blockmap(hmp->version,
                                                        &cundomap[i]);
                        }
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs.  This can occur concurrently with the data flush
         * because data writes never overwrite.
         *
         * This also waits for I/Os to complete and flushes the cache on
         * the target disk.
         *
         * Record the UNDO append point as this can continue to change
         * after we have flushed the UNDOs.
         */
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        hammer_lock_ex(&hmp->undo_lock);
        save_undo_next_offset = cundomap->next_offset;
        hammer_unlock(&hmp->undo_lock);
        hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * HAMMER VERSION < 4:
         *      Update the on-disk volume header with new UNDO FIFO end
         *      position (do not generate new UNDO records for this change).
         *      We have to do this for the UNDO FIFO whether (final) is
         *      set or not in order for the UNDOs to be recognized on
         *      recovery.
         *
         * HAMMER VERSION >= 4:
         *      The UNDO FIFO data written above will be recognized on
         *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != save_undo_next_offset) {
                hammer_modify_volume_noundo(NULL, root_volume);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = save_undo_next_offset;
                hammer_crc_set_blockmap(hmp->version, dundomap);
                hammer_modify_volume_done(root_volume);
        }

        /*
         * vol0_next_tid is used for TID selection and is updated without
         * an UNDO so we do not reuse a TID that may have been rolled-back.
         *
         * vol0_last_tid is the highest fully-synchronized TID.  It is
         * set-up when the UNDO fifo is fully synced, later on (not here).
         *
         * The root volume can be open for modification by other threads
         * generating UNDO or REDO records.  For example, reblocking,
         * pruning, REDO mode fast-fsyncs, so the write interlock is
         * mandatory.
         */
        if (root_volume->io.modified) {
                hammer_modify_volume_noundo(NULL, root_volume);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(hmp->version, root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_write_interlock(&root_volume->io);
                hammer_io_flush(&root_volume->io, 0);
                hammer_io_done_interlock(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete.
         *
         * For HAMMER VERSION 4+ filesystems we do not have to wait for
         * the I/O to complete as the new UNDO FIFO entries are recognized
         * even without the volume header update.  This allows the volume
         * header to be flushed along with meta-data, significantly reducing
         * flush overheads.
         */
        hammer_flusher_clean_loose_ios(hmp);
        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                hammer_io_wait_all(hmp, "hmrfl3", 1);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.  There is no need
         * to wait for I/O to complete (or issue a synchronous disk flush).
         *
         * In fact, even if we did wait the meta-data will still be undone
         * by a crash up until the next flush cycle due to the first_offset
         * in the volume header for the UNDO FIFO not being adjusted until
         * the following flush cycle.
         *
         * No io interlock is needed, bioops callbacks will not mess with
         * meta-data buffers which are not in a modified state.
         */
        count = 0;
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
                if (io->ioerror)
                        break;
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         *
         * We synchronize to save_undo_next_offset rather than
         * cundomap->next_offset because that is what we flushed out
         * above.
         *
         * NOTE! UNDOs can only be added with the sync_lock held
         *       so we can clear the undo history without racing.
         *       REDOs can be added at any time which is why we
         *       have to be careful and use save_undo_next_offset
         *       when setting the new first_offset.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                if (cundomap->first_offset != save_undo_next_offset) {
                        cundomap->first_offset = save_undo_next_offset;
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else if (cundomap->first_offset != cundomap->next_offset) {
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else {
                        hmp->hflags &= ~HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);

                /*
                 * Flush tid sequencing.  flush_tid1 is fully synchronized,
                 * meaning a crash will not roll it back.  flush_tid2 has
                 * been written out asynchronously and a crash will roll
                 * it back.  flush_tid1 is used for all mirroring masters.
                 */
                if (hmp->flush_tid1 != hmp->flush_tid2) {
                        hmp->flush_tid1 = hmp->flush_tid2;
                        wakeup(&hmp->flush_tid1);
                }
                hmp->flush_tid2 = trans->tid;

                /*
                 * Clear the REDO SYNC flag.  This flag is used to ensure
                 * that the recovery span in the UNDO/REDO FIFO contains
                 * at least one REDO SYNC record.
                 */
                hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
        }

        /*
         * Cleanup.  Report any critical errors.
         */
failed:
        hammer_sync_unlock(trans);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                hvkprintf(root_volume,
                        "Critical write error during flush, "
                        "refusing to sync UNDO FIFO\n");
        }

done:
        hammer_unlock(&hmp->flusher.finalize_lock);

        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
        hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
        hammer_io_t io;
        int count;

        count = 0;
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
                hammer_io_write_interlock(io);
                hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_io_done_interlock(io);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }
        hammer_flusher_clean_loose_ios(hmp);
        if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
            (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
                hammer_io_wait_all(hmp, "hmrfl1", 1);
        } else {
                hammer_io_wait_all(hmp, "hmrfl2", 0);
        }
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}
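
/*
 * Usage sketch (hypothetical call site): background operations poll
 * this between work units and give the flusher a chance to drain
 * before continuing:
 *
 *      if (hammer_flusher_meta_halflimit(trans->hmp))
 *              hammer_flusher_wait_next(trans->hmp);
 */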

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                return(0);
        if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
            RB_ROOT(&hmp->volu_root) ||                 /* dirty buffers */
            RB_ROOT(&hmp->undo_root) ||
            RB_ROOT(&hmp->data_root) ||
            RB_ROOT(&hmp->meta_root) ||
            (hmp->hflags & HMNT_UNDO_DIRTY)) {          /* UNDO FIFO sync */
                return(1);
        }
        return(0);
}

void
hammer_flush_dirty(hammer_mount_t hmp, int max_count)
{
        int count = 0;
        int dummy;

        while (hammer_flusher_haswork(hmp)) {
                hammer_flusher_sync(hmp);
                ++count;
                if (count >= 5) {
                        if (count == 5)
                                hkprintf("flushing.");
                        else
                                kprintf(".");
                        tsleep(&dummy, 0, "hmrufl", hz);
                }
                if (max_count != -1 && count == max_count) {
                        kprintf("giving up");
                        break;
                }
        }
        if (count >= max_count)
                kprintf("\n");
}