HAMMER 63/Many: IO Error handling features
dragonfly.git: sys/vfs/hammer/hammer_flusher.c
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.43 2008/07/18 00:19:53 dillon Exp $
35  */
36 /*
37  * HAMMER dependency flusher thread
38  *
39  * Meta-data updates create buffer dependencies which are arranged as a
40  * hierarchy of lists.
41  */
42
43 #include "hammer.h"
44
45 static void hammer_flusher_master_thread(void *arg);
46 static void hammer_flusher_slave_thread(void *arg);
47 static void hammer_flusher_flush(hammer_mount_t hmp);
48 static void hammer_flusher_flush_inode(hammer_inode_t ip,
49                                         hammer_transaction_t trans);
50
51 /*
52  * Support structures for the flusher threads.
53  */
54 struct hammer_flusher_info {
55         TAILQ_ENTRY(hammer_flusher_info) entry;
56         struct hammer_mount *hmp;
57         thread_t        td;
58         int             runstate;
59         int             count;
60         hammer_flush_group_t flg;
61         hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
62 };
63
64 typedef struct hammer_flusher_info *hammer_flusher_info_t;
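
/*
 * Note on the structure above (derived from the code below): a slave sits
 * on flusher.ready_list with runstate 0 while idle, is moved to
 * flusher.run_list with runstate 1 once the master has filled its
 * work_array with inodes to flush, and is asked to exit with runstate -1
 * at unmount time.  count is the number of valid entries in work_array.
 */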
65
66 /*
67  * Sync all inodes pending on the flusher.
68  *
69  * All flush groups will be flushed.  This does not queue dirty inodes
70  * to the flush groups, it just flushes out what has already been queued!
71  */
72 void
73 hammer_flusher_sync(hammer_mount_t hmp)
74 {
75         int seq;
76
77         seq = hammer_flusher_async(hmp, NULL);
78         while ((int)(seq - hmp->flusher.done) > 0)
79                 tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
80 }
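
/*
 * For reference, the synchronous path above is equivalent to queueing an
 * asynchronous flush and then waiting on the returned sequence number:
 *
 *	seq = hammer_flusher_async(hmp, NULL);
 *	hammer_flusher_wait(hmp, seq);
 *
 * Callers that want to overlap other work can use the async/wait pair
 * directly.
 */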
81
82 /*
83  * Sync all inodes pending on the flusher - return immediately.
84  *
85  * All flush groups will be flushed.
86  */
87 int
88 hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
89 {
90         hammer_flush_group_t flg;
91         int seq = hmp->flusher.next;
92
93         TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
94                 if (flg->running == 0)
95                         ++seq;
96                 flg->closed = 1;
97                 if (flg == close_flg)
98                         break;
99         }
100         if (hmp->flusher.td) {
101                 if (hmp->flusher.signal++ == 0)
102                         wakeup(&hmp->flusher.signal);
103         } else {
104                 seq = hmp->flusher.done;
105         }
106         return(seq);
107 }
108
109 int
110 hammer_flusher_async_one(hammer_mount_t hmp)
111 {
112         int seq;
113
114         if (hmp->flusher.td) {
115                 seq = hmp->flusher.next;
116                 if (hmp->flusher.signal++ == 0)
117                         wakeup(&hmp->flusher.signal);
118         } else {
119                 seq = hmp->flusher.done;
120         }
121         return(seq);
122 }
123
124 void
125 hammer_flusher_wait(hammer_mount_t hmp, int seq)
126 {
127         while ((int)(seq - hmp->flusher.done) > 0) {
128                 tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
129         }
130 }
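
/*
 * The signed (int)(seq - done) test above is wrap-safe as long as no caller
 * waits on a sequence number more than 2^31 - 1 ahead of flusher.done.  For
 * example, with done == 0xfffffffe and seq == 0x00000001 the difference is
 * 3, so the caller keeps sleeping; once done wraps around to 0x00000001 the
 * difference drops to 0 and the wait terminates.
 */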
131
132 void
133 hammer_flusher_create(hammer_mount_t hmp)
134 {
135         hammer_flusher_info_t info;
136         int i;
137
138         hmp->flusher.signal = 0;
139         hmp->flusher.act = 0;
140         hmp->flusher.done = 0;
141         hmp->flusher.next = 1;
142         hammer_ref(&hmp->flusher.finalize_lock);
143         TAILQ_INIT(&hmp->flusher.run_list);
144         TAILQ_INIT(&hmp->flusher.ready_list);
145
146         lwkt_create(hammer_flusher_master_thread, hmp,
147                     &hmp->flusher.td, NULL, 0, -1, "hammer-M");
148         for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
149                 info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
150                 info->hmp = hmp;
151                 TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
152                 lwkt_create(hammer_flusher_slave_thread, info,
153                             &info->td, NULL, 0, -1, "hammer-S%d", i);
154         }
155 }
156
157 void
158 hammer_flusher_destroy(hammer_mount_t hmp)
159 {
160         hammer_flusher_info_t info;
161
162         /*
163          * Kill the master
164          */
165         hmp->flusher.exiting = 1;
166         while (hmp->flusher.td) {
167                 ++hmp->flusher.signal;
168                 wakeup(&hmp->flusher.signal);
169                 tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
170         }
171
172         /*
173          * Kill the slaves
174          */
175         while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
176                 KKASSERT(info->runstate == 0);
177                 TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
178                 info->runstate = -1;
179                 wakeup(&info->runstate);
180                 while (info->td)
181                         tsleep(&info->td, 0, "hmrwwc", 0);
183                 kfree(info, M_HAMMER);
184         }
185 }
186
187 /*
188  * The master flusher thread manages the flusher sequence id and
189  * synchronization with the slave work threads.
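 *
 * Sequence bookkeeping used in the loop below: flusher.next is the next
 * sequence number to be handed out, flusher.act is the cycle currently
 * being flushed, and flusher.done is the most recently completed cycle.
 * Waiters sleep until flusher.done catches up with the sequence number
 * they were handed.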
190  */
191 static void
192 hammer_flusher_master_thread(void *arg)
193 {
194         hammer_flush_group_t flg;
195         hammer_mount_t hmp;
196
197         hmp = arg;
198
199         for (;;) {
200                 /*
201                  * Do at least one flush cycle.  We may have to update the
202                  * UNDO FIFO even if no inodes are queued.
203                  */
204                 for (;;) {
205                         while (hmp->flusher.group_lock)
206                                 tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
207                         hmp->flusher.act = hmp->flusher.next;
208                         ++hmp->flusher.next;
209                         hammer_flusher_clean_loose_ios(hmp);
210                         hammer_flusher_flush(hmp);
211                         hmp->flusher.done = hmp->flusher.act;
212                         wakeup(&hmp->flusher.done);
213                         flg = TAILQ_FIRST(&hmp->flush_group_list);
214                         if (flg == NULL || flg->closed == 0)
215                                 break;
216                         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
217                                 break;
218                 }
219
220                 /*
221                  * Wait for activity.
222                  */
223                 if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
224                         break;
225                 while (hmp->flusher.signal == 0)
226                         tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
227                 hmp->flusher.signal = 0;
228         }
229
230         /*
231          * And we are done.
232          */
233         hmp->flusher.td = NULL;
234         wakeup(&hmp->flusher.exiting);
235         lwkt_exit();
236 }
237
238 /*
239  * Flush all inodes in the current flush group.
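 *
 * Only the first flush group on flush_group_list is processed per call.
 * Its inodes are handed to the slave threads in batches of
 * HAMMER_FLUSH_GROUP_SIZE via each slave's work_array; a final, partially
 * filled batch is dispatched after the loop and the master then waits for
 * run_list to drain before finalizing the transaction.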
240  */
241 static void
242 hammer_flusher_flush(hammer_mount_t hmp)
243 {
244         hammer_flusher_info_t info;
245         hammer_flush_group_t flg;
246         hammer_reserve_t resv;
247         hammer_inode_t ip;
248         hammer_inode_t next_ip;
249         int slave_index;
250         int count;
251
252         /*
253          * Just in case there's a flush race on mount
254          */
255         if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
256                 return;
257
258         /*
259          * We only do one flg but we may have to loop/retry.
260          */
261         count = 0;
262         while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
263                 ++count;
264                 if (hammer_debug_general & 0x0001) {
265                         kprintf("hammer_flush %d ttl=%d recs=%d\n",
266                                 hmp->flusher.act,
267                                 flg->total_count, flg->refs);
268                 }
269                 if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
270                         break;
271                 hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
272
273                 /*
274                  * If the previous flush cycle just about exhausted our
275                  * UNDO space we may have to do a dummy cycle to move the
276                  * first_offset up before actually digging into a new cycle,
277                  * or the new cycle will not have sufficient undo space.
278                  */
279                 if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
280                         hammer_flusher_finalize(&hmp->flusher.trans, 0);
281
282                 /*
283                  * Ok, we are running this flush group now (this prevents new
284                  * additions to it).
285                  */
286                 flg->running = 1;
287                 if (hmp->next_flush_group == flg)
288                         hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
289
290                 /*
291                  * Iterate the inodes in the flg's flush_list and assign
292                  * them to slaves.
293                  */
294                 slave_index = 0;
295                 info = TAILQ_FIRST(&hmp->flusher.ready_list);
296                 next_ip = TAILQ_FIRST(&flg->flush_list);
297
298                 while ((ip = next_ip) != NULL) {
299                         next_ip = TAILQ_NEXT(ip, flush_entry);
300
301                         /*
302                          * Add ip to the slave's work array.  The slave is
303                          * not currently running.
304                          */
305                         info->work_array[info->count++] = ip;
306                         if (info->count != HAMMER_FLUSH_GROUP_SIZE)
307                                 continue;
308
309                         /*
310                          * Get the slave running
311                          */
312                         TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
313                         TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
314                         info->flg = flg;
315                         info->runstate = 1;
316                         wakeup(&info->runstate);
317
318                         /*
319                          * Get a new slave.  We may have to wait for one to
320                          * finish running.
321                          */
322                         while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
323                                 tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
324                         }
325                 }
326
327                 /*
328                  * Run the current slave if necessary
329                  */
330                 if (info->count) {
331                         TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
332                         TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
333                         info->flg = flg;
334                         info->runstate = 1;
335                         wakeup(&info->runstate);
336                 }
337
338                 /*
339                  * Wait for all slaves to finish running
340                  */
341                 while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
342                         tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
343
344                 /*
345                  * Do the final finalization, clean up
346                  */
347                 hammer_flusher_finalize(&hmp->flusher.trans, 1);
348                 hmp->flusher.tid = hmp->flusher.trans.tid;
349
350                 hammer_done_transaction(&hmp->flusher.trans);
351
352                 /*
353                  * Loop up on the same flg.  If the flg is done clean it up
354                  * and break out.  We only flush one flg.
355                  */
356                 if (TAILQ_FIRST(&flg->flush_list) == NULL) {
357                         KKASSERT(TAILQ_EMPTY(&flg->flush_list));
358                         KKASSERT(flg->refs == 0);
359                         TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
360                         kfree(flg, M_HAMMER);
361                         break;
362                 }
363         }
364
365         /*
366          * We may have pure meta-data to flush, or we may have to finish
367          * cycling the UNDO FIFO, even if there were no flush groups.
368          */
369         if (count == 0 && hammer_flusher_haswork(hmp)) {
370                 hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
371                 hammer_flusher_finalize(&hmp->flusher.trans, 1);
372                 hammer_done_transaction(&hmp->flusher.trans);
373         }
374
375         /*
376          * Clean up any freed big-blocks (typically zone-2). 
377          * resv->flush_group is typically set several flush groups ahead
378          * of the free, delaying reuse of the freed big-block until earlier
379          * flush cycles that might still reference it have completed.
380          */
381         while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
382                 if (resv->flush_group != hmp->flusher.act)
383                         break;
384                 hammer_reserve_clrdelay(hmp, resv);
385         }
386 }
387
388
389 /*
390  * The slave flusher thread pulls work off the master flush_list until no
391  * work is left.
392  */
393 static void
394 hammer_flusher_slave_thread(void *arg)
395 {
396         hammer_flush_group_t flg;
397         hammer_flusher_info_t info;
398         hammer_mount_t hmp;
399         hammer_inode_t ip;
400         int i;
401
402         info = arg;
403         hmp = info->hmp;
404
405         for (;;) {
406                 while (info->runstate == 0)
407                         tsleep(&info->runstate, 0, "hmrssw", 0);
408                 if (info->runstate < 0)
409                         break;
410                 flg = info->flg;
411
412                 for (i = 0; i < info->count; ++i) {
413                         ip = info->work_array[i];
414                         hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
415                         ++hammer_stats_inode_flushes;
416                 }
417                 info->count = 0;
418                 info->runstate = 0;
419                 TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
420                 TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
421                 wakeup(&hmp->flusher.ready_list);
422         }
423         info->td = NULL;
424         wakeup(&info->td);
425         lwkt_exit();
426 }
427
428 void
429 hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
430 {
431         hammer_buffer_t buffer;
432         hammer_io_t io;
433
434         /*
435          * loose ends - buffers without bp's aren't tracked by the kernel
436          * and can build up, so clean them out.  This can occur when an
437          * IO completes on a buffer with no references left.
438          */
439         if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
440                 crit_enter();   /* biodone() race */
441                 while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
442                         KKASSERT(io->mod_list == &hmp->lose_list);
443                         TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
444                         io->mod_list = NULL;
445                         if (io->lock.refs == 0)
446                                 ++hammer_count_refedbufs;
447                         hammer_ref(&io->lock);
448                         buffer = (void *)io;
449                         hammer_rel_buffer(buffer, 0);
450                 }
451                 crit_exit();
452         }
453 }
454
455 /*
456  * Flush a single inode that is part of a flush group.
457  *
458  * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
459  * the front-end should have reserved sufficient space on the media.  Any
460  * error other than EWOULDBLOCK will force the mount to be read-only.
461  */
462 static
463 void
464 hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
465 {
466         hammer_mount_t hmp = ip->hmp;
467         int error;
468
469         hammer_flusher_clean_loose_ios(hmp);
470         error = hammer_sync_inode(trans, ip);
471
472         /*
473  * EWOULDBLOCK can happen under normal operation; all other errors
474          * are considered extremely serious.  We must set WOULDBLOCK
475          * mechanics to deal with the mess left over from the abort of the
476          * previous flush.
477          */
478         if (error) {
479                 ip->flags |= HAMMER_INODE_WOULDBLOCK;
480                 if (error == EWOULDBLOCK)
481                         error = 0;
482         }
483         hammer_flush_inode_done(ip, error);
484         while (hmp->flusher.finalize_want)
485                 tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
486         if (hammer_flusher_undo_exhausted(trans, 1)) {
487                 kprintf("HAMMER: Warning: UNDO area too small!\n");
488                 hammer_flusher_finalize(trans, 1);
489         } else if (hammer_flusher_meta_limit(trans->hmp)) {
490                 hammer_flusher_finalize(trans, 0);
491         }
492 }
493
494 /*
495  * Return non-zero if the UNDO area has less than (quarter / 4) of its
496  * space left.
497  *
498  * 1/4 - Emergency free undo space level.  Below this point the flusher
499  *       will finalize even if directory dependencies have not been resolved.
500  *
501  * 2/4 - Used by the pruning and reblocking code.  These functions may be
502  *       running in parallel with a flush and cannot be allowed to drop
503  *       available undo space to emergency levels.
504  *
505  * 3/4 - Used at the beginning of a flush to force-sync the volume header
506  *       to give the flush plenty of runway to work in.
507  */
508 int
509 hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
510 {
511         if (hammer_undo_space(trans) <
512             hammer_undo_max(trans->hmp) * quarter / 4) {
513                 return(1);
514         } else {
515                 return(0);
516         }
517 }
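
/*
 * Worked example (sizes are illustrative only): if hammer_undo_max()
 * reports 512MB of UNDO FIFO space, quarter == 1 reports exhaustion once
 * less than 128MB remains, quarter == 2 once less than 256MB remains, and
 * quarter == 3 once less than 384MB remains.
 */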
518
519 /*
520  * Flush all pending UNDOs, wait for write completion, update the volume
521  * header with the new UNDO end position, and flush it.  Then
522  * asynchronously flush the meta-data.
523  *
524  * If this is the last finalization in a flush group we also synchronize
525  * our cached blockmap and set hmp->flusher_undo_start and our cached undo
526  * fifo first_offset so the next flush resets the FIFO pointers.
527  *
528  * If this is not final it is being called because too many dirty meta-data
529  * buffers have built up and must be flushed with UNDO synchronization to
530  * avoid a buffer cache deadlock.
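 *
 * Ordering performed below: (1) flush dirty data buffers, (2) under the
 * sync lock, copy the cached blockmap into the volume header when final,
 * (3) flush the UNDO buffers and wait for the I/O to complete, (4) update
 * the on-disk UNDO FIFO indices and vol0_next_tid, flush the volume header
 * and wait again, (5) asynchronously flush the meta-data buffers, and
 * (6) when final, advance the cached UNDO first_offset to next_offset.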
531  */
532 void
533 hammer_flusher_finalize(hammer_transaction_t trans, int final)
534 {
535         hammer_volume_t root_volume;
536         hammer_blockmap_t cundomap, dundomap;
537         hammer_mount_t hmp;
538         hammer_io_t io;
539         int count;
540         int i;
541
542         hmp = trans->hmp;
543         root_volume = trans->rootvol;
544
545         /*
546          * Exclusively lock the flusher.  This guarantees that all dirty
547          * buffers will be idled (have a mod-count of 0).
548          */
549         ++hmp->flusher.finalize_want;
550         hammer_lock_ex(&hmp->flusher.finalize_lock);
551
552         /*
553          * If this isn't the final sync several threads may have hit the
554          * meta-limit at the same time and raced.  Only sync if we really
555          * have to, after acquiring the lock.
556          */
557         if (final == 0 && !hammer_flusher_meta_limit(hmp))
558                 goto done;
559
560         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
561                 goto done;
562
563         /*
564          * Flush data buffers.  This can occur asynchronously and at any
565          * time.  We must interlock against the frontend direct-data write
566          * but do not have to acquire the sync-lock yet.
567          */
568         count = 0;
569         while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
570                 if (io->ioerror)
571                         break;
572                 if (io->lock.refs == 0)
573                         ++hammer_count_refedbufs;
574                 hammer_ref(&io->lock);
575                 hammer_io_write_interlock(io);
576                 KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
577                 hammer_io_flush(io);
578                 hammer_io_done_interlock(io);
579                 hammer_rel_buffer((hammer_buffer_t)io, 0);
580                 ++count;
581         }
582
583         /*
584          * The sync-lock is required for the remaining sequence.  This lock
585          * prevents meta-data from being modified.
586          */
587         hammer_sync_lock_ex(trans);
588
589         /*
590          * If we have been asked to finalize the volume header sync the
591          * cached blockmap to the on-disk blockmap.  Generate an UNDO
592          * record for the update.
593          */
594         if (final) {
595                 cundomap = &hmp->blockmap[0];
596                 dundomap = &root_volume->ondisk->vol0_blockmap[0];
597                 if (root_volume->io.modified) {
598                         hammer_modify_volume(trans, root_volume,
599                                              dundomap, sizeof(hmp->blockmap));
600                         for (i = 0; i < HAMMER_MAX_ZONES; ++i)
601                                 hammer_crc_set_blockmap(&cundomap[i]);
602                         bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
603                         hammer_modify_volume_done(root_volume);
604                 }
605         }
606
607         /*
608          * Flush UNDOs
609          */
610         count = 0;
611         while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
612                 if (io->ioerror)
613                         break;
614                 KKASSERT(io->modify_refs == 0);
615                 if (io->lock.refs == 0)
616                         ++hammer_count_refedbufs;
617                 hammer_ref(&io->lock);
618                 KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
619                 hammer_io_flush(io);
620                 hammer_rel_buffer((hammer_buffer_t)io, 0);
621                 ++count;
622         }
623
624         /*
625          * Wait for I/Os to complete
626          */
627         hammer_flusher_clean_loose_ios(hmp);
628         hammer_io_wait_all(hmp, "hmrfl1");
629
630         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
631                 goto failed;
632
633         /*
634          * Update the on-disk volume header with new UNDO FIFO end position
635          * (do not generate new UNDO records for this change).  We have to
636          * do this for the UNDO FIFO whether (final) is set or not.
637          *
638          * Also update the on-disk next_tid field.  This does not require
639          * an UNDO.  However, because our TID is generated before we get
640          * the sync lock another sync may have beat us to the punch.
641          *
642          * This also has the side effect of updating first_offset based on
643          * a prior finalization when the first finalization of the next flush
644          * cycle occurs, removing any undo info from the prior finalization
645          * from consideration.
646          *
647          * The volume header will be flushed out synchronously.
648          */
649         dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
650         cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
651
652         if (dundomap->first_offset != cundomap->first_offset ||
653                    dundomap->next_offset != cundomap->next_offset) {
654                 hammer_modify_volume(NULL, root_volume, NULL, 0);
655                 dundomap->first_offset = cundomap->first_offset;
656                 dundomap->next_offset = cundomap->next_offset;
657                 hammer_crc_set_blockmap(dundomap);
658                 hammer_modify_volume_done(root_volume);
659         }
660
661         if (root_volume->io.modified) {
662                 hammer_modify_volume(NULL, root_volume, NULL, 0);
663                 if (root_volume->ondisk->vol0_next_tid < trans->tid)
664                         root_volume->ondisk->vol0_next_tid = trans->tid;
665                 hammer_crc_set_volume(root_volume->ondisk);
666                 hammer_modify_volume_done(root_volume);
667                 hammer_io_flush(&root_volume->io);
668         }
669
670         /*
671          * Wait for I/Os to complete
672          */
673         hammer_flusher_clean_loose_ios(hmp);
674         hammer_io_wait_all(hmp, "hmrfl2");
675
676         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
677                 goto failed;
678
679         /*
680          * Flush meta-data.  The meta-data will be undone if we crash
681          * so we can safely flush it asynchronously.
682          *
683          * Repeated catchups will wind up flushing this update's meta-data
684  * and the UNDO buffers for the next update simultaneously.  This
685          * is ok.
686          */
687         count = 0;
688         while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
689                 if (io->ioerror)
690                         break;
691                 KKASSERT(io->modify_refs == 0);
692                 if (io->lock.refs == 0)
693                         ++hammer_count_refedbufs;
694                 hammer_ref(&io->lock);
695                 KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
696                 hammer_io_flush(io);
697                 hammer_rel_buffer((hammer_buffer_t)io, 0);
698                 ++count;
699         }
700
701         /*
702          * If this is the final finalization for the flush group set
703          * up for the next sequence by setting a new first_offset in
704          * our cached blockmap and clearing the undo history.
705          *
706          * Even though we have updated our cached first_offset, the on-disk
707          * first_offset still governs available-undo-space calculations.
708          */
709         if (final) {
710                 cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
711                 if (cundomap->first_offset == cundomap->next_offset) {
712                         hmp->hflags &= ~HMNT_UNDO_DIRTY;
713                 } else {
714                         cundomap->first_offset = cundomap->next_offset;
715                         hmp->hflags |= HMNT_UNDO_DIRTY;
716                 }
717                 hammer_clear_undo_history(hmp);
718         }
719
720         /*
721          * Cleanup.  Report any critical errors.
722          */
723 failed:
724         hammer_sync_unlock(trans);
725
726         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
727                 kprintf("HAMMER(%s): Critical write error during flush, "
728                         "refusing to sync UNDO FIFO\n",
729                         root_volume->ondisk->vol_name);
730         }
731
732 done:
733         hammer_unlock(&hmp->flusher.finalize_lock);
734         if (--hmp->flusher.finalize_want == 0)
735                 wakeup(&hmp->flusher.finalize_want);
736         hammer_stats_commits += final;
737 }
738
739 /*
740  * Return non-zero if too many dirty meta-data buffers have built up.
741  *
742  * Such buffers cannot be flushed until the UNDOs have been dealt with, so
743  * letting too many accumulate risks deadlocking the kernel's buffer cache.
744  */
745 int
746 hammer_flusher_meta_limit(hammer_mount_t hmp)
747 {
748         if (hmp->locked_dirty_space + hmp->io_running_space >
749             hammer_limit_dirtybufspace) {
750                 return(1);
751         }
752         return(0);
753 }
754
755 /*
756  * Return non-zero if too many dirty meta-data buffers have built up.
757  *
758  * This version is used by background operations (mirror, prune, reblock)
759  * to leave room for foreground operations.
760  */
761 int
762 hammer_flusher_meta_halflimit(hammer_mount_t hmp)
763 {
764         if (hmp->locked_dirty_space + hmp->io_running_space >
765             hammer_limit_dirtybufspace / 2) {
766                 return(1);
767         }
768         return(0);
769 }
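
/*
 * Usage sketch (illustrative only; the real callers are the mirror, prune
 * and reblock paths and may differ in detail): a background scan loop can
 * throttle itself against the flusher roughly as follows:
 *
 *	if (hammer_flusher_meta_halflimit(trans->hmp) ||
 *	    hammer_flusher_undo_exhausted(trans, 2)) {
 *		hammer_flusher_wait(trans->hmp,
 *				    hammer_flusher_async_one(trans->hmp));
 *	}
 *
 * i.e. once half the dirty-buffer budget is consumed or less than half the
 * UNDO space remains, kick off one flush cycle and wait for it to complete
 * before dirtying more buffers.
 */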
770
771 /*
772  * Return non-zero if the flusher still has something to flush.
773  */
774 int
775 hammer_flusher_haswork(hammer_mount_t hmp)
776 {
777         if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
778                 return(0);
779         if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
780             TAILQ_FIRST(&hmp->volu_list) ||             /* dirty volume buffers */
781             TAILQ_FIRST(&hmp->undo_list) ||
782             TAILQ_FIRST(&hmp->data_list) ||
783             TAILQ_FIRST(&hmp->meta_list) ||
784             (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
785         ) {
786                 return(1);
787         }
788         return(0);
789 }
790