/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.44 2008/07/19 04:49:39 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;
        int             runstate;
        int             count;
        hammer_flush_group_t flg;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
        hammer_flusher_wait(hmp, seq);
}
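
/*
 * Note (illustrative, not part of the original source): a typical caller
 * queues its dirty inodes first, e.g. via hammer_flush_inode(), and then
 * either fires and forgets or blocks on a sequence number:
 *
 *      seq = hammer_flusher_async(hmp, NULL);  (kick the flusher)
 *      hammer_flusher_wait(hmp, seq);          (block until seq completes)
 *
 * which is exactly the pair of calls hammer_flusher_sync() makes above.
 */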

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq = hmp->flusher.next;

        TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
                if (flg->running == 0)
                        ++seq;
                flg->closed = 1;
                if (flg == close_flg)
                        break;
        }
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

int
hammer_flusher_async_one(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while ((int)(seq - hmp->flusher.done) > 0) {
                if (hmp->flusher.act != seq) {
                        if (hmp->flusher.signal++ == 0)
                                wakeup(&hmp->flusher.signal);
                }
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}
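
/*
 * Note (editorial): the (int)(seq - hmp->flusher.done) test above is written
 * as a signed difference so the wait loop still orders sequence numbers
 * correctly if the counter eventually wraps around.
 */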

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, M_HAMMER);
        }
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_mount_t hmp;

        hmp = arg;

        for (;;) {
                /*
                 * Do at least one flush cycle.  We may have to update the
                 * UNDO FIFO even if no inodes are queued.
                 */
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                        hmp->flusher.act = hmp->flusher.next;
                        ++hmp->flusher.next;
                        hammer_flusher_clean_loose_ios(hmp);
                        hammer_flusher_flush(hmp);
                        hmp->flusher.done = hmp->flusher.act;
                        wakeup(&hmp->flusher.done);
                        flg = TAILQ_FIRST(&hmp->flush_group_list);
                        if (flg == NULL || flg->closed == 0)
                                break;
                        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t next_ip;
        int slave_index;
        int count;

        /*
         * Just in case there's a flush race on mount.
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
                return;

        /*
         * We only do one flg but we may have to loop/retry.
         */
        count = 0;
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                ++count;
                if (hammer_debug_general & 0x0001) {
                        kprintf("hammer_flush %d ttl=%d recs=%d\n",
                                hmp->flusher.act,
                                flg->total_count, flg->refs);
                }
                if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                        break;
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                /*
                 * Ok, we are running this flush group now (this prevents new
                 * additions to it).
                 */
                flg->running = 1;
                if (hmp->next_flush_group == flg)
                        hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

                /*
                 * Iterate the inodes in the flg's flush_list and assign
                 * them to slaves.
                 */
                slave_index = 0;
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
                next_ip = TAILQ_FIRST(&flg->flush_list);

                while ((ip = next_ip) != NULL) {
                        next_ip = TAILQ_NEXT(ip, flush_entry);

                        /*
                         * Add ip to the slave's work array.  The slave is
                         * not currently running.
                         */
                        info->work_array[info->count++] = ip;
                        if (info->count != HAMMER_FLUSH_GROUP_SIZE)
                                continue;

                        /*
                         * Get the slave running.
                         */
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);

                        /*
                         * Get a new slave.  We may have to wait for one to
                         * finish running.
                         */
                        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
                                tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
                        }
                }

                /*
                 * Run the current slave if necessary.
                 */
                if (info->count) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running.
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up.
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (TAILQ_FIRST(&flg->flush_list) == NULL) {
                        KKASSERT(TAILQ_EMPTY(&flg->flush_list));
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, M_HAMMER);
                        break;
                }
        }

        /*
         * We may have pure meta-data to flush, or we may have to finish
         * cycling the UNDO FIFO, even if there were no flush groups.
         */
        if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free so that the freed big-block is not handed out again
         * until it is safe to reuse it.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int i;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

                for (i = 0; i < info->count; ++i) {
                        ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                        ++hammer_stats_inode_flushes;
                }
                info->count = 0;
                info->runstate = 0;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}
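
/*
 * Note (editorial summary of the handshake above): the master hands inodes
 * to a slave in batches of up to HAMMER_FLUSH_GROUP_SIZE via work_array[],
 * moves the slave from ready_list to run_list, and wakes it through
 * runstate.  The slave flushes its whole batch, puts itself back on
 * ready_list, and wakes the master, so at most HAMMER_MAX_FLUSHERS batches
 * are in flight at any one time.
 */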

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(trans, ip);

        /*
         * EWOULDBLOCK can happen under normal operation, all other errors
         * are considered extremely serious.  We must set WOULDBLOCK
         * mechanics to deal with the mess left over from the abort of the
         * previous flush.
         */
        if (error) {
                ip->flags |= HAMMER_INODE_WOULDBLOCK;
                if (error == EWOULDBLOCK)
                        error = 0;
        }
        hammer_flush_inode_done(ip, error);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}
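
/*
 * Worked example (editorial, assuming hammer_undo_max() reports a 1GB
 * UNDO FIFO):
 *
 *      quarter = 1:  exhausted when less than 256MB of undo space remains
 *      quarter = 2:  exhausted when less than 512MB remains
 *      quarter = 3:  exhausted when less than 768MB remains
 */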

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
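/*
 * Ordering summary (editorial): the code below first flushes dirty data
 * buffers, then the UNDO FIFO, waits for those I/Os, synchronously updates
 * and flushes the volume header, waits again, and only then flushes the
 * meta-data asynchronously, since the meta-data can be reconstructed from
 * the UNDOs after a crash.
 */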
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->ioerror)
                        break;
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                if (io->ioerror)
                        break;
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete.
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * Update the on-disk volume header with the new UNDO FIFO end
         * position (do not generate new UNDO records for this change).
         * We have to do this for the UNDO FIFO whether (final) is set
         * or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beaten us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete.
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                if (io->ioerror)
                        break;
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                if (cundomap->first_offset == cundomap->next_offset) {
                        hmp->hflags &= ~HMNT_UNDO_DIRTY;
                } else {
                        cundomap->first_offset = cundomap->next_offset;
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);
        }

        /*
         * Cleanup.  Report any critical errors.
         */
failed:
        hammer_sync_unlock(trans);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                kprintf("HAMMER(%s): Critical write error during flush, "
                        "refusing to sync UNDO FIFO\n",
                        root_volume->ondisk->vol_name);
        }

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
        hammer_stats_commits += final;
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Because such buffers cannot be flushed until we have dealt with the
 * UNDOs, letting them accumulate risks deadlocking the kernel's buffer
 * cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                return(0);
        if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
            TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
            TAILQ_FIRST(&hmp->undo_list) ||
            TAILQ_FIRST(&hmp->data_list) ||
            TAILQ_FIRST(&hmp->meta_list) ||
            (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
        ) {
                return(1);
        }
        return(0);
}