sys/vfs/hammer/hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

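/*
 * Generate the red-black tree support for the per-flush-group inode
 * tree (flush_tree), using the same inode comparison function as the
 * main per-mount inode tree.
 */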
RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
              hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE 128

/*
 * Support structures for the flusher threads.
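 *
 * Each slave is handed up to HAMMER_FLUSH_GROUP_SIZE inodes in its
 * work_array (count tracks how many entries are valid) along with the
 * flush group (flg) they belong to.  runstate is 0 while the slave is
 * idle on the ready_list, 1 while it is running on the run_list, and
 * -1 when it has been asked to exit.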
 */
struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;
        int             runstate;
        int             count;
        hammer_flush_group_t flg;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups; it just flushes out what has already been queued.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
        hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq = hmp->flusher.next;

        TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
                if (flg->running == 0)
                        ++seq;
                flg->closed = 1;
                if (flg == close_flg)
                        break;
        }
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

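/*
 * Request a single flush cycle without closing any flush groups and
 * return the sequence number to wait on.  If the flusher thread is not
 * running the last completed sequence is returned instead.
 */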
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while ((int)(seq - hmp->flusher.done) > 0) {
                if (hmp->flusher.act != seq) {
                        if (hmp->flusher.signal++ == 0)
                                wakeup(&hmp->flusher.signal);
                }
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

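/*
 * Request one flush cycle and wait for it to complete.
 */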
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async_one(hmp);
        hammer_flusher_wait(hmp, seq);
}

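/*
 * Create the master flusher thread and the pool of slave work threads
 * at mount time.  All slaves start out idle on the ready_list.
 */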
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, TDF_MPSAFE, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, TDF_MPSAFE, -1, "hammer-S%d", i);
        }
}

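/*
 * Shut down the master and slave flusher threads and free the per-slave
 * support structures.
 */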
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, hmp->m_misc);
        }
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_mount_t hmp;

        hmp = arg;

        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                /*
                 * Do at least one flush cycle.  We may have to update the
                 * UNDO FIFO even if no inodes are queued.
                 */
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                        hmp->flusher.act = hmp->flusher.next;
                        ++hmp->flusher.next;
                        hammer_flusher_clean_loose_ios(hmp);
                        hammer_flusher_flush(hmp);
                        hmp->flusher.done = hmp->flusher.act;
                        wakeup(&hmp->flusher.done);
                        flg = TAILQ_FIRST(&hmp->flush_group_list);
                        if (flg == NULL || flg->closed == 0)
                                break;
                        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

                /*
                 * Flush for each count on signal but only allow one extra
                 * flush request to build up.
                 */
                if (--hmp->flusher.signal != 0)
                        hmp->flusher.signal = 1;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t next_ip;
        int slave_index;
        int count;

        /*
         * Just in case there's a flush race on mount
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
                return;

        /*
         * We only do one flg but we may have to loop/retry.
         */
        count = 0;
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                ++count;
                if (hammer_debug_general & 0x0001) {
                        kprintf("hammer_flush %d ttl=%d recs=%d\n",
                                hmp->flusher.act,
                                flg->total_count, flg->refs);
                }
                if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                        break;
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                /*
                 * Ok, we are running this flush group now (this prevents new
                 * additions to it).
                 */
                flg->running = 1;
                if (hmp->next_flush_group == flg)
                        hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

                /*
                 * Iterate the inodes in the flg's flush_tree and assign
                 * them to slaves.
                 */
                slave_index = 0;
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
                next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

                while ((ip = next_ip) != NULL) {
                        next_ip = RB_NEXT(hammer_fls_rb_tree,
                                          &flg->flush_tree, ip);

                        if (++hmp->check_yield > hammer_yield_check) {
                                hmp->check_yield = 0;
                                lwkt_yield();
                        }

                        /*
                         * Add ip to the slave's work array.  The slave is
                         * not currently running.
                         */
                        info->work_array[info->count++] = ip;
                        if (info->count != HAMMER_FLUSH_GROUP_SIZE)
                                continue;

                        /*
                         * Get the slave running
                         */
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);

                        /*
                         * Get a new slave.  We may have to wait for one to
                         * finish running.
                         */
                        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
                                tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
                        }
                }

                /*
                 * Run the current slave if necessary
                 */
                if (info->count) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (RB_EMPTY(&flg->flush_tree)) {
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, hmp->m_misc);
                        break;
                }
        }

        /*
         * We may have pure meta-data to flush, or we may have to finish
         * cycling the UNDO FIFO, even if there were no flush groups.
         */
        if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free so that the freed big-block is not reused until
         * the free can no longer be undone.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
}


/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int i;

        info = arg;
        hmp = info->hmp;
        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

                for (i = 0; i < info->count; ++i) {
                        ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                        ++hammer_stats_inode_flushes;
                }
                info->count = 0;
                info->runstate = 0;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}

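/*
 * Clean up 'loose' I/O structures (buffers with no bp) which have
 * accumulated on the mount's lose_list.
 */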
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         *
         * The io_token is needed to protect the list.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                lwkt_gettoken(&hmp->io_token);
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                lwkt_reltoken(&hmp->io_token);
        }
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(trans, ip);

        /*
         * EWOULDBLOCK can happen under normal operation; all other errors
         * are considered extremely serious.  We must set WOULDBLOCK
         * mechanics to deal with the mess left over from the abort of the
         * previous flush.
         */
        if (error) {
                ip->flags |= HAMMER_INODE_WOULDBLOCK;
                if (error == EWOULDBLOCK)
                        error = 0;
        }
        hammer_flush_inode_done(ip, error);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        hammer_off_t save_undo_next_offset;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         *
         * These data buffers have already been collected prior to the
         * related inode(s) getting queued to the flush group.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs.  This can occur concurrently with the data flush
         * because data writes never overwrite.
         *
         * This also waits for I/Os to complete and flushes the cache on
         * the target disk.
         *
         * Record the UNDO append point as this can continue to change
         * after we have flushed the UNDOs.
         */
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        hammer_lock_ex(&hmp->undo_lock);
        save_undo_next_offset = cundomap->next_offset;
        hammer_unlock(&hmp->undo_lock);
        hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * HAMMER VERSION < 4:
         *      Update the on-disk volume header with new UNDO FIFO end
         *      position (do not generate new UNDO records for this change).
         *      We have to do this for the UNDO FIFO whether (final) is
         *      set or not in order for the UNDOs to be recognized on
         *      recovery.
         *
         * HAMMER VERSION >= 4:
         *      The UNDO FIFO data written above will be recognized on
         *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
                   dundomap->next_offset != save_undo_next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = save_undo_next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        /*
         * vol0_next_tid is used for TID selection and is updated without
         * an UNDO so we do not reuse a TID that may have been rolled-back.
         *
         * vol0_last_tid is the highest fully-synchronized TID.  It is
         * set-up when the UNDO fifo is fully synced, later on (not here).
         *
         * The root volume can be open for modification by other threads
         * generating UNDO or REDO records (for example reblocking, pruning,
         * or REDO mode fast-fsyncs), so the write interlock is mandatory.
         */
        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_write_interlock(&root_volume->io);
                hammer_io_flush(&root_volume->io, 0);
                hammer_io_done_interlock(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete.
         *
         * For HAMMER VERSION 4+ filesystems we do not have to wait for
         * the I/O to complete as the new UNDO FIFO entries are recognized
         * even without the volume header update.  This allows the volume
         * header to be flushed along with meta-data, significantly reducing
         * flush overheads.
         */
        hammer_flusher_clean_loose_ios(hmp);
        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                hammer_io_wait_all(hmp, "hmrfl3", 1);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.  There is no need
         * to wait for I/O to complete (or issue a synchronous disk flush).
         *
         * In fact, even if we did wait the meta-data will still be undone
         * by a crash up until the next flush cycle due to the first_offset
         * in the volume header for the UNDO FIFO not being adjusted until
         * the following flush cycle.
         *
         * No io interlock is needed, bioops callbacks will not mess with
         * meta data buffers.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                if (io->ioerror)
                        break;
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         *
         * We synchronize to save_undo_next_offset rather than
         * cundomap->next_offset because that is what we flushed out
         * above.
         *
         * NOTE! UNDOs can only be added with the sync_lock held
         *       so we can clear the undo history without racing.
         *       REDOs can be added at any time which is why we
         *       have to be careful and use save_undo_next_offset
         *       when setting the new first_offset.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                if (cundomap->first_offset != save_undo_next_offset) {
                        cundomap->first_offset = save_undo_next_offset;
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else if (cundomap->first_offset != cundomap->next_offset) {
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else {
                        hmp->hflags &= ~HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);

                /*
                 * Flush tid sequencing.  flush_tid1 is fully synchronized,
                 * meaning a crash will not roll it back.  flush_tid2 has
                 * been written out asynchronously and a crash will roll
                 * it back.  flush_tid1 is used for all mirroring masters.
                 */
                if (hmp->flush_tid1 != hmp->flush_tid2) {
                        hmp->flush_tid1 = hmp->flush_tid2;
                        wakeup(&hmp->flush_tid1);
                }
                hmp->flush_tid2 = trans->tid;

                /*
                 * Clear the REDO SYNC flag.  This flag is used to ensure
                 * that the recovery span in the UNDO/REDO FIFO contains
                 * at least one REDO SYNC record.
                 */
                hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
        }

        /*
         * Cleanup.  Report any critical errors.
         */
failed:
        hammer_sync_unlock(trans);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                kprintf("HAMMER(%s): Critical write error during flush, "
                        "refusing to sync UNDO FIFO\n",
                        root_volume->ondisk->vol_name);
        }

done:
        hammer_unlock(&hmp->flusher.finalize_lock);

        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
        hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
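 *
 * In FORCED mode, or in AUTO mode when UNDO buffers were actually
 * written by this call, we do the full wait for the I/O to complete;
 * otherwise a weaker wait is used.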
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
        hammer_io_t io;
        int count;

        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_write_interlock(io);
                hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }
        hammer_flusher_clean_loose_ios(hmp);
        if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
            (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
                hammer_io_wait_all(hmp, "hmrfl1", 1);
        } else {
                hammer_io_wait_all(hmp, "hmrfl2", 0);
        }
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
        if (hmp->ronly)
                return(0);
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                return(0);
        if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
            TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
            TAILQ_FIRST(&hmp->undo_list) ||
            TAILQ_FIRST(&hmp->data_list) ||
            TAILQ_FIRST(&hmp->meta_list) ||
            (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
        ) {
                return(1);
        }
        return(0);
}