/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	    hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE	128

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

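/*
 * Request a single flush cycle without waiting for it.  Returns the
 * sequence number the caller can later pass to hammer_flusher_wait().
 */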
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

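/*
 * Request one flush cycle and wait for it to complete.
 */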
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

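/*
 * Set up the flusher for a mount: initialize the sequencing state and
 * spin up the master thread plus HAMMER_MAX_FLUSHERS slave threads.
 */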
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

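/*
 * Tear down the flusher on unmount: ask the master thread to exit,
 * then wake and reap each idle slave thread.
 */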
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);

			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_user_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free so that the freed block is not reused until it is
	 * safe to reuse it.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}


/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}

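/*
 * Reclaim buffers on the mount's loose-ends list whose I/O has completed
 * but which no longer have any references (see the comment in the body).
 */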
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
		hammer_io_limit_backlog(hmp);
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records.  For example, reblocking,
	 * pruning, REDO mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
		hammer_io_limit_backlog(hmp);
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}