HAMMER VFS - Fix degenerate stall condition in flusher during unmount
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	    hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE	128

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end, if there
 * are no flush groups pending we still cycle the flusher and
 * must allocate a sequence number to placemark the spot even
 * though no flush group will ever be associated with it.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq;

	/*
	 * Already closed
	 */
	if (close_flg && close_flg->closed)
		return(close_flg->seq);

	/*
	 * Close flush groups until we hit the end of the list
	 * or close_flg.
	 */
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->closed == 0 && flg->running == 0);
		flg->closed = 1;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		if (flg == close_flg)
			break;
	}

	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
		if (flg) {
			seq = flg->seq;
		} else {
			seq = hmp->flusher.next;
			++hmp->flusher.next;
		}
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	hammer_flush_group_t flg;
	int seq;

	if (hmp->flusher.td) {
		flg = TAILQ_FIRST(&hmp->flush_group_list);
		seq = hammer_flusher_async(hmp, flg);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to finish flushing the specified sequence
 * number.  The flush is already running and will signal us on
 * each completion.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0)
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}
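
/*
 * Illustrative sketch (not part of the build; the function name is made
 * up for illustration): the signed-difference test above stays correct
 * when the sequence counter wraps, because only the relative distance
 * between seq and flusher.done matters.  This assumes the same two's
 * complement int arithmetic the rest of this file relies on.
 */
#if 0
static int
example_seq_pending(int seq, int done)
{
	/* non-zero while seq is logically ahead of done, wrap-safe */
	return ((int)(seq - done) > 0);
}
/* e.g. example_seq_pending(INT_MIN + 2, INT_MAX - 2) == 1, only 5 apart */
#endif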

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}
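
/*
 * Illustrative sketch only (not compiled): the caller pattern described
 * above hammer_flusher_async_one(), iterating flush groups until some
 * caller-supplied condition is satisfied.  The function name and the
 * predicate are hypothetical and stand in for whatever a real caller
 * would check.
 */
#if 0
static void
example_flush_until(hammer_mount_t hmp, int (*done_pred)(hammer_mount_t hmp))
{
	int seq;

	while (done_pred(hmp) == 0) {
		seq = hammer_flusher_async_one(hmp);
		hammer_flusher_wait(hmp, seq);
	}
}
#endif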

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_mount_t hmp;
	int seq;
	int nomore;

	hmp = arg;

	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		/*
		 * Flush all sequence numbers up to but not including .next,
		 * or until an open flush group is encountered.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0,
				       "hmrhld", 0);
			hammer_flusher_clean_loose_ios(hmp);

			seq = hammer_flusher_flush(hmp, &nomore);
			hmp->flusher.done = seq;
			wakeup(&hmp->flusher.done);

			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
			if (nomore)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  The sequence
	 * number does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		KKASSERT((int)(flg->seq - seq) > 0);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);

			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused
	 * prematurely.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if ((int)(resv->flush_group - seq) > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}

/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * I/O completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation; all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}
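
/*
 * Illustrative sketch only (not compiled; the wrapper name is made up):
 * how the quarter argument is chosen by the callers in this file.  The
 * pre-flush check with quarter=3 triggers a dummy finalization to advance
 * first_offset, while the emergency check with quarter=1 forces a full
 * finalization mid-flush.
 */
#if 0
static void
example_undo_checks(hammer_mount_t hmp, hammer_transaction_t trans)
{
	/* before digging into a new flush cycle (see hammer_flusher_flush) */
	if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
		hammer_flusher_finalize(&hmp->flusher.trans, 0);

	/* emergency level hit while flushing an inode */
	if (hammer_flusher_undo_exhausted(trans, 1))
		hammer_flusher_finalize(trans, 1);
}
#endif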

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header, sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with the new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set up when the UNDO FIFO is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records, for example by reblocking,
	 * pruning, and REDO-mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with the meta-data, significantly
	 * reducing flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 *
	 * No I/O interlock is needed; bioops callbacks will not mess with
	 * meta-data buffers.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}
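
/*
 * Illustrative sketch only (not compiled, and not copied from the actual
 * mirror/prune/reblock code): the sort of throttle a background operation
 * might apply using the half-limit above, yielding to the flusher when it
 * has dirtied too much meta-data.
 */
#if 0
static void
example_background_throttle(hammer_mount_t hmp)
{
	if (hammer_flusher_meta_halflimit(hmp)) {
		/* let the flusher catch up before dirtying more buffers */
		hammer_flusher_wait_next(hmp);
	}
}
#endif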

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
	    RB_ROOT(&hmp->undo_root) ||
	    RB_ROOT(&hmp->data_root) ||
	    RB_ROOT(&hmp->meta_root) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}
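
/*
 * Illustrative sketch only (not compiled, and not copied from the unmount
 * path): the commit subject mentions a flusher stall during unmount; the
 * teardown side conceptually drains the flusher with a loop of this shape
 * before destroying it.
 */
#if 0
static void
example_unmount_drain(hammer_mount_t hmp)
{
	while (hammer_flusher_haswork(hmp))
		hammer_flusher_sync(hmp);
	hammer_flusher_destroy(hmp);
}
#endif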