HAMMER VFS - Improve saturated write performance (2).
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	    hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE	128

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
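
/*
 * Each info structure sits on flusher.ready_list while idle and on
 * flusher.run_list while its work_array[0..count-1] is being flushed.
 * runstate is 0 while idle, 1 while running, and -1 when the slave
 * thread is being told to exit.
 */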

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end, if there
 * are no flush groups pending we still cycle the flusher, so
 * we return the next seq number not yet allocated.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq;

	/*
	 * Already closed
	 */
	if (close_flg && close_flg->closed)
		return(close_flg->seq);

	/*
	 * Close flush groups until we hit the end of the list
	 * or close_flg.
	 */
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->closed == 0 && flg->running == 0);
		flg->closed = 1;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		if (flg == close_flg)
			break;
	}

	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
		seq = flg ? flg->seq : hmp->flusher.next;
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}
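
/*
 * Typical use of hammer_flusher_async(): close out everything queued so
 * far and then block until the flusher catches up, e.g.
 *
 *	seq = hammer_flusher_async(hmp, NULL);
 *	hammer_flusher_wait(hmp, seq);
 *
 * which is exactly what hammer_flusher_sync() above does.  A caller that
 * only needs a specific flush group on the media passes it as close_flg
 * and waits on the returned sequence number instead.
 */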

/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	hammer_flush_group_t flg;
	int seq;

	if (hmp->flusher.td) {
		flg = TAILQ_FIRST(&hmp->flush_group_list);
		seq = hammer_flusher_async(hmp, flg);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
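	/*
	 * The comparisons below are done on signed differences so the
	 * sequence space may wrap: (int)(seq - done) > 0 simply means
	 * seq has not yet been reached by the last completed flush.
	 */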
	while ((int)(seq - hmp->flusher.done) > 0) {
		if ((int)(seq - hmp->flusher.act) > 0) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		/*
		 * Flush all closed flgs.  If no flg's are closed we still
		 * do at least one flush cycle as we may have to update
		 * the UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in-case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		return;
	}

	/*
	 * Set the actively flushing sequence number.  If no flushable
	 * groups are present allocate a dummy sequence number for the
	 * operation.
	 */
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		hmp->flusher.act = hmp->flusher.next;
		++hmp->flusher.next;
	} else if (flg->closed) {
		KKASSERT(flg->running == 0);
		flg->running = 1;
		hmp->flusher.act = flg->seq;
		if (hmp->fill_flush_group == flg)
			hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count,
				flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);
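
			/*
			 * Yield periodically so iterating a very large
			 * flush group does not monopolize the cpu.
			 */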
			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}


/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
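/*
 * Handoff protocol: the master fills info->work_array[], sets
 * info->runstate to 1 and wakes the slave; the slave flushes the array,
 * resets runstate to 0 and puts itself back on the ready_list.  A
 * runstate of -1, set by hammer_flusher_destroy(), tells the slave to
 * exit.
 */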
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
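/*
 * In this file: hammer_flusher_flush() tests quarter == 3 before digging
 * into a new flush cycle and hammer_flusher_flush_inode() tests
 * quarter == 1, finalizing immediately when the emergency level is hit.
 */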
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;
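
	/*
	 * Rough ordering of the work below: lock out other finalizations,
	 * flush dirty data buffers, flush and wait for the UNDO FIFO,
	 * update and flush the volume header, then flush meta-data
	 * asynchronously.  The UNDOs must reach the media before the
	 * meta-data they cover so a crash can always roll the meta-data
	 * back.
	 */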
	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records.  For example, reblocking,
	 * pruning, REDO mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 *
	 * No io interlock is needed, bioops callbacks will not mess with
	 * meta data buffers.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
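/*
 * Based on the checks at the bottom of this function:
 * HAMMER_FLUSH_UNDOS_FORCED always does the full wait (which, per the
 * caller's comment in hammer_flusher_finalize(), also gets the UNDOs
 * onto stable media), HAMMER_FLUSH_UNDOS_AUTO does so only when UNDO
 * buffers were actually written, and any other mode just waits for
 * running I/O to complete.
 */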
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
	    RB_ROOT(&hmp->undo_root) ||
	    RB_ROOT(&hmp->data_root) ||
	    RB_ROOT(&hmp->meta_root) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}