/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.29 2008/06/23 07:31:14 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
        int             startit;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

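/*
 * A note on the flusher sequence space, summarized from the code below
 * (not original commentary): flusher.next is the next flush group
 * sequence number to be handed out, flusher.act is the group the master
 * thread is currently flushing, and flusher.done is the last group known
 * to have completed.  Waiters compare sequence numbers with a signed
 * delta, (int)(seq - done) > 0, so the sequence space is free to wrap.
 */
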
/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

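/*
 * Usage sketch (illustrative, not taken from the original source):
 * because inodes may be queued to a later flush group, a caller that
 * must catch everything typically issues the call twice in a row:
 *
 *      hammer_flusher_sync(hmp);
 *      hammer_flusher_sync(hmp);
 *
 * The first call waits out the current group, the second picks up any
 * inodes that had been queued to the follow-on group.
 */
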
/*
 * Sync all inodes pending on the flusher - return immediately.
 */
MD
87void
88hammer_flusher_async(hammer_mount_t hmp)
89{
da2da375
MD
90 if (hmp->flusher.td) {
91 if (hmp->flusher.signal++ == 0)
92 wakeup(&hmp->flusher.signal);
f90dde4c 93 }
059819e3
MD
94}
95
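/*
 * Create the flusher threads at mount time: one master thread plus a
 * pool of HAMMER_MAX_FLUSHERS slave work threads.  The sequence space
 * starts with flush group 1 as the first group to be handed out.
 * (Descriptive comment added; the original function was uncommented.)
 */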
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

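/*
 * Tear the flusher threads down at unmount: the master is signalled to
 * exit first, then each slave is woken with a negative startit value
 * and its info structure is freed once its thread has exited.
 * (Descriptive comment added; the original function was uncommented.)
 */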
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out around ~64 inodes at a time to flush.
                 * The idea is to try to avoid deadlocks between the slaves.
                 */
                n = c = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                           &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                   &hmp->flusher.trans);
                }
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

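/*
 * A note on the batching thresholds above, summarized from the code: a
 * collected batch is submitted as soon as it reaches
 * HAMMER_FLUSH_GROUP_SIZE inodes or accumulates more than eight times
 * that many reserved records (rsv_recs), whichever comes first, so
 * record-heavy inodes force an earlier submission rather than filling
 * the work array.
 */
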
static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;
        int panic_count = 1000000;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        crit_enter();   /* biodone() race */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(--panic_count > 0);
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                io->mod_list = NULL;
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
        crit_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * Start work threads.
         */
        i = 0;
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it can no longer be referenced.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}

/*
 * Flush a single inode that is part of a flush group.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;

        hammer_lock_sh(&hmp->flusher.finalize_lock);
        ip->error = hammer_sync_inode(ip);
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_must_finalize_undo(hmp)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        } else if (trans->hmp->locked_dirty_count +
                   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                hammer_flusher_finalize(trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        }
}

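/*
 * A note on the interlock above, summarized from the code (not original
 * commentary): inode flushes hold flusher.finalize_lock shared so the
 * slaves can run concurrently, while a finalization pass holds it
 * exclusively.  finalize_want is set before the exclusive lock is
 * requested and other flushers stall on it first, so new shared holders
 * cannot starve the thread trying to finalize.
 */
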
/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
        if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
                hkprintf("*");
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_crc_set_volume(root_volume->ondisk);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group, set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);
}

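/*
 * A note on the write ordering implemented above, summarized from the
 * code (not original commentary): data buffers go out first, then the
 * UNDO FIFO, then (after an I/O wait) the volume header with the new
 * FIFO end position, then (after another I/O wait) the meta-data
 * buffers.  Meta-data is therefore never committed ahead of the UNDO
 * records that cover it, which is what makes the asynchronous meta-data
 * flush safe to perform.
 */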