/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.26 2008/06/13 00:25:33 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);
/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	struct hammer_mount *hmp;
	thread_t	td;
	int		startit;
	TAILQ_HEAD(,hammer_inode) work_list;
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
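		/*
		 * Wait for the flush group we captured above to retire.
		 * The comparison uses a signed delta so the sequence
		 * counter may wrap without breaking the test.
		 */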
		while ((int)(seq - hmp->flusher.done) > 0)
			tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}
/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	}
}
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hmp->flusher.count = 0;
	hammer_ref(&hmp->flusher.finalize_lock);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
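	/*
	 * Create the pool of slave worker threads.  Each slave is handed
	 * its own hammer_flusher_info and is woken via info->startit.
	 */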
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INIT(&info->work_list);
		++hmp->flusher.count;
		hmp->flusher.info[i] = info;
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	/*
	 * Kill the master thread.
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}
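	/*
	 * Kill the slave threads.  A negative startit tells a slave to
	 * exit its work loop (see hammer_flusher_slave_thread).
	 */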
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		if ((info = hmp->flusher.info[i]) != NULL) {
			KKASSERT(info->startit == 0);
			info->startit = -1;
			wakeup(&info->startit);
			while (info->td)
				tsleep(&info->td, 0, "hmrwwc", 0);
			hmp->flusher.info[i] = NULL;
			kfree(info, M_HAMMER);
			--hmp->flusher.count;
		}
	}
	KKASSERT(hmp->flusher.count == 0);
}
/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_mount_t hmp = arg;

	for (;;) {
		while (hmp->flusher.group_lock)
			tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
		hmp->flusher.act = hmp->flusher.next;
		++hmp->flusher.next;
		hammer_flusher_clean_loose_ios(hmp);
		hammer_flusher_flush(hmp);
		hmp->flusher.done = hmp->flusher.act;
		wakeup(&hmp->flusher.done);
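		/*
		 * Terminate once the mount is exiting and no more inodes
		 * are queued, otherwise wait for a new signal.
		 */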
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
			break;

		/*
		 * This is a hack until we can dispose of frontend buffer
		 * cache buffers on the frontend.
		 */
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}
/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->startit == 0)
			tsleep(&info->startit, 0, "hmrssw", 0);
		if (info->startit < 0)
			break;
		info->startit = 0;
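		/*
		 * Pull inodes belonging to the current flush group off
		 * the shared flush_list.  An inode queued to a later
		 * flush group terminates the loop.
		 */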
		while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
			if (ip->flush_group != hmp->flusher.act)
				break;
			TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
		}
		if (--hmp->flusher.running == 0)
			wakeup(&hmp->flusher.running);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}
static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;
	int panic_count = 1000000;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	crit_enter();	/* biodone() race */
	while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		KKASSERT(--panic_count > 0);
		KKASSERT(io->mod_list == &hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
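		/*
		 * Gain a temporary reference so hammer_rel_buffer() can
		 * dispose of the buffer when the reference is dropped.
		 */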
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		buffer = (void *)io;
		hammer_rel_buffer(buffer, 0);
	}
	crit_exit();
}
/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_reserve_t resv;
	int i;
	int n;

	hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

	/*
	 * Start work threads.
	 */
	n = hmp->count_iqueued / 64;
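	/*
	 * Wake roughly one slave thread for every 64 queued inodes,
	 * capped at HAMMER_MAX_FLUSHERS.
	 */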
	if (TAILQ_FIRST(&hmp->flush_list)) {
		for (i = 0; i <= hmp->count_iqueued / 64; ++i) {
			if (i == HAMMER_MAX_FLUSHERS ||
			    hmp->flusher.info[i] == NULL) {
				break;
			}
			info = hmp->flusher.info[i];
			if (info->startit == 0) {
				++hmp->flusher.running;
				info->startit = 1;
				wakeup(&info->startit);
			}
		}
	}
	while (hmp->flusher.running)
		tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);
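	/*
	 * All slaves are done; perform the final finalization pass for
	 * this flush group (final == 1 also synchronizes the cached
	 * blockmap into the volume header).
	 */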
	hammer_flusher_finalize(&hmp->flusher.trans, 1);
	hmp->flusher.tid = hmp->flusher.trans.tid;

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be referenced.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		hammer_blockmap_reserve_complete(hmp, resv);
	}
	hammer_done_transaction(&hmp->flusher.trans);
}
/*
 * Flush a single inode that is part of a flush group.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;

	hammer_lock_sh(&hmp->flusher.finalize_lock);
	ip->error = hammer_sync_inode(ip);
	hammer_flush_inode_done(ip);
	hammer_unlock(&hmp->flusher.finalize_lock);
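	/*
	 * If another thread is in the middle of an intermediate
	 * finalization, wait for it to complete before testing the
	 * UNDO and dirty-buffer limits ourselves.
	 */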
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_must_finalize_undo(hmp)) {
		hmp->flusher.finalize_want = 1;
		hammer_lock_ex(&hmp->flusher.finalize_lock);
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
		hammer_unlock(&hmp->flusher.finalize_lock);
		hmp->flusher.finalize_want = 0;
		wakeup(&hmp->flusher.finalize_want);
	} else if (trans->hmp->locked_dirty_count +
		   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
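		/*
		 * The dirty-buffer backlog is too large; run an
		 * intermediate (non-final) finalization to flush it
		 * before continuing.
		 */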
		hmp->flusher.finalize_want = 1;
		hammer_lock_ex(&hmp->flusher.finalize_lock);
		hammer_flusher_finalize(trans, 0);
		hammer_unlock(&hmp->flusher.finalize_lock);
		hmp->flusher.finalize_want = 0;
		wakeup(&hmp->flusher.finalize_want);
	}
}
/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
	if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
		return(1);
	} else {
		return(0);
	}
}
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 */
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}
	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header, sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
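			/*
			 * CRCs are set in the in-memory copy first so the
			 * bcopy() below publishes blockmap entries whose
			 * CRCs are already valid.
			 */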
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.
	 */
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}

	/*
	 * Wait for I/Os to complete.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl1");
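	/*
	 * The UNDO records must be stable on the media before the volume
	 * header may be updated to reference them; otherwise a crash
	 * could leave the header pointing at unwritten UNDO space.
	 */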
	/*
	 * Update the on-disk volume header with the new UNDO FIFO end
	 * position (do not generate new UNDO records for this change).
	 * We have to do this for the UNDO FIFO whether (final) is set
	 * or not.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock, another sync may have beaten us to the punch.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != cundomap->next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = cundomap->next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_crc_set_volume(root_volume->ondisk);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_modify_volume_done(root_volume);
	}
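	/*
	 * Flush the root volume header and wait for it to settle; the
	 * header carries the new UNDO FIFO pointers that crash recovery
	 * will scan.
	 */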
	if (root_volume->io.modified) {
		hammer_io_flush(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl2");

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 *
	 * Repeated catchups will wind up flushing this update's meta-data
	 * and the UNDO buffers for the next update simultaneously.
	 */
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}

	/*
	 * If this is the final finalization for the flush group, set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		cundomap->first_offset = cundomap->next_offset;
		hammer_clear_undo_history(hmp);
	}

	hammer_sync_unlock(trans);
}