HAMMER 38C/Many: Undo/Synchronization and crash recovery
[dragonfly.git] / sys / vfs / hammer / hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.26 2008/04/25 21:49:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

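/*
 * Illustrative overview (comments only, not compiled code) of how a
 * buffer normally moves through this module, assuming the caller holds
 * a reference on the owning HAMMER structure:
 *
 *      hammer_io_read()/hammer_io_new()  - associate a bp with the io
 *      hammer_io_flush()                 - backend-initiated write-out
 *      hammer_io_release()               - passive release to the kernel
 *      hammer_bioops callbacks           - kernel notifies us of IO
 *                                          completion / buffer reuse
 */
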
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
        io->hmp = hmp;
        io->type = type;
}

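/*
 * Reassign the type of an io.  If the io is currently on a mod_list it is
 * moved to the mod_list appropriate for the new type, and the
 * locked_dirty_count accounting (which covers only volume and meta-data
 * buffers) is adjusted to match.
 */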
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
        hammer_mount_t hmp = io->hmp;

        if (io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &hmp->volu_list ||
                    io->mod_list == &hmp->meta_list) {
                        --hmp->locked_dirty_count;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
        }
        io->type = type;
        if (io->modified) {
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
        }
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.modified == 0);
        buf_dep_init(bp);
        iou->io.bp = NULL;
        bp->b_flags &= ~B_LOCKED;
        if (elseit) {
                KKASSERT(iou->io.released == 0);
                iou->io.released = 1;
                bqrelse(bp);
        } else {
                KKASSERT(iou->io.released);
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        }
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
        if (io->running) {
                crit_enter();
                tsleep_interlock(io);
                io->waiting = 1;
                for (;;) {
                        tsleep(io, 0, "hmrflw", 0);
                        if (io->running == 0)
                                break;
                        tsleep_interlock(io);
                        io->waiting = 1;
                        if (io->running == 0)
                                break;
                }
                crit_exit();
        }
}

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
                error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
                if (error == 0) {
                        bp = io->bp;
                        bp->b_ops = &hammer_bioops;
                        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                        BUF_KERNPROC(bp);
                }
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by making sure the buffer is not in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return;

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * Meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (io->flush) {
                        hammer_io_flush(io);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io);
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regard to
         * the buffer).
         */
        if (io->flush && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
                hammer_io_disassociate((hammer_io_structure_t)io, 1);
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel.
                 * Volume and meta-data IO types must be explicitly flushed
                 * by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 */
                io->released = 1;
                bqrelse(bp);
        }
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
        struct buf *bp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0) {
                io->flush = 0;
                return;
        }

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs == 0);

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                --io->hmp->locked_dirty_count;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;
        io->flush = 0;
        bp = io->bp;

        /*
         * Acquire ownership (released variable set for clarity)
         */
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                io->released = 0;
        }

        /*
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->released = 1;
        io->running = 1;
        bawrite(bp);
}

/************************************************************************
 *                          BUFFER DIRTYING                             *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

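/*
 * Illustrative sketch (not compiled code) of the expected calling pattern,
 * assuming the caller already holds a reference on the buffer and a field
 * "ondisk->field" it wants to change:
 *
 *      hammer_modify_buffer(trans, buffer, &ondisk->field, sizeof(field));
 *      ondisk->field = new_value;
 *      hammer_modify_buffer_done(buffer);
 *
 * The modify call dirties the backing bp and generates the UNDO record
 * covering the range about to be changed; the _done call drops the
 * modify_refs count taken by the modify call.
 */
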
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure-data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
        struct hammer_mount *hmp = io->hmp;

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(io->lock.refs != 0 && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        hammer_lock_ex(&io->lock);
        if (io->modified == 0) {
                KKASSERT(io->mod_list == NULL);
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
                io->modified = 1;
        }
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
                KKASSERT(io->modified != 0);
        }
        hammer_unlock(&io->lock);
}

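/*
 * Drop one modify reference previously taken by hammer_io_modify().
 */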
static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
}

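/*
 * Caller intends to modify a volume's ondisk structure.  Mark the volume's
 * io as modified and, if a non-zero length is supplied, generate an UNDO
 * record covering the region about to be changed.
 */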
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        hammer_io_modify(&volume->io, 1);

        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &volume->io,
                        HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                        base, len);
        }
}

/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &buffer->io,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
        struct buf *bp;

        io->modified = 0;
        XXX mod_list/entry
        if ((bp = io->bp) != NULL) {
                if (io->released) {
                        regetblk(bp);
                        /* BUF_KERNPROC(io->bp); */
                } else {
                        io->released = 1;
                }
                if (io->modified == 0) {
                        kprintf("hammer_io_clear_modify: cleared %p\n", io);
                        bundirty(bp);
                        bqrelse(bp);
                } else {
                        bdwrite(bp);
                }
        }
#endif
}

/************************************************************************
 *                          HAMMER_BIOOPS                               *
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT(iou->io.released == 1);

        /*
         * If no lock references remain and we can acquire the IO lock and
         * someone at some point wanted us to flush (B_LOCKED test), then
         * try to dispose of the IO.
         */
        iou->io.running = 0;
        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * Someone wanted us to flush, try to clean out the buffer.
         */
        if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
                KKASSERT(iou->io.modified == 0);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (iou->io.lock.refs > 0 || iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.
                 */
                hammer_io_disassociate(iou, 0);
                if (iou->io.bp == NULL &&
                    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        kprintf("ADD LOOSE %p\n", &iou->io);
                        KKASSERT(iou->io.mod_list == NULL);
                        iou->io.mod_list = &iou->io.hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
                }
        }
}

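/*
 * bioops callback for fsync.  Currently a no-op.
 */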
static int
hammer_io_fsync(struct vnode *vp)
{
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
        return(0);
}

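/*
 * bioops callback used by the kernel to transfer outstanding dependencies
 * from bp1 to bp2.  Currently a no-op for HAMMER.
 */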
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does,
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
        return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
         */
        if (io->modify_refs == 0 && io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &io->hmp->volu_list ||
                    io->mod_list == &io->hmp->meta_list) {
                        --io->hmp->locked_dirty_count;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                io->modified = 0;
        }
        return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
        if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        return(0);
}

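/*
 * bioops vector installed on each buffer (via bp->b_ops) that HAMMER
 * associates with one of its structures, so the kernel calls back into
 * the routines above for IO initiation, completion, deallocation and
 * read/write checks.
 */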
struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};
