/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.16 2008/01/11 05:45:19 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go of, we set B_LOCKED in the buffer and then actively release it
 * later when we have the opportunity.
 */
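/*
 * Illustrative sketch (compiled out; hypothetical and simplified from
 * the real definitions in hammer.h): each major structure embeds its
 * struct hammer_io as the FIRST member.  That layout is what makes the
 * (void *) casts between hammer_io_t, the per-type structures, and
 * union hammer_io_structure used throughout this file legal.
 */
#if 0
struct hammer_example_structure {
	struct hammer_io	io;		/* must be the first member */
	void			*ondisk;	/* mapping of the buffer data */
};
#endif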
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_io_deallocate(struct buf *bp);
static int hammer_io_checkwrite(struct buf *bp);
/*
 * Initialize an already-zero'd hammer_io structure
 */
void
hammer_io_init(hammer_io_t io, enum hammer_io_type type)
{
	io->type = type;
	TAILQ_INIT(&io->deplist);
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(TAILQ_EMPTY(&iou->io.deplist) && iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		iou->volume.alist.meta = NULL;
		break;
	case HAMMER_STRUCTURE_SUPERCL:
		iou->supercl.ondisk = NULL;
		iou->supercl.alist.meta = NULL;
		break;
	case HAMMER_STRUCTURE_CLUSTER:
		iou->cluster.ondisk = NULL;
		iou->cluster.alist_master.meta = NULL;
		iou->cluster.alist_btree.meta = NULL;
		iou->cluster.alist_record.meta = NULL;
		iou->cluster.alist_mdata.meta = NULL;
		break;
	case HAMMER_STRUCTURE_BUFFER:
		iou->buffer.ondisk = NULL;
		iou->buffer.alist.meta = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
/*
 * Wait for all of an io structure's dependencies to clear.
 */
void
hammer_io_waitdep(hammer_io_t io)
{
	while (TAILQ_FIRST(&io->deplist)) {
		kprintf("waitdep %p\n", io);
		tsleep(io, 0, "hmrdep", hz);
	}
}
/*
 * Load bp for a HAMMER structure.  The io is exclusively locked by the
 * caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		io->modified = 0;	/* no new modifications yet */
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
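/*
 * Illustrative usage sketch (compiled out; hypothetical caller): a
 * structure's backing store is brought in with hammer_io_read() and the
 * buffer data is then mapped through the structure's ondisk pointer.
 */
#if 0
static int
example_load_buffer(struct vnode *devvp, hammer_buffer_t buffer)
{
	int error;

	error = hammer_io_read(devvp, &buffer->io);
	if (error == 0)
		buffer->ondisk = (void *)buffer->io.bp->b_data;
	return(error);
}
#endif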
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O so we can call it.
 *
 * The caller is responsible for calling hammer_modify_*() on the appropriate
 * HAMMER structure.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->modified = 0;	/* no new modifications yet */
		io->released = 0;	/* we hold an active lock on bp */
		BUF_KERNPROC(bp);
	} else if (io->released) {
		regetblk(bp);
		BUF_KERNPROC(bp);
		io->released = 0;
	}
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * If flush is 1, or B_LOCKED was set indicating that the kernel
 * wanted to recycle the buffer, and there are no dependencies, this
 * function will issue an asynchronous write.
 *
 * If flush is 2 this function waits until all I/O has completed and
 * disassociates the bp from the IO before returning, unless there
 * are still other references.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * If flush is 2 wait for dependencies
	 */
	while (flush == 2 && TAILQ_FIRST(&io->deplist)) {
		hammer_io_wait(TAILQ_FIRST(&io->deplist));
	}

	/*
	 * Try to flush a dirty IO to disk if asked to by the caller
	 * or if the kernel tried to flush the buffer in the past.
	 *
	 * The flush will fail if any dependencies are present.
	 */
	if (io->modified && (flush || bp->b_flags & B_LOCKED))
		hammer_io_flush(io);

	/*
	 * If flush is 2 we wait for the IO to complete.
	 */
	if (flush == 2 && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Actively or passively release the buffer.  Modified IOs with
	 * dependencies cannot be released.
	 */
	if (flush && io->modified == 0 && io->running == 0) {
		KKASSERT(TAILQ_EMPTY(&io->deplist));
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		if (io->released == 0 && TAILQ_EMPTY(&io->deplist)) {
			io->released = 1;
			bdwrite(bp);
		}
	} else if (io->released == 0) {
		io->released = 1;
		bqrelse(bp);
	}
}
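/*
 * Illustrative sketch (compiled out; hypothetical caller) of the three
 * flush levels accepted by hammer_io_release(), per the comment above.
 */
#if 0
static void
example_release(hammer_buffer_t buffer, int how)
{
	/*
	 * how == 0: passive release, the bp stays associated.
	 * how == 1: asynchronous write if dirty, then disassociate.
	 * how == 2: wait for dependencies and I/O, then disassociate.
	 */
	hammer_io_release(&buffer->io, how);
}
#endif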
/*
 * This routine is called with a locked IO when a flush is desired.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Can't flush if the IO isn't modified or if it has dependencies.
	 */
	if (io->modified == 0)
		return;
	if (TAILQ_FIRST(&io->deplist))
		return;

	bp = io->bp;
	KKASSERT(bp != NULL);

	/*
	 * If we are trying to flush a buffer we have to wait until the
	 * cluster header for the mark-OPEN has completed its I/O.
	 */
	if (io->type == HAMMER_STRUCTURE_BUFFER) {
		hammer_io_structure_t iou = (void *)io;
		hammer_cluster_t cluster = iou->buffer.cluster;

		if (cluster->io.running) {
			kprintf("WAIT CLUSTER OPEN %d\n", cluster->clu_no);
			hammer_io_wait(&cluster->io);
			kprintf("WAIT CLUSTER OPEN OK\n");
		}
	}
	if (io->type == HAMMER_STRUCTURE_CLUSTER) {
		/*
		 * Mark the cluster closed if we can
		 */
		hammer_io_checkwrite(io->bp);
	}

	/*
	 * Reacquire the bp if necessary, then issue the asynchronous
	 * write.  The completion callback clears the modified bit and
	 * removes us from any dependency list.
	 */
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		io->released = 0;
	}
	io->released = 1;
	io->running = 1;
	bawrite(bp);
}
/************************************************************************
 *				DEPENDENCIES				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
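/*
 * Illustrative sketch (compiled out; hypothetical caller) of the
 * contract described above: make the appropriate hammer_modify_*()
 * call on a referenced structure first, then touch the ondisk data.
 */
#if 0
static void
example_open_cluster(hammer_cluster_t cluster)
{
	hammer_modify_cluster(cluster);			/* acquire bp, set up deps */
	cluster->ondisk->clu_flags |= HAMMER_CLUF_OPEN;	/* now safe to modify */
}
#endif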
/*
 * Ensure that the bp is acquired and return non-zero on a 0->1 transition
 * of the modified bit.
 */
static int
hammer_io_modify(hammer_io_t io, struct hammer_io_list *list)
{
	int r = 0;

	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	if (io->modified == 0) {
		hammer_lock_ex(&io->lock);
		if (io->modified == 0) {
			if (io->released) {
				regetblk(io->bp);
				BUF_KERNPROC(io->bp);
				io->released = 0;
			}
			io->modified = 1;
			io->entry_list = list;
			if (list)
				TAILQ_INSERT_TAIL(list, io, entry);
			r = 1;
		}
		hammer_unlock(&io->lock);
	} else if (io->released) {
		/*
		 * Make sure no IO is occurring while we modify the contents
		 * of the buffer. XXX should be able to avoid doing this.
		 */
		hammer_lock_ex(&io->lock);
		if (io->released) {
			regetblk(io->bp);
			BUF_KERNPROC(io->bp);
			io->released = 0;
		}
		hammer_unlock(&io->lock);
	}
	return(r);
}
void
hammer_modify_volume(hammer_volume_t volume)
{
	hammer_io_modify(&volume->io, NULL);
}

void
hammer_modify_supercl(hammer_supercl_t supercl)
{
	hammer_io_modify(&supercl->io, &supercl->volume->io.deplist);
}

/*
 * Caller intends to modify a cluster's ondisk structure.
 */
void
hammer_modify_cluster(hammer_cluster_t cluster)
{
	hammer_io_modify(&cluster->io, &cluster->volume->io.deplist);
}
/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_buffer_t buffer)
{
	hammer_cluster_t cluster = buffer->cluster;

	if (hammer_io_modify(&buffer->io, &cluster->io.deplist)) {
		hammer_modify_cluster(cluster);
		if ((cluster->ondisk->clu_flags & HAMMER_CLUF_OPEN) == 0) {
			hammer_lock_ex(&cluster->io.lock);
			if ((cluster->ondisk->clu_flags & HAMMER_CLUF_OPEN) == 0) {
				KKASSERT(cluster->io.released == 0);
				cluster->ondisk->clu_flags |= HAMMER_CLUF_OPEN;
				cluster->io.released = 1;
				cluster->io.running = 1;
				bawrite(cluster->io.bp);
				kprintf("OPEN CLUSTER %d:%d\n",
					cluster->volume->vol_no,
					cluster->clu_no);
			}
			hammer_unlock(&cluster->io.lock);
		}
	}
}
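/*
 * Illustrative sketch (compiled out; hypothetical caller): the first
 * modification of a clean buffer marks its cluster OPEN and issues the
 * cluster header write, and hammer_io_flush() on the buffer then waits
 * for that header I/O, giving the buffers-before-headers write ordering
 * described later in this file.
 */
#if 0
static void
example_dirty_and_flush(hammer_buffer_t buffer)
{
	hammer_modify_buffer(buffer);	/* may bawrite() the cluster header */
	/* ... modify buffer->ondisk contents here ... */
	hammer_io_flush(&buffer->io);	/* waits on the cluster-OPEN write */
}
#endif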
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
	struct buf *bp;

	io->modified = 0;
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
			io->released = 0;
		}
		if (io->modified == 0) {
			kprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			io->released = 1;
			bqrelse(bp);
		}
	}
}
/************************************************************************
 *				KERNEL CALLBACKS			*
 ************************************************************************/

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);
	iou->io.running = 0;

	/*
	 * Nothing more to do on a read.
	 */
	if (iou->io.modified == 0)
		return;

	/*
	 * If we were writing the cluster header out and CLUF_OPEN is set,
	 * do NOT clear the modify bit.  Just clear the IO running bit
	 * and do a wakeup.
	 */
	if (iou->io.type == HAMMER_STRUCTURE_CLUSTER) {
		if (iou->cluster.ondisk->clu_flags & HAMMER_CLUF_OPEN) {
			if (iou->io.waiting) {
				iou->io.waiting = 0;
				wakeup(iou);
			}
			return;
		}
	}

	/*
	 * If this was a write then clear the modified status and remove us
	 * from the dependency list.
	 *
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	iou->io.modified = 0;
	if (iou->io.entry_list) {
		TAILQ_REMOVE(iou->io.entry_list, &iou->io, entry);
		iou->io.entry_list = NULL;
	}
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This case can only occur with read-only
 * buffers.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.modified) {
		bp->b_flags |= B_LOCKED;
		return;
	}
	hammer_ref(&iou->io.lock);
	if (iou->io.lock.refs > 1 || iou->io.modified) {
		hammer_unref(&iou->io.lock);
		bp->b_flags |= B_LOCKED;
	} else {
		hammer_io_disassociate(iou, 0);

		switch(iou->io.type) {
		case HAMMER_STRUCTURE_VOLUME:
			hammer_rel_volume(&iou->volume, 1);
			break;
		case HAMMER_STRUCTURE_SUPERCL:
			hammer_rel_supercl(&iou->supercl, 1);
			break;
		case HAMMER_STRUCTURE_CLUSTER:
			hammer_rel_cluster(&iou->cluster, 1);
			break;
		case HAMMER_STRUCTURE_BUFFER:
			hammer_rel_buffer(&iou->buffer, 1);
			break;
		}
	}
}
static void
hammer_io_fsync(struct vnode *vp)
{
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we fall through and allow the operation anyway.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.
 *
 * This code enforces the following write ordering: buffers, then cluster
 * headers, then volume headers.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * A modified cluster with no dependencies can be closed.
	 */
	if (iou->io.type == HAMMER_STRUCTURE_CLUSTER && iou->io.modified) {
		hammer_cluster_t cluster = &iou->cluster;

		if (TAILQ_EMPTY(&cluster->io.deplist)) {
			cluster->ondisk->clu_flags &= ~HAMMER_CLUF_OPEN;
			kprintf("CLOSE CLUSTER %d:%d\n",
				cluster->volume->vol_no,
				cluster->clu_no);
		}
	}
	return(0);
}
/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
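/*
 * Illustrative sketch (compiled out): the callbacks above only fire for
 * buffers that have been wired to HAMMER, which is exactly what
 * hammer_io_read() and hammer_io_new() do when they attach a buffer.
 */
#if 0
static void
example_attach_bioops(struct buf *bp, struct hammer_io *io)
{
	bp->b_ops = &hammer_bioops;
	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
}
#endif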