/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.15 2008/01/11 01:41:33 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go of, we set B_LOCKED in the buffer and actively release it
 * later, when we can.
 */
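/*
 * A minimal sketch of the layout this file assumes (simplified; the real
 * definitions live in hammer.h).  Each major structure embeds its
 * struct hammer_io as the first member, which is what makes the casts to
 * union hammer_io_structure used throughout this file legal:
 *
 *	struct hammer_buffer {
 *		struct hammer_io io;	(must be first)
 *		hammer_cluster_t cluster;
 *		...
 *	};
 *
 *	union hammer_io_structure {
 *		struct hammer_io io;
 *		struct hammer_volume volume;
 *		struct hammer_supercl supercl;
 *		struct hammer_cluster cluster;
 *		struct hammer_buffer buffer;
 *	};
 */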
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_deallocate(struct buf *bp);
static int hammer_io_checkwrite(struct buf *bp);
/*
 * Initialize an already-zero'd hammer_io structure
 */
void
hammer_io_init(hammer_io_t io, enum hammer_io_type type)
{
	io->type = type;
	TAILQ_INIT(&io->deplist);
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(TAILQ_EMPTY(&iou->io.deplist) && iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		iou->volume.alist.meta = NULL;
		break;
	case HAMMER_STRUCTURE_SUPERCL:
		iou->supercl.ondisk = NULL;
		iou->supercl.alist.meta = NULL;
		break;
	case HAMMER_STRUCTURE_CLUSTER:
		iou->cluster.ondisk = NULL;
		iou->cluster.alist_master.meta = NULL;
		iou->cluster.alist_btree.meta = NULL;
		iou->cluster.alist_record.meta = NULL;
		iou->cluster.alist_mdata.meta = NULL;
		break;
	case HAMMER_STRUCTURE_BUFFER:
		iou->buffer.ondisk = NULL;
		iou->buffer.alist.meta = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			/* re-arm the interlock and recheck before sleeping */
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
/*
 * Wait for all dependencies on this io to flush out.
 */
void
hammer_io_waitdep(hammer_io_t io)
{
	while (TAILQ_FIRST(&io->deplist)) {
		kprintf("waitdep %p\n", io);
		tsleep(io, 0, "hmrdep", hz);
	}
}
/*
 * Load bp for a HAMMER structure.  The io is exclusively locked by the
 * caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		io->modified = 0;	/* no new modifications yet */
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
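/*
 * Usage sketch (hypothetical caller; the offset and field names are
 * illustrative, not taken from this file): a routine loading a volume
 * header would set up the io, read it, and then point its ondisk field
 * at the buffer data:
 *
 *	volume->io.offset = 0;
 *	error = hammer_io_read(volume->devvp, &volume->io);
 *	if (error == 0)
 *		volume->ondisk = (void *)volume->io.bp->b_data;
 */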
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * vfs_bio_clrbuf() is kinda nasty, so enforce serialization against
 * background I/O before calling it.
 *
 * The caller is responsible for calling hammer_modify_*() on the
 * appropriate HAMMER structure.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->modified = 0;
		io->released = 0;
		BUF_KERNPROC(bp);
	} else if (io->released) {
		/* regain ownership of the bp so we can safely zero it */
		regetblk(bp);
		BUF_KERNPROC(bp);
		io->released = 0;
	}
	vfs_bio_clrbuf(bp);
	return(0);
}
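/*
 * Usage sketch (illustrative, not from the original source): a caller
 * formatting brand-new backing store gets a zero'd bp and must declare
 * its intent before filling in the on-disk data:
 *
 *	error = hammer_io_new(volume->devvp, &buffer->io);
 *	if (error == 0) {
 *		hammer_modify_buffer(buffer);
 *		(... initialize buffer->ondisk contents ...)
 *	}
 */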
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * If flush is 1, or B_LOCKED was set indicating that the kernel
 * wanted to recycle the buffer, and there are no dependencies, this
 * function will issue an asynchronous write.
 *
 * If flush is 2 this function waits until all I/O has completed and
 * disassociates the bp from the IO before returning, unless there
 * are still other references.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * If flush is 2 wait for dependencies
	 */
	while (flush == 2 && TAILQ_FIRST(&io->deplist)) {
		hammer_io_wait(TAILQ_FIRST(&io->deplist));
	}

	/*
	 * Try to flush a dirty IO to disk if asked to by the caller
	 * or if the kernel tried to flush the buffer in the past.
	 *
	 * The flush will fail if any dependencies are present.
	 */
	if (io->modified && (flush || bp->b_flags & B_LOCKED))
		hammer_io_flush(io);

	/*
	 * If flush is 2 we wait for the IO to complete.
	 */
	if (flush == 2 && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Actively or passively release the buffer.  Modified IOs with
	 * dependencies cannot be released.
	 */
	if (flush && io->modified == 0 && io->running == 0) {
		KKASSERT(TAILQ_EMPTY(&io->deplist));
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		if (io->released == 0 && TAILQ_EMPTY(&io->deplist)) {
			io->released = 1;
			bdwrite(bp);
		}
	} else if (io->released == 0) {
		io->released = 1;
		bqrelse(bp);
	}
}
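/*
 * Summary of the three release behaviors handled above, stated
 * informally for reference:
 *
 *	hammer_io_release(io, 0)  - leave the bp passively associated;
 *				    modified data is delay-written.
 *	hammer_io_release(io, 1)  - flush a modified bp asynchronously,
 *				    disassociating it if it is clean and idle.
 *	hammer_io_release(io, 2)  - additionally wait for dependencies and
 *				    for the write to complete, then
 *				    disassociate the bp entirely.
 */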
/*
 * This routine is called with a locked IO when a flush is desired.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Can't flush if the IO isn't modified or if it has dependencies.
	 */
	if (io->modified == 0)
		return;
	if (TAILQ_FIRST(&io->deplist))
		return;

	bp = io->bp;
	KKASSERT(bp != NULL);

	/*
	 * If we are trying to flush a buffer we have to wait until the
	 * cluster header for the mark-OPEN has completed its I/O.
	 */
	if (io->type == HAMMER_STRUCTURE_BUFFER) {
		hammer_io_structure_t iou = (void *)io;
		hammer_cluster_t cluster = iou->buffer.cluster;

		if (cluster->io.running) {
			kprintf("WAIT CLUSTER OPEN %d\n", cluster->clu_no);
			hammer_io_wait(&cluster->io);
			kprintf("WAIT CLUSTER OPEN OK\n");
		}
	}
	if (io->type == HAMMER_STRUCTURE_CLUSTER) {
		/*
		 * Mark the cluster closed if we can
		 */
		hammer_io_checkwrite(io->bp);
	}
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		io->released = 0;
	}
	io->released = 1;
	io->running = 1;
	bawrite(bp);
}
/************************************************************************
 *			  BUFFER DEPENDENCIES				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
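/*
 * A minimal sketch of the calling pattern (caller-side; the field names
 * are placeholders):
 *
 *	hammer_modify_buffer(buffer);		(acquire bp, record the
 *						 dependency on the cluster)
 *	buffer->ondisk->some_field = value;	(now safe to modify)
 *
 * The wrappers below select which deplist, if any, the io is entered on.
 */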
/*
 * Ensure that the bp is acquired and return non-zero on a 0->1 transition
 * of the modified bit.
 */
int
hammer_io_modify(hammer_io_t io, struct hammer_io_list *list)
{
	int r;

	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	if (io->modified == 0) {
		hammer_lock_ex(&io->lock);
		if (io->modified == 0) {
			if (io->released) {
				regetblk(io->bp);
				BUF_KERNPROC(io->bp);
				io->released = 0;
			}
			io->modified = 1;
			io->entry_list = list;
			if (list)
				TAILQ_INSERT_TAIL(list, io, entry);
			r = 1;
		} else {
			r = 0;
		}
		hammer_unlock(&io->lock);
	} else if (io->released) {
		/*
		 * Make sure no IO is occurring while we modify the contents
		 * of the buffer. XXX should be able to avoid doing this.
		 */
		hammer_lock_ex(&io->lock);
		if (io->released) {
			regetblk(io->bp);
			BUF_KERNPROC(io->bp);
			io->released = 0;
		}
		hammer_unlock(&io->lock);
		r = 0;
	} else {
		r = 0;
	}
	return(r);
}
void
hammer_modify_volume(hammer_volume_t volume)
{
	hammer_io_modify(&volume->io, NULL);
}

void
hammer_modify_supercl(hammer_supercl_t supercl)
{
	hammer_io_modify(&supercl->io, &supercl->volume->io.deplist);
}

/*
 * Caller intends to modify a cluster's ondisk structure.
 */
void
hammer_modify_cluster(hammer_cluster_t cluster)
{
	hammer_io_modify(&cluster->io, &cluster->volume->io.deplist);
}
/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer, so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_buffer_t buffer)
{
	hammer_cluster_t cluster = buffer->cluster;

	if (hammer_io_modify(&buffer->io, &cluster->io.deplist)) {
		hammer_modify_cluster(cluster);
		if ((cluster->ondisk->clu_flags & HAMMER_CLUF_OPEN) == 0) {
			hammer_lock_ex(&cluster->io.lock);
			if ((cluster->ondisk->clu_flags & HAMMER_CLUF_OPEN) == 0) {
				KKASSERT(cluster->io.released == 0);
				cluster->ondisk->clu_flags |= HAMMER_CLUF_OPEN;
				cluster->io.released = 1;
				cluster->io.running = 1;
				bawrite(cluster->io.bp);
				kprintf("OPEN CLUSTER %d:%d\n",
					cluster->volume->vol_no,
					cluster->clu_no);
			}
			hammer_unlock(&cluster->io.lock);
		}
	}
}
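/*
 * The resulting dependency chain, as set up by the deplist arguments
 * used above:
 *
 *	buffer->io   is entered on  cluster->io.deplist
 *	cluster->io  is entered on  volume->io.deplist
 *
 * hammer_io_flush() refuses to flush an io whose deplist is non-empty,
 * so a cluster header cannot be flushed while modified buffers still
 * depend on it, nor a volume header while cluster headers depend on it.
 */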
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
	struct buf *bp;

	io->modified = 0;
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			kprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
}
/************************************************************************
 *			     KERNEL BIOOPS				*
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	if (iou->io.modified == 0)
		return;

	/*
	 * If we were writing the cluster header out and CLUF_OPEN is set,
	 * do NOT clear the modify bit.  Just clear the IO running bit
	 * and do the wakeup.
	 */
	if (iou->io.type == HAMMER_STRUCTURE_CLUSTER) {
		if (iou->cluster.ondisk->clu_flags & HAMMER_CLUF_OPEN) {
			iou->io.running = 0;
			if (iou->io.waiting) {
				iou->io.waiting = 0;
				wakeup(iou);
			}
			return;
		}
	}

	/*
	 * If this was a write then clear the modified status and remove us
	 * from the dependency list.
	 *
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	iou->io.modified = 0;
	if (iou->io.entry_list) {
		TAILQ_REMOVE(iou->io.entry_list, &iou->io, entry);
		iou->io.entry_list = NULL;
	}
	iou->io.running = 0;
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This case can only occur with read-only
 * buffers.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.modified) {
		bp->b_flags |= B_LOCKED;
		return;
	}
	hammer_ref(&iou->io.lock);
	if (iou->io.lock.refs > 1 || iou->io.modified) {
		hammer_unref(&iou->io.lock);
		bp->b_flags |= B_LOCKED;
	} else {
		hammer_io_disassociate(iou, 0);

		switch(iou->io.type) {
		case HAMMER_STRUCTURE_VOLUME:
			hammer_rel_volume(&iou->volume, 1);
			break;
		case HAMMER_STRUCTURE_SUPERCL:
			hammer_rel_supercl(&iou->supercl, 1);
			break;
		case HAMMER_STRUCTURE_CLUSTER:
			hammer_rel_cluster(&iou->cluster, 1);
			break;
		case HAMMER_STRUCTURE_BUFFER:
			hammer_rel_buffer(&iou->buffer, 1);
			break;
		}
	}
}
static void
hammer_io_fsync(struct vnode *vp)
{
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.
 *
 * This code enforces the following write ordering: buffers, then cluster
 * headers, then volume headers (sketched after hammer_io_checkwrite()).
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * A modified cluster with no dependencies can be closed.
	 */
	if (iou->io.type == HAMMER_STRUCTURE_CLUSTER && iou->io.modified) {
		hammer_cluster_t cluster = &iou->cluster;

		if (TAILQ_EMPTY(&cluster->io.deplist)) {
			cluster->ondisk->clu_flags &= ~HAMMER_CLUF_OPEN;
			kprintf("CLOSE CLUSTER %d:%d\n",
				cluster->volume->vol_no,
				cluster->clu_no);
		}
	}
	return(0);
}
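/*
 * Sketch of the ordering referenced above (informal, derived from the
 * deplist mechanism): a modified buffer keeps its cluster header marked
 * open and on the cluster's deplist, so writes retire in the order
 *
 *	data/B-Tree buffers  ->  cluster headers  ->  volume headers
 *
 * and a cluster is only closed, via hammer_io_checkwrite(), once its
 * deplist has drained.
 */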
/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
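/*
 * These callbacks only fire for bps that were hooked up by
 * hammer_io_read()/hammer_io_new() via:
 *
 *	bp->b_ops = &hammer_bioops;
 *	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
 *
 * which is why each callback above recovers its hammer_io with
 * LIST_FIRST(&bp->b_dep).
 */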