/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */

/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * data buffers.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we have the opportunity.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static void hammer_io_direct_read_complete(struct bio *nbio);
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}

/*
 * Determine if an io can be clustered for the storage cdev.  We have to
 * be careful to avoid creating overlapping buffers.
 *
 * (1) Any clustering is limited to within a largeblock, since going into
 *     an adjacent largeblock will change the zone.
 *
 * (2) The large-data zone can contain mixed buffer sizes.  Other zones
 *     contain only HAMMER_BUFSIZE (16K) sized buffers.
 */
static int
hammer_io_clusterable(hammer_io_t io, hammer_off_t *limitp)
{
	hammer_buffer_t buffer;
	hammer_off_t eoz;

	/*
	 * Can't cluster non hammer_buffer_t's
	 */
	if (io->type != HAMMER_STRUCTURE_DATA_BUFFER &&
	    io->type != HAMMER_STRUCTURE_META_BUFFER &&
	    io->type != HAMMER_STRUCTURE_UNDO_BUFFER) {
		return(0);
	}

	/*
	 * We cannot cluster the large-data zone.  This primarily targets
	 * the reblocker.  The normal file handling code will still cluster
	 * file reads via file vnodes.
	 */
	buffer = (void *)io;
	if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_LARGE_DATA) {
		return(0);
	}

	/*
	 * Do not allow the cluster operation to cross a largeblock
	 * boundary.
	 */
	eoz = (io->offset + HAMMER_LARGEBLOCK_SIZE64 - 1) &
	      ~HAMMER_LARGEBLOCK_MASK64;
	if (*limitp > eoz)
		*limitp = eoz;
	return(1);
}
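
/*
 * Illustrative example (not in the original source): with 8MB
 * largeblocks, an io at device offset 0x12345678 yields
 * eoz = (0x12345678 + 0x7fffff) & ~0x7fffff = 0x12800000, so a
 * caller-supplied *limitp beyond that point is clamped to the
 * largeblock boundary and the cluster never crosses into the
 * adjacent largeblock (and hence into another zone).
 */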

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_disassociate: bad io type");
		break;
	}
}

/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		for (;;) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running == 0)
				break;
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
			if (io->running == 0)
				break;
		}
	}
}
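
/*
 * Illustrative note on the sleep pattern above: tsleep_interlock()
 * registers the sleep address before io->running is re-tested, so a
 * wakeup() issued from biodone context between the test and the
 * tsleep() is not lost; PINTERLOCKED makes the tsleep() consume such
 * a pending wakeup instead of blocking on it.
 */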

/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/O's but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
	struct hammer_io iodummy;
	hammer_io_t io;

	/*
	 * Degenerate case, no I/O is running
	 */
	if (TAILQ_EMPTY(&hmp->iorun_list)) {
		if (doflush)
			hammer_io_flush_sync(hmp);
		return;
	}
	bzero(&iodummy, sizeof(iodummy));
	iodummy.type = HAMMER_STRUCTURE_DUMMY;

	/*
	 * Add placemarker and then wait until it becomes the head of
	 * the list.
	 */
	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
		tsleep(&iodummy, 0, ident, 0);
	}

	/*
	 * Chain in case several placemarkers are present.
	 */
	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
	io = TAILQ_FIRST(&hmp->iorun_list);
	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
		wakeup(io);

	if (doflush)
		hammer_io_flush_sync(hmp);
}
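
/*
 * Illustrative note: the placemarker scheme works because
 * hammer_io_complete() removes finished I/Os from iorun_list and,
 * when the next entry is a DUMMY placemarker, wakes it up.  Our
 * placemarker therefore reaches the head only after every I/O queued
 * ahead of it has completed, and the chain-wakeup above propagates
 * the wakeup to any additional placemarkers queued behind us.
 */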

/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
		KKASSERT(hammer_isactive(&io->lock));
	}
}

/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a meta-data buffer.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
	buffer->io.bp->b_flags |= B_NOTMETA;
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * together related buffer loads.
 *
 * Note that the clustering which occurs here is clustering within the
 * block device... typically meta-data and small-file data.  Regular
 * file clustering is different and handled in hammer_vnops.c
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable &&
		    hammer_io_clusterable(io, &limit)) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS,
					     &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 */
void
hammer_io_advance(struct hammer_io *io)
{
	if (io->bp)
		buf_act_advance(io->bp);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(hammer_isactive(&iou->io.lock) == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	crit_exit();
	return(error);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				bqrelse(bp);
				/* bp left passively associated */
				bp = NULL;
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
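
/*
 * Summary of the dispositions above (illustrative, not in the
 * original source):
 *
 *	explicit flush/reclaim, clean and idle	- bp disassociated and
 *						  returned for disposal
 *	modified DATA/UNDO			- bdwrite()n, bp left
 *						  passively associated
 *	modified META/VOLUME			- left dirty for the flusher
 *	clean, bp not yet released		- ownership passed to the
 *						  kernel, bp left passively
 *						  associated
 *	clean, bp already released		- LRU reset or disassociation
 *						  depending on B_LOCKED/reclaim
 */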

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_rel(&io->lock);

	if (hammer_debug_io & 0x0002)
		kprintf("hammer io_write %016jx\n",
			(intmax_t)bp->b_bio1.bio_offset);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}

/************************************************************************
 *				BUFFER DEPENDENCIES			*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
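
/*
 * Illustrative summary of the modify_refs protocol: a positive count
 * tracks active hammer_modify_*() callers, -1 marks a held write
 * interlock, and 0 means quiescent, so writers and modifiers exclude
 * each other:
 *
 *	hammer_io_write_interlock(io);	- waits for modify_refs == 0,
 *					  then sets it to -1
 *	(stage the write)
 *	hammer_io_done_interlock(io);	- resets to 0 and wakes any
 *					  waitmod sleepers
 */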

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * related buffer.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * related buffer.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
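
/*
 * Typical usage sketch (illustrative; elm is a hypothetical pointer
 * into buffer->ondisk):
 *
 *	hammer_modify_buffer(trans, buffer, &elm->field,
 *			     sizeof(elm->field));
 *	elm->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * Passing a NULL base and zero len skips undo generation, which is
 * used for modifications that do not require an undo record.
 */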

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(hammer_isactive(&io->lock));
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_set_modlist: bad io type");
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
	struct hammer_mount *hmp = iou->io.hmp;
	struct hammer_io *ionext;

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		hmp->io_running_space -= iou->io.bytes;
		if (hmp->io_running_wakeup &&
		    hmp->io_running_space < hammer_limit_running_io / 2) {
			hmp->io_running_wakeup = 0;
			wakeup(&hmp->io_running_wakeup);
		}
		KKASSERT(hmp->io_running_space >= 0);
		iou->io.running = 0;

		/*
		 * Remove from iorun list and wakeup any multi-io waiter(s).
		 */
		if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
			ionext = TAILQ_NEXT(&iou->io, iorun_entry);
			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
				wakeup(ionext);
		}
		TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, try to do it now.  The operation will fail if there are
	 * refs or if hammer_io_deallocate() is unable to gain the
	 * interlock.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
		/*
		 * We cannot safely disassociate a bp from a referenced
		 * or interlocked HAMMER structure.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else if (iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
		hammer_put_interlock(&iou->io.lock, 0);
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
		hammer_put_interlock(&iou->io.lock, 1);
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we fall through and allow the read to proceed.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_rel(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
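
/*
 * Illustrative note: the kernel reaches these hooks through bp->b_ops,
 * which hammer_io_read() and hammer_io_new() install on every buffer
 * they associate.  For example, a bdwrite()n buffer is vetted by
 * .io_checkwrite before the kernel initiates the write, and getnewbuf()
 * invokes .io_deallocate when it wants to recycle a passively
 * associated buffer.
 */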

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);

		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			(long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			(long long)nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_critical)
			Debugger("data_crc on read");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
		       hammer_record_t record)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	/*
	 * Issue or execute the I/O.  The new memory record must replace
	 * the old one before the I/O completes, otherwise a reacquisition
	 * of the buffer will load the old media data instead of the new.
	 */
	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			hammer_ip_replace_bulk(hmp, record);
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			hammer_ip_replace_bulk(hmp, record);
			biodone(bio);
		}
	}
	if (error) {
		/*
		 * Major suckage occurred.  Also note:  The record was
		 * never added to the tree so we do not have to worry
		 * about the backend.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			(long long)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~(HAMMER_RECF_DIRECT_IO |
				   HAMMER_RECF_DIRECT_WAIT);
		/* record can disappear once DIRECT_IO flag is cleared */
		wakeup(&record->flags);
	} else {
		record->flags &= ~HAMMER_RECF_DIRECT_IO;
		/* record can disappear once DIRECT_IO flag is cleared */
	}
}

/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}

/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
}

/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		biowait(&bp->b_bio1, "hmrFLS");
		relpbuf(bp, NULL);
	}
}

/*
 * Limit the amount of backlog which we allow to build up.
 */
void
hammer_io_limit_backlog(hammer_mount_t hmp)
{
	while (hmp->io_running_space > hammer_limit_running_io) {
		hmp->io_running_wakeup = 1;
		tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
	}
}
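
/*
 * Illustrative note: the matching wakeup is in hammer_io_complete(),
 * which clears io_running_wakeup and calls wakeup() once
 * io_running_space drains below half of hammer_limit_running_io.
 */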