/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.50 2008/07/14 20:27:54 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
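
/*
 * Editor's sketch (not part of the original source): the typical hammer_io
 * lifecycle implied by the comment above, assuming a caller that holds a
 * reference on the owning structure.  The function names are real functions
 * from this file; the exact sequence is illustrative only.
 *
 *	hammer_io_init(io, hmp, HAMMER_STRUCTURE_META_BUFFER);
 *	error = hammer_io_read(devvp, io, limit);
 *			(bp acquired, actively associated with the io)
 *	hammer_modify_buffer(trans, buffer, base, len);
 *			(dirties the io, generates undo)
 *	hammer_modify_buffer_done(buffer);
 *	bp = hammer_io_release(&buffer->io, 0);
 *			(bp left passively associated; any bp handed
 *			 back must be brelse()'d by the caller)
 */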
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriately for
 * reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}
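
/*
 * Editor's note: the wakeup matching the tsleep above is issued by
 * hammer_io_complete() further down, when hmp->io_running_space drops
 * to zero.
 */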

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read when reading a 16K buffer.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
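
/*
 * Editor's note (assumption, not from the original source): callers are
 * expected to pass the owning volume's maxbuf_off as the clustering limit
 * so device-level read-ahead stops at the end of the volume's buffer
 * area, e.g.:
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxbuf_off);
 */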
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		BUF_KERNPROC(bp);
	} else if (io->released) {
		regetblk(bp);
		BUF_KERNPROC(bp);
		io->released = 0;
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
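
/*
 * Editor's note: the hammer_io_modify(io, 0) call above dirties the IO and
 * queues it on the proper mod_list without bumping modify_refs, matching
 * the "modified but not ref'd" contract in the function comment, so no
 * hammer_io_modify_done() call is required by the caller.
 */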
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);

	if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
		if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
			hammer_io_clear_modify(&iou->io, 1);
			bundirty(bp);
			iou->io.reclaim = 1;
			hammer_io_deallocate(bp);
		} else {
			KKASSERT((bp->b_flags & B_LOCKED) == 0);
			bundirty(bp);
			bp->b_flags |= B_NOCACHE|B_RELBUF;
		}
		brelse(bp);
	}
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.  Disposal
 * simply means the caller finishes decrementing the ref-count on the
 * IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else if (io->reclaim) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			/* return the bp (bp passively associated) */
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
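
/*
 * Editor's sketch of the disposal contract described above (hypothetical
 * caller, not from the original source):
 *
 *	struct buf *bp;
 *
 *	bp = hammer_io_release(&buffer->io, 0);
 *	hammer_unref(&buffer->io.lock);	(finish dropping the ref)
 *	if (bp)
 *		brelse(bp);		(caller disposes of the bp)
 */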
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * structure.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
}
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
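
/*
 * Editor's sketch of the modify API pairing described above (hypothetical
 * field name; the functions themselves are defined below):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 */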
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			hmp->locked_dirty_space += io->bytes;
			hammer_count_dirtybufspace += io->bytes;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			hmp->locked_dirty_space += io->bytes;
			hammer_count_dirtybufspace += io->bytes;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
static void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}
void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
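
/*
 * Editor's sketch: the interlock pair above brackets a direct write so
 * that hammer_io_modify() callers block while the write is being staged
 * (illustrative usage, not from the original source):
 *
 *	hammer_io_write_interlock(io);	(modify_refs becomes -1)
 *	... stage and issue the write of io->bp ...
 *	hammer_io_done_interlock(io);	(modify_refs back to 0,
 *					 waiters are woken)
 */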
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
		}
	}
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);
	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we allow it to proceed.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
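
/*
 * Editor's note: this ops vector takes effect on a given buffer when
 * b_ops is pointed at it and the hammer_io is hooked onto the buffer's
 * dependency list, as done in hammer_io_read()/hammer_io_new() above:
 *
 *	bp->b_ops = &hammer_bioops;
 *	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
 */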
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * A second-level bio already resolved to a zone-2 offset (typically by
 * the BMAP code, or by a previous hammer_io_direct_write()), is passed.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		zone2_offset &= HAMMER_OFF_SHORT_MASK;

		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   zone2_offset;
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
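
/*
 * Editor's summary of the offset translation chain used above (the field
 * names are from this file):
 *
 *	zone-X (large-data) offset	bio->bio_offset
 *	  -> zone-2 raw-buffer		hammer_blockmap_lookup()
 *	  -> volume-relative		zone2_offset & HAMMER_OFF_SHORT_MASK
 *	  -> device offset		volume->ondisk->vol_buf_beg + offset
 */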
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
#if 0
static void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp = nbio->bio_buf;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 * 2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->flags |= HAMMER_RECF_DIRECT_IO;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			 ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error) {
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 */
static void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	obio = pop_bio(nbio);
	biodone(obio);
	KKASSERT(record != NULL && (record->flags & HAMMER_RECF_DIRECT_IO));
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.  We must
 * ensure that the data is available on-media to other consumers
 * such as the reblocker or mirroring code.
 *
 * Note that other consumers might access the data via the block
 * device's buffer cache and not the high level vnode's buffer cache.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	crit_enter();
	while (record->flags & HAMMER_RECF_DIRECT_IO) {
		record->flags |= HAMMER_RECF_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}
	crit_exit();
}
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    &iinfo);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}