/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * when we can.
 *
 * The io_token is required for anything which might race bioops and bio_done
 * callbacks, with one exception: a successful hammer_try_interlock_norefs().
 * The fs_token will be held in all other cases.
 */
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static void hammer_indirect_callback(struct bio *bio);
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static __inline void hammer_io_flush_mark(hammer_volume_t volume);
static struct bio_ops hammer_bioops;
static int
hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
{
	hammer_off_t io1_offset;
	hammer_off_t io2_offset;

	/*
	 * Encoded offsets are neither valid block device offsets
	 * nor valid zone-X offsets.
	 */
	io1_offset = HAMMER_ENCODE(0, io1->volume->vol_no, io1->offset);
	io2_offset = HAMMER_ENCODE(0, io2->volume->vol_no, io2->offset);

	if (io1_offset < io2_offset)
		return(-1);
	if (io1_offset > io2_offset)
		return(1);
	return(0);
}

RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);
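
/*
 * Note on the comparator above (inferred from the HAMMER_ENCODE usage):
 * folding the volume number into the high bits of a pseudo zone-0 offset
 * gives the RB tree a single 64-bit sort key ordered first by volume and
 * then by in-volume offset, so ios from different volumes never compare
 * equal even when their in-volume offsets match.
 */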
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The io must be interlocked and marked appropriately for
 * reclamation.
 *
 * The io must be in a released state with the io->bp owned and
 * locked by the caller of this function.  When not called from an
 * io_deallocate() this cannot race an io_deallocate() since the
 * kernel would be unable to get the buffer lock in that case.
 * (The released state in this case means we own the bp, not the
 * hammer_io structure).
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * This function is guaranteed not to race against anything because we
 * own both the io lock and the bp lock and are interlocked with no
 * references.
 */
static void
hammer_io_disassociate(hammer_io_t io)
{
	struct buf *bp = io->bp;

	KKASSERT(io->released);
	KKASSERT(io->modified == 0);
	KKASSERT(hammer_buf_peek_io(bp) == io);
	buf_dep_init(bp);
	io->bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
	}
	if (io->reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		io->reclaim = 0;
	}

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		HAMMER_ITOV(io)->ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		HAMMER_ITOB(io)->ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		hpanic("bad io type");
		break;
	}
}
/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	while (io->running) {
		io->waiting = 1;
		tsleep_interlock(io, 0);
		if (io->running)
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
	}
	lwkt_reltoken(&hmp->io_token);
}
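
/*
 * The tsleep_interlock()/PINTERLOCKED pair above narrows the window
 * mentioned in the XXX: the interlock registers us on io's wait queue
 * before io->running is re-tested, so a wakeup arriving between the
 * test and the tsleep() is not lost, and the hz timeout bounds whatever
 * race remains.
 */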
/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/Os but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
	struct hammer_io iodummy;
	hammer_io_t io;

	/*
	 * Degenerate case, no I/O is running
	 */
	lwkt_gettoken(&hmp->io_token);
	if (TAILQ_EMPTY(&hmp->iorun_list)) {
		lwkt_reltoken(&hmp->io_token);
		if (doflush)
			hammer_io_flush_sync(hmp);
		return;
	}
	bzero(&iodummy, sizeof(iodummy));
	iodummy.type = HAMMER_STRUCTURE_DUMMY;

	/*
	 * Add placemarker and then wait until it becomes the head of
	 * the list.
	 */
	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
		tsleep(&iodummy, 0, ident, 0);
	}

	/*
	 * Chain in case several placemarkers are present.
	 */
	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
	io = TAILQ_FIRST(&hmp->iorun_list);
	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
		wakeup(io);
	lwkt_reltoken(&hmp->io_token);

	if (doflush)
		hammer_io_flush_sync(hmp);
}
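
/*
 * The placemarker pattern above, in outline (each waiter uses its own
 * stack-local dummy io of type HAMMER_STRUCTURE_DUMMY):
 *
 *	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
 *	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy)
 *		tsleep(&iodummy, 0, ident, 0);
 *	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
 *
 * hammer_io_complete() wakes the next list entry when it is a dummy,
 * which is why consecutive placemarkers must chain the wakeup as done
 * above.
 */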
/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
		KKASSERT(hammer_isactive(&io->lock));
	}
	lwkt_reltoken(&hmp->io_token);
}
void
hammer_io_clear_error_noassert(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a meta-data buffer.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 *
 * The caller has a ref on the buffer preventing the bp from
 * being disassociated from it.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
		hammer_mount_t hmp = buffer->io.hmp;

		lwkt_gettoken(&hmp->io_token);
		buffer->io.bp->b_flags |= B_NOTMETA;
		lwkt_reltoken(&hmp->io_token);
	}
}
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster.
 *
 * Note that the caller (hammer_ondisk.c) may place further restrictions
 * on clusterability via the limit (in bytes).  Typically large-data
 * zones cannot be clustered due to their mixed buffer sizes.  This is
 * not an issue since such clustering occurs in hammer_vnops at the
 * regular file layer, whereas this is the buffered block device layer.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		atomic_add_long(&hammer_count_io_running_read, io->bytes);
		if (hammer_cluster_enable && limit > io->bytes) {
			error = cluster_read(devvp, io->offset + limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_SIZE, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		atomic_add_long(&hammer_count_io_running_read, -io->bytes);

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
			const char *metatype;

			switch(io->type) {
			case HAMMER_STRUCTURE_VOLUME:
				metatype = "volume";
				break;
			case HAMMER_STRUCTURE_META_BUFFER:
				switch(HAMMER_ITOB(io)->zoneX_offset
					& HAMMER_OFF_ZONE_MASK) {
				case HAMMER_ZONE_BTREE:
					metatype = "btree";
					break;
				case HAMMER_ZONE_META:
					metatype = "meta";
					break;
				case HAMMER_ZONE_FREEMAP:
					metatype = "freemap";
					break;
				default:
					metatype = "meta?";
					break;
				}
				break;
			case HAMMER_STRUCTURE_DATA_BUFFER:
				metatype = "data";
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				metatype = "undo";
				break;
			default:
				metatype = "unknown";
				break;
			}
			hdkprintf("zone2_offset %016jx %s\n",
				  (intmax_t)bp->b_bio2.bio_offset, metatype);
		}
		bp->b_flags &= ~B_IODEBUG;
		bp->b_ops = &hammer_bioops;
		hammer_buf_attach_io(bp, io); /* locked by the io lock */
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		hammer_buf_attach_io(bp, io); /* locked by the io lock */
		io->released = 0;	/* we hold an active lock on bp */
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else if (io->released) {
		regetblk(bp);
		BUF_KERNPROC(bp);
		io->released = 0;
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 *
 * The io->bp cannot go away while the buffer is referenced.
 */
void
hammer_io_advance(struct hammer_io *io)
{
	if (io->bp)
		buf_act_advance(io->bp);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_t io;
	hammer_mount_t hmp;
	hammer_off_t phys_offset;
	struct buf *bp;

	hmp = volume->io.hmp;
	lwkt_gettoken(&hmp->io_token);

	/*
	 * If a device buffer already exists for the specified physical
	 * offset use that, otherwise instantiate a buffer to cover any
	 * related VM pages, set BNOCACHE, and brelse().
	 */
	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
		bremfree(bp);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);

	if ((io = hammer_buf_peek_io(bp)) != NULL) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 1);
		bundirty(bp);
		io->released = 0;
		BUF_KERNPROC(bp);
		io->reclaim = 1;
		io->waitdep = 1;	/* XXX this is a fs_token field */
		KKASSERT(hammer_isactive(&io->lock) == 1);
		hammer_rel_buffer(HAMMER_ITOB(io), 0);
		/*hammer_io_deallocate(bp);*/
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffer may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate(io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bp->b_flags |= B_CLUSTEROK;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(io);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(io);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(io);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
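
/*
 * Summary of the release cases handled above:
 *
 *	explicit flush/reclaim, clean+idle	- disassociate, return bp
 *	modified DATA/UNDO			- bdwrite(), bp left
 *						  passively associated
 *	modified META/VOLUME			- held for the flusher, bp
 *						  left associated
 *	clean, we still own the bp		- disassociate or release
 *						  passively
 *	clean, already released			- destroy on B_LOCKED or
 *						  reclaim, else leave for the
 *						  kernel's LRU
 */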
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;
	hammer_mount_t hmp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0)
		return;

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * structure.
	 *
	 * The io_token should not be required here as only one flush can
	 * be pending on an io at a time.
	 */
	hmp = io->hmp;
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	} else {
		io->released = 1;
		BUF_KERNPROC(bp);
	}

	if (reclaim) {
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_rel(&io->lock);

	if (hammer_debug_io & 0x0002)
		hdkprintf("%016jx\n", bp->b_bio1.bio_offset);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 *
	 * NOTE: We do not hold io_token so an atomic op is required to
	 *	 update io_running_space.
	 */
	io->running = 1;
	atomic_add_long(&hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	lwkt_gettoken(&hmp->io_token);
	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
	lwkt_reltoken(&hmp->io_token);
	cluster_awrite(bp);
	hammer_io_flush_mark(io->volume);
}
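
/*
 * The accounting started above is reversed by hammer_io_complete(),
 * which subtracts io->bytes from io_running_space, removes the io from
 * iorun_list, and wakes any waiters once the write finishes.
 */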
/************************************************************************
 *				BUFFER DEPENDENCIES			*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 *
 * The referenced io prevents races.
 */
static void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	/*
	 * NOTE: It is important not to set the modified bit
	 *	 until after we have acquired the bp or we risk
	 *	 racing against checkwrite.
	 */
	hammer_lock_ex(&io->lock);
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
	}
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	hammer_unlock(&io->lock);
}
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
/*
 * The write interlock blocks other threads trying to modify a buffer
 * (they block in hammer_io_modify()) after us, or blocks us while other
 * threads are in the middle of modifying a buffer.
 *
 * The caller also has a ref on the io, however if we are not careful
 * we will race bioops callbacks (checkwrite).  To deal with this
 * we must at least acquire and release the io_token, and it is probably
 * better to hold it through the setting of modify_refs.
 */
void
hammer_io_write_interlock(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
	lwkt_reltoken(&hmp->io_token);
}
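
/*
 * modify_refs thus acts as a shared/exclusive count: positive values
 * are normal modify references, zero is idle, and the -1 set above is
 * an exclusive sentinel which hammer_io_modify() spins on, giving the
 * interlock holder sole access until hammer_io_done_interlock().
 */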
void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * related volume.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * related buffer.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	hammer_mount_t hmp;

	/*
	 * io_token is needed to avoid races on mod_root
	 */
	if (io->modified == 0)
		return;
	hmp = io->hmp;
	lwkt_gettoken(&hmp->io_token);
	if (io->modified == 0) {
		lwkt_reltoken(&hmp->io_token);
		return;
	}

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_root != NULL);
	if (io->mod_root == &io->hmp->volu_root ||
	    io->mod_root == &io->hmp->meta_root) {
		io->hmp->locked_dirty_space -= io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, -io->bytes);
	}
	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
	io->mod_root = NULL;
	io->modified = 0;

	lwkt_reltoken(&hmp->io_token);

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = HAMMER_ITOB(io);
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1,
							  HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(hammer_isactive(&io->lock));
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_root.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 *
 * mod_root requires io_token protection.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	KKASSERT(io->modified == 0);
	if (io->mod_root) {
		lwkt_gettoken(&hmp->io_token);
		if (io->mod_root) {
			KKASSERT(io->mod_root == &io->hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	KKASSERT(io->mod_root == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_root = &hmp->volu_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_root = &hmp->meta_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_root = &hmp->undo_root;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_root = &hmp->data_root;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		hpanic("bad io type");
		break; /* NOT REACHED */
	}
	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
		hpanic("duplicate entry @ %d:%015jx",
			io->volume->vol_no, io->offset);
	}
	lwkt_reltoken(&hmp->io_token);
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************/

/*
 * Pre-IO initiation kernel callback - cluster build only
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_start(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a data buffer after we have initiated write
 *	 I/O.
 *
 * NOTE: MPSAFE callback
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_complete(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	struct hammer_mount *hmp = io->hmp;
	struct hammer_io *ionext;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT(io->released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (io->running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			lwkt_gettoken(&hmp->fs_token);
			hammer_critical_error(hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			lwkt_reltoken(&hmp->fs_token);

			switch(io->type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (io->ioerror == 0) {
					io->ioerror = 1;
					hammer_ref(&io->lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
			hammer_io_set_modlist(io);
			io->modified = 1;
		}
		hammer_stats_disk_write += io->bytes;
		atomic_add_long(&hammer_count_io_running_write, -io->bytes);
		atomic_add_long(&hmp->io_running_space, -io->bytes);
		KKASSERT(hmp->io_running_space >= 0);
		io->running = 0;

		/*
		 * Remove from iorun list and wakeup any multi-io waiter(s).
		 */
		if (TAILQ_FIRST(&hmp->iorun_list) == io) {
			ionext = TAILQ_NEXT(io, iorun_entry);
			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
				wakeup(ionext);
		}
		TAILQ_REMOVE(&hmp->iorun_list, io, iorun_entry);
	} else {
		hammer_stats_disk_read += io->bytes;
	}

	if (io->waiting) {
		io->waiting = 0;
		wakeup(io);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, try to do it now.  The operation will fail if there are
	 * refs or if hammer_io_deallocate() is unable to gain the
	 * interlock.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	hammer_mount_t hmp;

	hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && io->running == 0);
	if (hammer_try_interlock_norefs(&io->lock) == 0) {
		/*
		 * We cannot safely disassociate a bp from a referenced
		 * or interlocked HAMMER structure.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
	} else if (io->modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		hammer_put_interlock(&io->lock, 0);
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.  The kernel has
		 * locked the buffer and therefore our io must be
		 * in a released state.
		 */
		hammer_io_disassociate(io);
		if (io->type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(io->bp == NULL);
			KKASSERT(io->mod_root == NULL);
			io->mod_root = &hmp->lose_root;
			if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
				hpanic("duplicate entry @ %d:%015jx",
					io->volume->vol_no, io->offset);
			}
		}
		hammer_put_interlock(&io->lock, 1);
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * bioops callback - hold io_token
 */
static int
hammer_io_fsync(struct vnode *vp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_sync(struct mount *mp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * bioops callback - hold io_token
 */
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
	/* nothing to do, so io_token not needed */
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we allow the read to proceed.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkread(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}
/*
 * The kernel is asking us whether it can write out a dirty buffer or not.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	hammer_mount_t hmp = io->hmp;

	/*
	 * This shouldn't happen under normal operation.
	 */
	lwkt_gettoken(&hmp->io_token);
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			hpanic("illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * We have to be able to interlock the IO to safely modify any
	 * of its fields without holding the fs_token.  If we can't lock
	 * it then we are racing someone.
	 *
	 * Our ownership of the bp lock prevents the io from being ripped
	 * out from under us.
	 */
	if (hammer_try_interlock_norefs(&io->lock) == 0) {
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * The modified bit must be cleared prior to the initiation of
	 * any IO (returning 0 initiates the IO).  Because this is a
	 * normal data buffer hammer_io_clear_modify() runs through a
	 * simple degenerate case.
	 *
	 * Return 0 will cause the kernel to initiate the IO, and we
	 * must normally clear the modified bit before we begin.  If
	 * the io has modify_refs we do not clear the modified bit,
	 * otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_rel(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	atomic_add_long(&io->hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);

	hammer_put_interlock(&io->lock, 1);
	lwkt_reltoken(&hmp->io_token);

	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	/* nothing to do, so io_token not needed */
	return(0);
}
static struct bio_ops hammer_bioops = {
	.io_start = hammer_io_start,
	.io_complete = hammer_io_complete,
	.io_deallocate = hammer_io_deallocate,
	.io_fsync = hammer_io_fsync,
	.io_sync = hammer_io_sync,
	.io_movedeps = hammer_io_movedeps,
	.io_countdeps = hammer_io_countdeps,
	.io_checkread = hammer_io_checkread,
	.io_checkwrite = hammer_io_checkwrite,
};
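
/*
 * The table above wires HAMMER into the kernel's buffer cache dependency
 * mechanism.  bp->b_ops is pointed at hammer_bioops when a bp is attached
 * to a hammer_io (see hammer_io_read() and hammer_io_new()), after which
 * the kernel consults io_checkwrite/io_deallocate before writing out or
 * reusing a passively associated buffer.
 */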
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error == 0) {
		KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
			 HAMMER_ZONE_RAW_BUFFER);

		/*
		 * Resolve volume and raw-offset for 3rd level bio.  The
		 * offset will be specific to the volume.
		 */
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;

		if (error == 0) {
			/*
			 * 3rd level bio (the caller has already pushed once)
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   (zone2_offset & HAMMER_OFF_SHORT_MASK);
			hammer_stats_disk_read += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	}
	if (error) {
		hdkprintf("failed @ %016llx\n", (long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
/*
 * This works similarly to hammer_io_direct_read() except instead of
 * directly reading from the device into the bio we instead indirectly
 * read through the device's buffer cache and then copy the data into
 * the bio.
 *
 * If leaf is non-NULL and validation is enabled, the CRC will be checked.
 *
 * This routine also executes asynchronously.  It allows hammer strategy
 * calls to operate asynchronously when in double_buffer mode (in addition
 * to operating asynchronously when in normal mode).
 */
int
hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error == 0) {
		KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
			 HAMMER_ZONE_RAW_BUFFER);

		/*
		 * Resolve volume and raw-offset for 3rd level bio.  The
		 * offset will be specific to the volume.
		 */
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;

		if (error == 0) {
			/*
			 * Convert to the raw volume->devvp offset and acquire
			 * the buf, issuing async I/O if necessary.
			 */
			buf_offset = volume->ondisk->vol_buf_beg +
				     (zone2_offset & HAMMER_OFF_SHORT_MASK);

			if (leaf && hammer_verify_data) {
				bio->bio_caller_info1.uvalue32 = leaf->data_crc;
				bio->bio_caller_info2.index = 1;
			} else {
				bio->bio_caller_info2.index = 0;
			}

			breadcb(volume->devvp, buf_offset, bp->b_bufsize,
				hammer_indirect_callback, bio);
		}
		hammer_rel_volume(volume, 0);
	}
	if (error) {
		hdkprintf("failed @ %016llx\n", (long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
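
/*
 * Hand-off convention used by the indirect read path above: the expected
 * CRC rides in the original bio's bio_caller_info1.uvalue32, with
 * bio_caller_info2.index non-zero only when validation is enabled, while
 * breadcb() passes the original bio through the device bio's
 * bio_caller_info1.ptr for hammer_indirect_callback() to chain to.
 */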
/*
 * Indirect callback on completion.  bio/bp specify the device-backed
 * buffer.  bio->bio_caller_info1.ptr holds obio.
 *
 * obio/obp is the original regular file buffer.  obio->bio_caller_info*
 * contains the crc specification.
 *
 * We are responsible for calling bpdone() and bqrelse() on bio/bp, and
 * for calling biodone() on obio.
 */
static void
hammer_indirect_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *obp;
	struct bio *obio;

	/*
	 * If BIO_DONE is already set the device buffer was already
	 * fully valid (B_CACHE).  If it is not set then I/O was issued
	 * and we have to run I/O completion as the last bio.
	 *
	 * Nobody is waiting for our device I/O to complete, we are
	 * responsible for bqrelse()ing it which means we also have to do
	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
	 * does not do for us in this case).
	 *
	 * Any preexisting device buffer should match the requested size,
	 * but due to big-block recycling and other factors there is some
	 * fragility there, so we assert that the device buffer covers
	 * the request.
	 */
	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);
	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

	obio = bio->bio_caller_info1.ptr;
	obp = obio->bio_buf;

	if (bp->b_flags & B_ERROR) {
		obp->b_flags |= B_ERROR;
		obp->b_error = bp->b_error;
	} else if (obio->bio_caller_info2.index &&
		   obio->bio_caller_info1.uvalue32 !=
		    crc32(bp->b_data, bp->b_bufsize)) {
		obp->b_flags |= B_ERROR;
		obp->b_error = EIO;
	} else {
		KKASSERT(bp->b_bufsize >= obp->b_bufsize);
		bcopy(bp->b_data, obp->b_data, obp->b_bufsize);
		obp->b_resid = 0;
		obp->b_flags |= B_AGE;
	}
	biodone(obio);
	bqrelse(bp);
}
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECG_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
		       hammer_record_t record)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(hammer_is_zone2_mapped_index(
		HAMMER_ZONE_DECODE(buf_offset)));
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
	/*
	 * Issue or execute the I/O.  The new memory record must replace
	 * the old one before the I/O completes, otherwise a reacquisition of
	 * the buffer will load the old media data instead of the new.
	 */
	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);

			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->gflags |= HAMMER_RECG_DIRECT_IO |
					  HAMMER_RECG_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			hammer_ip_replace_bulk(hmp, record);
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			hammer_ip_replace_bulk(hmp, record);
			biodone(bio);
		}
	}
	if (error) {
		/*
		 * Major suckage occurred.  Also note:  The record was
		 * never added to the tree so we do not have to worry
		 * about the backend.
		 */
		hdkprintf("failed @ %016llx\n", (long long)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}
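
/*
 * To summarize the two paths above: a buffer-aligned write of at least
 * HAMMER_BUFSIZE bypasses the HAMMER buffer system entirely (stale
 * aliases are deleted and the RECG_DIRECT_IO completion protocol is
 * armed), while anything smaller is copied into a shared hammer_buffer,
 * which is why only the full-block path needs a bio_done callback.
 */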
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 *
 * NOTE: MPSAFE callback, only modify fields we have explicit
 *	 access to (the bp and the record->gflags).
 */
static void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record;
	hammer_mount_t hmp;

	record = nbio->bio_caller_info1.ptr;
	KKASSERT(record != NULL);
	hmp = record->ip->hmp;

	lwkt_gettoken(&hmp->io_token);

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		lwkt_gettoken(&hmp->fs_token);
		hammer_critical_error(hmp, record->ip, bp->b_error,
				      "while writing bulk data");
		lwkt_reltoken(&hmp->fs_token);
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
				    HAMMER_RECG_DIRECT_WAIT);
		/* record can disappear once DIRECT_IO flag is cleared */
		wakeup(&record->flags);
	} else {
		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
		/* record can disappear once DIRECT_IO flag is cleared */
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Wait for I/O to complete
	 */
	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
		lwkt_gettoken(&hmp->io_token);
		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
	}
}
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;

	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;

	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);

	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	/*
	 * Warning: FINDBLK_TEST returns stable storage but not stable
	 * contents.  It happens to be ok in this case.
	 */
	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}
/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static __inline void
hammer_io_flush_mark(hammer_volume_t volume)
{
	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
}
/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			atomic_clear_int(&volume->vol_flags,
					 HAMMER_VOLF_NEEDFLUSH);
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		biowait(&bp->b_bio1, "hmrFLS");
		relpbuf(bp, NULL);
	}
}
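
/*
 * Note that the BUF_CMD_FLUSH bios above are issued asynchronously and
 * chained through bio_caller_info1.cluster_head, then reaped with
 * biowait() in a second pass, so all volumes flush their device caches
 * in parallel rather than serially.
 */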
/*
 * Limit the amount of backlog which we allow to build up.
 */
void
hammer_io_limit_backlog(hammer_mount_t hmp)
{
	waitrunningbufspace();
}