/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * data buffers.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 *
 * The io_token is required for anything which might race bioops and bio_done
 * callbacks, with one exception: A successful hammer_try_interlock_norefs().
 * The fs_token will be held in all other cases.
 */
#include "hammer.h"

#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
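/*
 * Illustrative sketch of the token discipline described in the header
 * comment above.  This is not part of the driver (it is wrapped in #if 0
 * so it is never compiled): any field which can race bioops/bio_done
 * callbacks is only touched while holding the per-mount io_token.  The
 * function and the particular field accesses are hypothetical.
 */
#if 0
static void
example_io_token_usage(hammer_mount_t hmp, hammer_io_t io)
{
        lwkt_gettoken(&hmp->io_token);  /* interlock vs I/O completion */
        if (io->running == 0)
                io->waiting = 0;        /* hypothetical shared-field access */
        lwkt_reltoken(&hmp->io_token);
}
#endif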
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static void hammer_io_direct_read_complete(struct bio *nbio);
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);
/*
 * Initialize a new, already-zeroed hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
        io->volume = volume;
        io->hmp = volume->io.hmp;
        io->type = type;
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The io must be interlocked and marked appropriately for
 * reclamation.
 *
 * The io must be in a released state with the io->bp owned and
 * locked by the caller of this function.  When not called from an
 * io_deallocate() this cannot race an io_deallocate() since the
 * kernel would be unable to get the buffer lock in that case.
 * (The released state in this case means we own the bp, not the
 * hammer_io structure).
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * This function is guaranteed not to race against anything because we
 * own both the io lock and the bp lock and are interlocked with no
 * references.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.released);
        KKASSERT(iou->io.modified == 0);
        KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
        buf_dep_init(bp);
        iou->io.bp = NULL;

        /*
         * If the buffer was locked someone wanted to get rid of it.
         */
        if (bp->b_flags & B_LOCKED) {
                atomic_add_int(&hammer_count_io_locked, -1);
                bp->b_flags &= ~B_LOCKED;
        }
        if (iou->io.reclaim) {
                bp->b_flags |= B_NOCACHE|B_RELBUF;
                iou->io.reclaim = 0;
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DUMMY:
                panic("hammer_io_disassociate: bad io type");
                break;
        }
}
/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
        hammer_mount_t hmp = io->hmp;

        lwkt_gettoken(&hmp->io_token);
        while (io->running) {
                io->waiting = 1;
                tsleep_interlock(io, 0);
                if (io->running)
                        tsleep(io, PINTERLOCKED, "hmrflw", hz);
        }
        lwkt_reltoken(&hmp->io_token);
}
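/*
 * Sketch (#if 0, never compiled): the wakeup half of the sleep loop above.
 * The I/O completion path clears io->running and wakes any sleeper while
 * holding the io_token; see hammer_io_complete() further down.  The field
 * names come from the surrounding code, the function itself is hypothetical.
 */
#if 0
static void
example_io_done_wakeup(hammer_io_t io)
{
        io->running = 0;                /* I/O finished, under io_token */
        if (io->waiting) {
                io->waiting = 0;
                wakeup(io);             /* releases hammer_io_wait() */
        }
}
#endif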
/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/Os but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
        struct hammer_io iodummy;
        hammer_io_t io;

        /*
         * Degenerate case, no I/O is running
         */
        lwkt_gettoken(&hmp->io_token);
        if (TAILQ_EMPTY(&hmp->iorun_list)) {
                lwkt_reltoken(&hmp->io_token);
                if (doflush)
                        hammer_io_flush_sync(hmp);
                return;
        }
        bzero(&iodummy, sizeof(iodummy));
        iodummy.type = HAMMER_STRUCTURE_DUMMY;

        /*
         * Add placemarker and then wait until it becomes the head of
         * the list.
         */
        TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
        while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
                tsleep(&iodummy, 0, ident, 0);
        }

        /*
         * Chain in case several placemarkers are present.
         */
        TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
        io = TAILQ_FIRST(&hmp->iorun_list);
        if (io && io->type == HAMMER_STRUCTURE_DUMMY)
                wakeup(io);
        lwkt_reltoken(&hmp->io_token);

        if (doflush)
                hammer_io_flush_sync(hmp);
}
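/*
 * Sketch (#if 0): how an assumed caller, e.g. flusher-style code outside
 * this file, might use the routine above to drain all queued I/O before
 * finalizing a flush group.  The calling context is hypothetical.
 */
#if 0
static void
example_drain_io(hammer_mount_t hmp)
{
        /*
         * Wait for every HAMMER-initiated I/O queued so far and then
         * ask the devices to flush their write caches (doflush != 0).
         */
        hammer_io_wait_all(hmp, "hmrfls", 1);
}
#endif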
/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
        hammer_mount_t hmp = io->hmp;

        lwkt_gettoken(&hmp->io_token);
        if (io->ioerror) {
                io->ioerror = 0;
                hammer_rel(&io->lock);
                KKASSERT(hammer_isactive(&io->lock));
        }
        lwkt_reltoken(&hmp->io_token);
}
void
hammer_io_clear_error_noassert(struct hammer_io *io)
{
        hammer_mount_t hmp = io->hmp;

        lwkt_gettoken(&hmp->io_token);
        if (io->ioerror) {
                io->ioerror = 0;
                hammer_rel(&io->lock);
        }
        lwkt_reltoken(&hmp->io_token);
}
/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a meta-data buffer.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 *
 * The caller has a ref on the buffer preventing the bp from
 * being disassociated from it.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
        if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
                hammer_mount_t hmp = buffer->io.hmp;

                lwkt_gettoken(&hmp->io_token);
                buffer->io.bp->b_flags |= B_NOTMETA;
                lwkt_reltoken(&hmp->io_token);
        }
}
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster.
 *
 * Note that the caller (hammer_ondisk.c) may place further restrictions
 * on clusterability via the limit (in bytes).  Typically large-data
 * zones cannot be clustered due to their mixed buffer sizes.  This is
 * not an issue since such clustering occurs in hammer_vnops at the
 * regular file layer, whereas this is the buffered block device layer.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
                atomic_add_int(&hammer_count_io_running_read, io->bytes);
                if (hammer_cluster_enable && limit > io->bytes) {
                        error = cluster_read(devvp, io->offset + limit,
                                             io->offset, io->bytes,
                                             HAMMER_CLUSTER_SIZE,
                                             HAMMER_CLUSTER_SIZE,
                                             &io->bp);
                } else {
                        error = bread(devvp, io->offset, io->bytes, &io->bp);
                }
                hammer_stats_disk_read += io->bytes;
                atomic_add_int(&hammer_count_io_running_read, -io->bytes);

                /*
                 * The code generally assumes b_ops/b_dep has been set-up,
                 * even if we error out here.
                 */
                bp = io->bp;
                if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
                        const char *metatype;

                        switch(io->type) {
                        case HAMMER_STRUCTURE_VOLUME:
                                metatype = "volume";
                                break;
                        case HAMMER_STRUCTURE_META_BUFFER:
                                switch(((struct hammer_buffer *)io)->
                                        zoneX_offset & HAMMER_OFF_ZONE_MASK) {
                                case HAMMER_ZONE_BTREE:
                                        metatype = "btree";
                                        break;
                                case HAMMER_ZONE_META:
                                        metatype = "meta";
                                        break;
                                case HAMMER_ZONE_FREEMAP:
                                        metatype = "freemap";
                                        break;
                                default:
                                        metatype = "meta?";
                                        break;
                                }
                                break;
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                                metatype = "data";
                                break;
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                metatype = "undo";
                                break;
                        default:
                                metatype = "unknown";
                                break;
                        }
                        kprintf("doff %016jx %s\n",
                                (intmax_t)bp->b_bio2.bio_offset,
                                metatype);
                }
                bp->b_flags &= ~B_IODEBUG;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);

                /* io->worklist is locked by the io lock */
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                BUF_KERNPROC(bp);
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}
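/*
 * Sketch (#if 0): a caller in the spirit of hammer_ondisk.c loading a
 * buffer's backing store with the routines above.  The surrounding
 * locking/error handling is assumed and simplified, not copied from the
 * real caller; passing limit == HAMMER_BUFSIZE disables clustering since
 * it is not greater than io->bytes.
 */
#if 0
static int
example_load_buffer(hammer_volume_t volume, hammer_buffer_t buffer, int isnew)
{
        int error;

        if (isnew)
                error = hammer_io_new(volume->devvp, &buffer->io);
        else
                error = hammer_io_read(volume->devvp, &buffer->io,
                                       HAMMER_BUFSIZE);
        if (error == 0)
                buffer->ondisk = (void *)buffer->io.bp->b_data;
        return(error);
}
#endif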
/*
 * Similar to hammer_io_read() but returns a zeroed-out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);

                /* io->worklist is locked by the io lock */
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}
/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 *
 * The io->bp cannot go away while the buffer is referenced.
 */
void
hammer_io_advance(struct hammer_io *io)
{
        if (io->bp)
                buf_act_advance(io->bp);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
        hammer_io_structure_t iou;
        hammer_mount_t hmp;
        hammer_off_t phys_offset;
        struct buf *bp;
        int error;

        hmp = volume->io.hmp;
        lwkt_gettoken(&hmp->io_token);

        /*
         * If a device buffer already exists for the specified physical
         * offset use that, otherwise instantiate a buffer to cover any
         * related VM pages, set B_NOCACHE, and brelse().
         */
        phys_offset = volume->ondisk->vol_buf_beg +
                      (zone2_offset & HAMMER_OFF_SHORT_MASK);
        if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
                bremfree(bp);
        else
                bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);

        if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
                hammer_ref(&iou->io.lock);
                hammer_io_clear_modify(&iou->io, 1);
                bundirty(bp);
                iou->io.released = 0;
                BUF_KERNPROC(bp);
                iou->io.reclaim = 1;
                iou->io.waitdep = 1;    /* XXX this is a fs_token field */
                KKASSERT(hammer_isactive(&iou->io.lock) == 1);
                hammer_rel_buffer(&iou->buffer, 0);
                /*hammer_io_deallocate(bp);*/
#endif
                bqrelse(bp);
                error = EAGAIN;
        } else {
                KKASSERT((bp->b_flags & B_LOCKED) == 0);
                bundirty(bp);
                bp->b_flags |= B_NOCACHE|B_RELBUF;
                brelse(bp);
                error = 0;
        }
        lwkt_reltoken(&hmp->io_token);
        return(error);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
        union hammer_io_structure *iou = (void *)io;
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return(NULL);

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * Meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (flush) {
                        hammer_io_flush(io, 0);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                                hammer_io_flush(io, 0);
                                break;
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io, hammer_undo_reclaim(io));
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.  This occurs when
         * the buffer must be disposed of definitively during an umount
         * or buffer invalidation.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regards to
         * the buffer).
         */
        if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                } else {
                        io->released = 1;
                }
                hammer_io_disassociate((hammer_io_structure_t)io);
                /* return the bp */
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel if
                 * the buffer has been modified.
                 *
                 * Volume and meta-data IO types may only be explicitly
                 * flushed by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
                bp = NULL;      /* bp left associated */
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 *
                 * We can steal the structure's ownership of the bp.
                 */
                io->released = 1;
                if (bp->b_flags & B_LOCKED) {
                        hammer_io_disassociate(iou);
                        /* return the bp */
                } else {
                        if (io->reclaim) {
                                hammer_io_disassociate(iou);
                                /* return the bp */
                        } else {
                                /* return the bp (bp passively associated) */
                        }
                }
        } else {
                /*
                 * A released buffer is passively associated with our
                 * hammer_io structure.  The kernel cannot destroy it
                 * without making a bioops call.  If the kernel (B_LOCKED)
                 * or we (reclaim) requested that the buffer be destroyed
                 * we destroy it, otherwise we do a quick get/release to
                 * reset its position in the kernel's LRU list.
                 *
                 * Leaving the buffer passively associated allows us to
                 * use the kernel's LRU buffer flushing mechanisms rather
                 * than rolling our own.
                 *
                 * XXX there are two ways of doing this.  We can re-acquire
                 * and passively release to reset the LRU, or not.
                 */
                if (io->running == 0) {
                        regetblk(bp);
                        if ((bp->b_flags & B_LOCKED) || io->reclaim) {
                                hammer_io_disassociate(iou);
                                /* return the bp */
                        } else {
                                /* return the bp (bp passively associated) */
                        }
                } else {
                        /*
                         * bp is left passively associated but we do not
                         * try to reacquire it.  Interactions with the io
                         * structure will occur on completion of the bp's
                         * I/O.
                         */
                        bp = NULL;
                }
        }
        return(bp);
}
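/*
 * Sketch (#if 0): the caller-side disposal protocol described above.  The
 * calling context is assumed; only hammer_io_release()'s return convention
 * (a non-NULL bp must be disposed of, typically via brelse()) is taken
 * from this file.
 */
#if 0
static void
example_release_buffer(hammer_buffer_t buffer, int flush)
{
        struct buf *bp;

        bp = hammer_io_release(&buffer->io, flush);
        if (bp)
                brelse(bp);     /* dispose of the bp handed back to us */
}
#endif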
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exists other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
        struct buf *bp;
        hammer_mount_t hmp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0)
                return;

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs <= 0);

        /*
         * Acquire ownership of the bp, particularly before we clear our
         * modified flag.
         *
         * We are going to bawrite() this bp.  Don't leave a window where
         * io->released is set, we actually own the bp rather than our
         * structure.
         *
         * The io_token should not be required here as only
         */
        hmp = io->hmp;
        bp = io->bp;
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                /* io->released = 0; */
                KKASSERT(io->released);
                KKASSERT(io->bp == bp);
        } else {
                io->released = 1;
        }

        if (reclaim) {
                io->reclaim = 1;
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
                        atomic_add_int(&hammer_count_io_locked, 1);
                }
        }

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * NOTE: This call also finalizes the buffer's content (inval == 0).
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        hammer_ref(&io->lock);
        hammer_io_clear_modify(io, 0);
        hammer_rel(&io->lock);

        if (hammer_debug_io & 0x0002)
                kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);

        /*
         * Transfer ownership to the kernel and initiate I/O.
         *
         * NOTE: We do not hold io_token so an atomic op is required to
         *       update io_running_space.
         */
        io->running = 1;
        atomic_add_int(&hmp->io_running_space, io->bytes);
        atomic_add_int(&hammer_count_io_running_write, io->bytes);
        lwkt_gettoken(&hmp->io_token);
        TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
        lwkt_reltoken(&hmp->io_token);
        bawrite(bp);
        hammer_io_flush_mark(io->volume);
}
/************************************************************************
 *                              BUFFER DIRTYING                         *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
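/*
 * Sketch (#if 0): the modify bracket this section describes, as a caller
 * would use it.  hammer_modify_buffer()/hammer_modify_buffer_done() are
 * the real entry points defined below; the structure being edited and the
 * field touched are hypothetical.
 */
#if 0
static void
example_modify_ondisk(hammer_transaction_t trans, hammer_buffer_t buffer,
                      hammer_node_ondisk_t ondisk)
{
        /* declare intent: refs the bp and generates undo for the range */
        hammer_modify_buffer(trans, buffer, &ondisk->count,
                             sizeof(ondisk->count));
        ++ondisk->count;                /* the actual on-disk edit */
        hammer_modify_buffer_done(buffer);
}
#endif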
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure-data buffers
 * can be written out.
 *
 * The referenced io prevents races.
 */
static void
hammer_io_modify(hammer_io_t io, int count)
{
        /*
         * io->modify_refs must be >= 0
         */
        while (io->modify_refs < 0) {
                io->waitmod = 1;
                tsleep(io, 0, "hmrmod", 0);
        }

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        /*
         * NOTE: It is important not to set the modified bit
         *       until after we have acquired the bp or we risk
         *       racing against checkwrite.
         */
        hammer_lock_ex(&io->lock);
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
        }
        if (io->modified == 0) {
                hammer_io_set_modlist(io);
                io->modified = 1;
        }
        hammer_unlock(&io->lock);
}
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
        if (io->modify_refs == 0 && io->waitmod) {
                io->waitmod = 0;
                wakeup(io);
        }
}
/*
 * The write interlock blocks other threads trying to modify a buffer
 * (they block in hammer_io_modify()) after us, or blocks us while other
 * threads are in the middle of modifying a buffer.
 *
 * The caller also has a ref on the io, however if we are not careful
 * we will race bioops callbacks (checkwrite).  To deal with this
 * we must at least acquire and release the io_token, and it is probably
 * better to hold it through the setting of modify_refs.
 */
void
hammer_io_write_interlock(hammer_io_t io)
{
        hammer_mount_t hmp = io->hmp;

        lwkt_gettoken(&hmp->io_token);
        while (io->modify_refs != 0) {
                io->waitmod = 1;
                tsleep(io, 0, "hmrmod", 0);
        }
        io->modify_refs = -1;
        lwkt_reltoken(&hmp->io_token);
}

void
hammer_io_done_interlock(hammer_io_t io)
{
        KKASSERT(io->modify_refs == -1);
        io->modify_refs = 0;
        if (io->waitmod) {
                io->waitmod = 0;
                wakeup(io);
        }
}
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&volume->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans,
                         HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                         base, len);
        }
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
        hammer_mount_t hmp;

        /*
         * io_token is needed to avoid races on mod_list
         */
        if (io->modified == 0)
                return;
        hmp = io->hmp;
        lwkt_gettoken(&hmp->io_token);
        if (io->modified == 0) {
                lwkt_reltoken(&hmp->io_token);
                return;
        }

        /*
         * Take us off the mod-list and clear the modified bit.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                io->hmp->locked_dirty_space -= io->bytes;
                atomic_add_int(&hammer_count_dirtybufspace, -io->bytes);
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;

        lwkt_reltoken(&hmp->io_token);

        /*
         * If this bit is not set there are no delayed adjustments.
         */
        if (io->gencrc == 0)
                return;
        io->gencrc = 0;

        /*
         * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
         * on the node (& underlying buffer).  Release the node after clearing
         * the flag.
         */
        if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
                hammer_buffer_t buffer = (void *)io;
                hammer_node_t node;

restart:
                TAILQ_FOREACH(node, &buffer->clist, entry) {
                        if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
                                continue;
                        node->flags &= ~HAMMER_NODE_NEEDSCRC;
                        KKASSERT(node->ondisk);
                        if (inval == 0)
                                node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
                        hammer_rel_node(node);
                        goto restart;
                }
        }
        /* caller must still have ref on io */
        KKASSERT(hammer_isactive(&io->lock));
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 *
 * mod_list requires io_token protection.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
        hammer_mount_t hmp = io->hmp;

        KKASSERT(io->modified == 0);
        if (io->mod_list) {
                lwkt_gettoken(&hmp->io_token);
                if (io->mod_list) {
                        KKASSERT(io->mod_list == &io->hmp->lose_list);
                        TAILQ_REMOVE(io->mod_list, io, mod_entry);
                        io->mod_list = NULL;
                }
                lwkt_reltoken(&hmp->io_token);
        }
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
        struct hammer_mount *hmp = io->hmp;

        lwkt_gettoken(&hmp->io_token);
        KKASSERT(io->mod_list == NULL);

        switch(io->type) {
        case HAMMER_STRUCTURE_VOLUME:
                io->mod_list = &hmp->volu_list;
                hmp->locked_dirty_space += io->bytes;
                atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
                break;
        case HAMMER_STRUCTURE_META_BUFFER:
                io->mod_list = &hmp->meta_list;
                hmp->locked_dirty_space += io->bytes;
                atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
                break;
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                io->mod_list = &hmp->undo_list;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
                io->mod_list = &hmp->data_list;
                break;
        case HAMMER_STRUCTURE_DUMMY:
                panic("hammer_io_set_modlist: bad io type");
                break;
        }
        TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
        lwkt_reltoken(&hmp->io_token);
}
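/*
 * Sketch (#if 0): how the pieces above fit together over a buffer's dirty
 * lifecycle.  Purely illustrative control flow under assumed locking, not
 * a real path through this file.
 */
#if 0
static void
example_dirty_lifecycle(hammer_buffer_t buffer)
{
        hammer_io_t io = &buffer->io;

        hammer_io_modify(io, 1);        /* dirties io; set_modlist() queues
                                         * it and adjusts dirty-space stats */
        /* ... edit buffer->ondisk; undo already generated by caller ... */
        hammer_io_modify_done(io);

        /*
         * Later the flusher writes it out; hammer_io_clear_modify()
         * dequeues it from the mod_list before the write is issued.
         */
        hammer_io_flush(io, 0);
}
#endif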
/************************************************************************
 *                              KERNEL CALLBACKS                        *
 ************************************************************************/

/*
 * Pre-IO initiation kernel callback - cluster build only
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_start(struct buf *bp)
{
        /* nothing to do, so io_token not needed */
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a data buffer after we have initiated write
 *       I/O.
 *
 * NOTE: MPSAFE callback
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
        struct hammer_mount *hmp = iou->io.hmp;
        struct hammer_io *ionext;

        lwkt_gettoken(&hmp->io_token);

        KKASSERT(iou->io.released == 1);

        /*
         * Deal with people waiting for I/O to drain
         */
        if (iou->io.running) {
                /*
                 * Deal with critical write errors.  Once a critical error
                 * has been flagged in hmp the UNDO FIFO will not be updated.
                 * That way crash recovery will give us a consistent
                 * filesystem.
                 *
                 * Because of this we can throw away failed UNDO buffers.  If
                 * we throw away META or DATA buffers we risk corrupting
                 * the now read-only version of the filesystem visible to
                 * the user.  Clear B_ERROR so the buffer is not re-dirtied
                 * by the kernel and ref the io so it doesn't get thrown
                 * away.
                 */
                if (bp->b_flags & B_ERROR) {
                        lwkt_gettoken(&hmp->fs_token);
                        hammer_critical_error(hmp, NULL, bp->b_error,
                                              "while flushing meta-data");
                        lwkt_reltoken(&hmp->fs_token);

                        switch(iou->io.type) {
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                break;
                        default:
                                if (iou->io.ioerror == 0) {
                                        iou->io.ioerror = 1;
                                        hammer_ref(&iou->io.lock);
                                }
                                break;
                        }
                        bp->b_flags &= ~B_ERROR;
                        bundirty(bp);
#if 0
                        hammer_io_set_modlist(&iou->io);
                        iou->io.modified = 1;
#endif
                }
                hammer_stats_disk_write += iou->io.bytes;
                atomic_add_int(&hammer_count_io_running_write, -iou->io.bytes);
                atomic_add_int(&hmp->io_running_space, -iou->io.bytes);
                if (hmp->io_running_wakeup &&
                    hmp->io_running_space < hammer_limit_running_io / 2) {
                        hmp->io_running_wakeup = 0;
                        wakeup(&hmp->io_running_wakeup);
                }
                KKASSERT(hmp->io_running_space >= 0);
                iou->io.running = 0;

                /*
                 * Remove from iorun list and wakeup any multi-io waiter(s).
                 */
                if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
                        ionext = TAILQ_NEXT(&iou->io, iorun_entry);
                        if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
                                wakeup(ionext);
                }
                TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
        } else {
                hammer_stats_disk_read += iou->io.bytes;
        }

        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * If B_LOCKED is set someone wanted to deallocate the bp at some
         * point, try to do it now.  The operation will fail if there are
         * refs or if hammer_io_deallocate() is unable to gain the
         * interlock.
         */
        if (bp->b_flags & B_LOCKED) {
                atomic_add_int(&hammer_count_io_locked, -1);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
        lwkt_reltoken(&hmp->io_token);
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
        hammer_mount_t hmp;

        hmp = iou->io.hmp;

        lwkt_gettoken(&hmp->io_token);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
                /*
                 * We cannot safely disassociate a bp from a referenced
                 * or interlocked HAMMER structure.
                 */
                bp->b_flags |= B_LOCKED;
                atomic_add_int(&hammer_count_io_locked, 1);
        } else if (iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
                atomic_add_int(&hammer_count_io_locked, 1);
                hammer_put_interlock(&iou->io.lock, 0);
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.  The kernel has
                 * locked the buffer and therefore our io must be
                 * in a released state.
                 */
                hammer_io_disassociate(iou);
                if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.bp == NULL);
                        KKASSERT(iou->io.mod_list == NULL);
                        iou->io.mod_list = &hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
                }
                hammer_put_interlock(&iou->io.lock, 1);
        }
        lwkt_reltoken(&hmp->io_token);
}
/*
 * bioops callback - hold io_token
 */
static int
hammer_io_fsync(struct vnode *vp)
{
        /* nothing to do, so io_token not needed */
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_sync(struct mount *mp)
{
        /* nothing to do, so io_token not needed */
        return(0);
}

/*
 * bioops callback - hold io_token
 */
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
        /* nothing to do, so io_token not needed */
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we fall through to the read.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkread(struct buf *bp)
{
        /* nothing to do, so io_token not needed */
        return(0);
}
/*
 * The kernel is asking us whether it can write out a dirty buffer or not.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
        hammer_mount_t hmp = io->hmp;

        /*
         * This shouldn't happen under normal operation.
         */
        lwkt_gettoken(&hmp->io_token);
        if (io->type == HAMMER_STRUCTURE_VOLUME ||
            io->type == HAMMER_STRUCTURE_META_BUFFER) {
                if (!panicstr)
                        panic("hammer_io_checkwrite: illegal buffer");
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
                        atomic_add_int(&hammer_count_io_locked, 1);
                }
                lwkt_reltoken(&hmp->io_token);
                return(1);
        }

        /*
         * We have to be able to interlock the IO to safely modify any
         * of its fields without holding the fs_token.  If we can't lock
         * it then we are racing someone.
         *
         * Our ownership of the bp lock prevents the io from being ripped
         * out from under us.
         */
        if (hammer_try_interlock_norefs(&io->lock) == 0) {
                bp->b_flags |= B_LOCKED;
                atomic_add_int(&hammer_count_io_locked, 1);
                lwkt_reltoken(&hmp->io_token);
                return(1);
        }

        /*
         * The modified bit must be cleared prior to the initiation of
         * any IO (returning 0 initiates the IO).  Because this is a
         * normal data buffer hammer_io_clear_modify() runs through a
         * simple degenerate case.
         *
         * Return 0 will cause the kernel to initiate the IO, and we
         * must normally clear the modified bit before we begin.  If
         * the io has modify_refs we do not clear the modified bit,
         * otherwise we may miss changes.
         *
         * Only data and undo buffers can reach here.  These buffers do
         * not have terminal crc functions but we temporarily reference
         * the IO anyway, just in case.
         */
        if (io->modify_refs == 0 && io->modified) {
                hammer_ref(&io->lock);
                hammer_io_clear_modify(io, 0);
                hammer_rel(&io->lock);
        } else if (io->modified) {
                KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
        }

        /*
         * The kernel is going to start the IO, set io->running.
         */
        KKASSERT(io->running == 0);
        io->running = 1;
        atomic_add_int(&io->hmp->io_running_space, io->bytes);
        atomic_add_int(&hammer_count_io_running_write, io->bytes);
        TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);

        hammer_put_interlock(&io->lock, 1);
        lwkt_reltoken(&hmp->io_token);

        return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        /* nothing to do, so io_token not needed */
        return(0);
}

struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};
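/*
 * Sketch (#if 0): how a buffer gets wired to the callback table above.
 * This condenses the association already performed in hammer_io_read()
 * and hammer_io_new(); it is shown here only to make the bioops linkage
 * explicit.  The helper itself is hypothetical.
 */
#if 0
static void
example_associate_bp(struct hammer_io *io, struct buf *bp)
{
        bp->b_ops = &hammer_bioops;     /* route kernel callbacks to us */
        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
        io->bp = bp;
        io->released = 0;               /* we own the bp for now */
}
#endif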
/************************************************************************
 *                              DIRECT IO OPS                           *
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
                      hammer_btree_leaf_elm_t leaf)
{
        hammer_off_t buf_offset;
        hammer_off_t zone2_offset;
        hammer_volume_t volume;
        struct buf *bp;
        struct bio *nbio;
        int vol_no;
        int error;

        buf_offset = bio->bio_offset;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        /*
         * The buffer cache may have an aliased buffer (the reblocker can
         * write them).  If it does we have to sync any dirty data before
         * we can build our direct-read.  This is a non-critical code path.
         */
        bp = bio->bio_buf;
        hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

        /*
         * Resolve to a zone-2 offset.  The conversion just requires
         * munging the top 4 bits but we want to abstract it anyway
         * so the blockmap code can verify the zone assignment.
         */
        zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
        if (error)
                goto done;
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Resolve volume and raw-offset for 3rd level bio.  The
         * offset will be specific to the volume.
         */
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        if (error == 0 && zone2_offset >= volume->maxbuf_off)
                error = EIO;

        if (error == 0) {
                /*
                 * 3rd level bio
                 */
                nbio = push_bio(bio);
                nbio->bio_offset = volume->ondisk->vol_buf_beg +
                                   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
                /*
                 * XXX disabled - our CRC check doesn't work if the OS
                 * does bogus_page replacement on the direct-read.
                 */
                if (leaf && hammer_verify_data) {
                        nbio->bio_done = hammer_io_direct_read_complete;
                        nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
                }
#endif
                hammer_stats_disk_read += bp->b_bufsize;
                vn_strategy(volume->devvp, nbio);
        }
        hammer_rel_volume(volume, 0);
done:
        if (error) {
                kprintf("hammer_direct_read: failed @ %016llx\n",
                        (long long)zone2_offset);
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
                biodone(bio);
        }
        return(error);
}
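/*
 * Sketch (#if 0): the layered bio translation used by the direct-I/O
 * paths in this section.  Each push_bio() exposes a deeper bio whose
 * bio_offset is re-expressed for the next level; this mirrors the
 * 2nd/3rd-level setup in hammer_io_direct_write() below.  The helper
 * itself is hypothetical.
 */
#if 0
static void
example_bio_layering(hammer_volume_t volume, struct bio *bio,
                     hammer_off_t zone2_offset)
{
        struct bio *nbio;

        /* 2nd level: cached zone-2 (raw buffer) offset */
        nbio = push_bio(bio);
        nbio->bio_offset = zone2_offset;

        /* 3rd level: raw byte offset within the specific volume */
        nbio = push_bio(nbio);
        nbio->bio_offset = volume->ondisk->vol_buf_beg +
                           (zone2_offset & HAMMER_OFF_SHORT_MASK);
        vn_strategy(volume->devvp, nbio);
}
#endif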
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 *
 * MPSAFE - since we do not modify any hammer_records we do not need
 * the fs_token.
 *
 * NOTE: MPSAFE callback
 */
static void
hammer_io_direct_read_complete(struct bio *nbio)
{
        struct bio *obio;
        struct buf *bp;
        u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

        bp = nbio->bio_buf;
        if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
                kprintf("HAMMER: data_crc error @%016llx/%d\n",
                        nbio->bio_offset, bp->b_bufsize);
                if (hammer_debug_critical)
                        Debugger("data_crc on read");
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
        }
        obio = pop_bio(nbio);
        biodone(obio);
}
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECG_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
                       hammer_record_t record)
{
        hammer_btree_leaf_elm_t leaf = &record->leaf;
        hammer_off_t buf_offset;
        hammer_off_t zone2_offset;
        hammer_volume_t volume;
        hammer_buffer_t buffer;
        struct buf *bp;
        struct bio *nbio;
        char *ptr;
        int vol_no;
        int error;

        buf_offset = leaf->data_offset;

        KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
        KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

        /*
         * Issue or execute the I/O.  The new memory record must replace
         * the old one before the I/O completes, otherwise a reacquisition
         * of the buffer will load the old media data instead of the new.
         */
        if ((buf_offset & HAMMER_BUFMASK) == 0 &&
            leaf->data_len >= HAMMER_BUFSIZE) {
                /*
                 * We are using the vnode's bio to write directly to the
                 * media, any hammer_buffer at the same zone-X offset will
                 * now have stale data.
                 */
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
                vol_no = HAMMER_VOL_DECODE(zone2_offset);
                volume = hammer_get_volume(hmp, vol_no, &error);

                if (error == 0 && zone2_offset >= volume->maxbuf_off)
                        error = EIO;
                if (error == 0) {
                        bp = bio->bio_buf;
                        KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);

                        hammer_del_buffers(hmp, buf_offset,
                                           zone2_offset, bp->b_bufsize);

                        /*
                         * Second level bio - cached zone2 offset.
                         *
                         * (We can put our bio_done function in either the
                         *  2nd or 3rd level).
                         */
                        nbio = push_bio(bio);
                        nbio->bio_offset = zone2_offset;
                        nbio->bio_done = hammer_io_direct_write_complete;
                        nbio->bio_caller_info1.ptr = record;
                        record->zone2_offset = zone2_offset;
                        record->gflags |= HAMMER_RECG_DIRECT_IO |
                                         HAMMER_RECG_DIRECT_INVAL;

                        /*
                         * Third level bio - raw offset specific to the
                         * correct volume.
                         */
                        zone2_offset &= HAMMER_OFF_SHORT_MASK;
                        nbio = push_bio(nbio);
                        nbio->bio_offset = volume->ondisk->vol_buf_beg +
                                           zone2_offset;
                        hammer_stats_disk_write += bp->b_bufsize;
                        hammer_ip_replace_bulk(hmp, record);
                        vn_strategy(volume->devvp, nbio);
                        hammer_io_flush_mark(volume);
                }
                hammer_rel_volume(volume, 0);
        } else {
                /*
                 * Must fit in a standard HAMMER buffer.  In this case all
                 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
                 * does not need to be set-up.
                 */
                KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
                buffer = NULL;
                ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
                if (error == 0) {
                        bp = bio->bio_buf;
                        bp->b_flags |= B_AGE;
                        hammer_io_modify(&buffer->io, 1);
                        bcopy(bp->b_data, ptr, leaf->data_len);
                        hammer_io_modify_done(&buffer->io);
                        hammer_rel_buffer(buffer, 0);
                        bp->b_resid = 0;
                        hammer_ip_replace_bulk(hmp, record);
                        biodone(bio);
                }
        }
        if (error) {
                /*
                 * Major suckage occurred.  Also note:  The record was
                 * never added to the tree so we do not have to worry
                 * about the backend.
                 */
                kprintf("hammer_direct_write: failed @ %016llx\n",
                        (long long)leaf->data_offset);
                bp = bio->bio_buf;
                bp->b_resid = 0;
                bp->b_error = EIO;
                bp->b_flags |= B_ERROR;
                biodone(bio);
                record->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(record);
        }
        return(error);
}
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 *
 * NOTE: MPSAFE callback, only modify fields we have explicit
 *       access to (the bp and the record->gflags).
 */
static void
hammer_io_direct_write_complete(struct bio *nbio)
{
        struct bio *obio;
        struct buf *bp;
        hammer_record_t record;
        hammer_mount_t hmp;

        record = nbio->bio_caller_info1.ptr;
        KKASSERT(record != NULL);
        hmp = record->ip->hmp;

        lwkt_gettoken(&hmp->io_token);

        bp = nbio->bio_buf;
        obio = pop_bio(nbio);
        if (bp->b_flags & B_ERROR) {
                lwkt_gettoken(&hmp->fs_token);
                hammer_critical_error(hmp, record->ip,
                                      bp->b_error,
                                      "while writing bulk data");
                lwkt_reltoken(&hmp->fs_token);
                bp->b_flags |= B_INVAL;
        }
        biodone(obio);

        KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
        if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
                record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
                                    HAMMER_RECG_DIRECT_WAIT);
                /* record can disappear once DIRECT_IO flag is cleared */
                wakeup(&record->flags);
        } else {
                record->gflags &= ~HAMMER_RECG_DIRECT_IO;
                /* record can disappear once DIRECT_IO flag is cleared */
        }
        lwkt_reltoken(&hmp->io_token);
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
        hammer_mount_t hmp = record->ip->hmp;

        /*
         * Wait for I/O to complete
         */
        if (record->gflags & HAMMER_RECG_DIRECT_IO) {
                lwkt_gettoken(&hmp->io_token);
                while (record->gflags & HAMMER_RECG_DIRECT_IO) {
                        record->gflags |= HAMMER_RECG_DIRECT_WAIT;
                        tsleep(&record->flags, 0, "hmdiow", 0);
                }
                lwkt_reltoken(&hmp->io_token);
        }

        /*
         * Invalidate any related buffer cache aliases associated with the
         * backing device.  This is needed because the buffer cache buffer
         * for file data is associated with the file vnode, not the backing
         * device vnode.
         *
         * XXX I do not think this case can occur any more now that
         * reservations ensure that all such buffers are removed before
         * an area can be reused.
         */
        if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
                KKASSERT(record->leaf.data_offset);
                hammer_del_buffers(hmp, record->leaf.data_offset,
                                   record->zone2_offset, record->leaf.data_len,
                                   1);
                record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
        }
}
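/*
 * Sketch (#if 0): an assumed backend caller resolving pending direct-IO
 * against a record before acting on it, per the contract described above.
 * The commit step itself is a placeholder.
 */
#if 0
static void
example_commit_record(hammer_record_t record)
{
        hammer_io_direct_wait(record);  /* (1) drain I/O, (2) kill aliases */
        /* ... now safe to commit the record to the B-Tree or destroy it ... */
}
#endif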
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
        struct hammer_inode_info iinfo;
        int zone;

        if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
                return;
        zone = HAMMER_ZONE_DECODE(leaf->data_offset);
        if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
                return;
        iinfo.obj_id = leaf->base.obj_id;
        iinfo.obj_asof = 0;     /* unused */
        iinfo.obj_localization = leaf->base.localization &
                                 HAMMER_LOCALIZE_PSEUDOFS_MASK;
        iinfo.u.leaf = leaf;
        hammer_scan_inode_snapshots(hmp, &iinfo,
                                    hammer_io_direct_uncache_callback,
                                    leaf);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t iinfo = data;
        hammer_off_t data_offset;
        hammer_off_t file_offset;
        struct vnode *vp;
        struct buf *bp;
        int blksize;

        if (ip->vp == NULL)
                return(0);
        data_offset = iinfo->u.leaf->data_offset;
        file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
        blksize = iinfo->u.leaf->data_len;
        KKASSERT((blksize & HAMMER_BUFMASK) == 0);

        /*
         * Warning: FINDBLK_TEST returns stable storage but not stable
         * contents.  It happens to be ok in this case.
         */
        hammer_ref(&ip->lock);
        if (hammer_get_vnode(ip, &vp) == 0) {
                if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
                    bp->b_bio2.bio_offset != NOOFFSET) {
                        bp = getblk(ip->vp, file_offset, blksize, 0, 0);
                        bp->b_bio2.bio_offset = NOOFFSET;
                        brelse(bp);
                }
                vput(vp);
        }
        hammer_rel_inode(ip, 0);
        return(0);
}
/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
        atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
}

/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
        hammer_volume_t volume;
        struct buf *bp_base = NULL;
        struct buf *bp;

        RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
                if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
                        atomic_clear_int(&volume->vol_flags,
                                         HAMMER_VOLF_NEEDFLUSH);
                        bp = getpbuf(NULL);
                        bp->b_bio1.bio_offset = 0;
                        bp->b_bufsize = 0;
                        bp->b_bcount = 0;
                        bp->b_cmd = BUF_CMD_FLUSH;
                        bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
                        bp->b_bio1.bio_done = biodone_sync;
                        bp->b_bio1.bio_flags |= BIO_SYNC;
                        bp_base = bp;
                        vn_strategy(volume->devvp, &bp->b_bio1);
                }
        }
        while ((bp = bp_base) != NULL) {
                bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
                biowait(&bp->b_bio1, "hmrFLS");
                relpbuf(bp, NULL);
        }
}
/*
 * Limit the amount of backlog which we allow to build up.
 */
void
hammer_io_limit_backlog(hammer_mount_t hmp)
{
        while (hmp->io_running_space > hammer_limit_running_io) {
                hmp->io_running_wakeup = 1;
                tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
        }
}
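/*
 * Sketch (#if 0): an assumed write-heavy caller applying the backpressure
 * primitive above before queueing more I/O.  The wakeup half lives in
 * hammer_io_complete(), which clears io_running_wakeup once
 * io_running_space drains below half the limit.  The loop and its inputs
 * are hypothetical.
 */
#if 0
static void
example_queue_writes(hammer_mount_t hmp, hammer_io_t ios[], int nios)
{
        int i;

        for (i = 0; i < nios; ++i) {
                hammer_io_limit_backlog(hmp);   /* throttle if backlogged */
                hammer_io_flush(ios[i], 0);     /* queue the next write */
        }
}
#endif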