/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
				hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
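
/*
 * Editor's note (illustrative, not part of the original source):
 * RB_GENERATE2 emits typed lookup helpers keyed on the last two
 * arguments above, which is why these trees can be searched directly
 * by key rather than by a dummy element, e.g.:
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *			   zoneX_offset);
 *
 * hammer_get_volume() and hammer_get_buffer() below use exactly these
 * lookups to find cached structures.
 */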
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls to hammer_load_volume() during mount are effectively
 * single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
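
/*
 * Editor's note (illustrative usage sketch, not part of the original
 * source): mount code is expected to call hammer_install_volume() once
 * per volume before any other on-disk access, e.g.:
 *
 *	for (i = 0; error == 0 && i < nvolumes; ++i)
 *		error = hammer_install_volume(hmp, volnames[i], NULL);
 *
 * where volnames/nvolumes stand in for hypothetical mount arguments.
 */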

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan; -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly || (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
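
/*
 * Editor's note (illustrative, not part of the original source): every
 * successful hammer_get_volume() must eventually be paired with a
 * hammer_rel_volume() with locked == 0:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		...access volume->ondisk...
 *		hammer_rel_volume(volume, 0);
 *	}
 */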

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp = NULL;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
	}
	if (bp)
		brelse(bp);
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via an alias in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static int
hammer_direct_zone(hammer_off_t buf_offset)
{
	switch(HAMMER_ZONE_DECODE(buf_offset)) {
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		return(1);
	default:
		return(0);
	}
	/* NOT REACHED */
}
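
/*
 * Editor's note (illustrative, not part of the original source): for the
 * direct zones above, the zone-X offset differs from the raw (zone-2)
 * offset only in the zone bits, so an alias can be computed by rewriting
 * those bits:
 *
 *	raw_offset = (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
 *		     HAMMER_ZONE_RAW_BUFFER;
 *
 * hammer_get_buffer() below performs exactly this rewrite when hunting
 * for recovered aliases on a read-only mount.
 */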

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t	zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_list == &hmp->lose_list) {
				TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
					     mod_entry);
				buffer->io.mod_list = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
				   HAMMER_ZONE_RAW_BUFFER);
		if (buffer) {
			kprintf("HAMMER: recovered aliased %016jx\n",
				(intmax_t)buf_offset);
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
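
/*
 * Editor's note (illustrative usage sketch, not part of the original
 * source): most callers go through the hammer_bread()/hammer_bnew()
 * wrappers below, but a direct call pairs with hammer_rel_buffer():
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer) {
 *		...access buffer->ondisk...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */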

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we
 * can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a largeblock boundary as the next largeblock might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_LARGEBLOCK_MASK64) &
				~HAMMER_LARGEBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}
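
/*
 * Editor's note (worked example, not part of the original source): the
 * read-ahead limit computed above rounds zone2_offset up to the next
 * large-block boundary and takes the difference.  With 8MB large-blocks,
 * an offset 3MB into a large-block yields
 *
 *	limit = (round up to 8MB boundary) - zone2_offset = 5MB
 *
 * so clustered read-ahead never crosses into the next large-block, which
 * might use a different buffer size.
 */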

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return(0);

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_list == &hmp->lose_list) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us or the normal release transitioned
	 * from 1->0 (and acquired the lock) attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
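
/*
 * Editor's note (illustrative usage sketch, not part of the original
 * source): *bufferp acts as a one-entry cache across consecutive reads.
 * Initialize it to NULL and release it once when finished:
 *
 *	hammer_buffer_t buffer = NULL;
 *
 *	ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	...additional hammer_bread() calls reusing &buffer...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */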

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
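
/*
 * Editor's note (illustrative, not part of the original source):
 * hammer_bnew() is the write-side analogue of hammer_bread().  Because
 * isnew is passed as 1 no media read is issued, so the caller must
 * initialize the returned space before relying on it, e.g.:
 *
 *	ondisk = hammer_bnew(hmp, buf_offset, &error, &buffer);
 *	if (ondisk)
 *		bzero(ondisk, HAMMER_BUFSIZE);
 */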

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
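
/*
 * Editor's note (illustrative sketch, not part of the original source)
 * of the passive association described above, assuming a node cache
 * embedded in another structure (the in-memory inode embeds such
 * caches):
 *
 *	hammer_cache_node(&ip->cache[0], node);    (node currently refed)
 *	...
 *	node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 *
 * The cache pointer is cleared automatically if the node is flushed, so
 * the re-acquisition may legitimately fail.
 */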

hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked on the 1->0 transition.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */
	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 * locked == 0	Normal unlocked operation
 * locked == 1	Call hammer_rel_interlock_done(..., 0);
 * locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}

/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
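
/*
 * Editor's note (illustrative usage sketch, not part of the original
 * source): B-Tree code typically fills in the zero'd node under a
 * modify/done pair and drops its ref when finished, e.g.:
 *
 *	node = hammer_alloc_btree(trans, hint, &error);
 *	if (node) {
 *		hammer_modify_node_all(trans, node);
 *		node->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
 *		hammer_modify_node_done(node);
 *		hammer_rel_node(node);
 *	}
 */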

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
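
/*
 * Editor's note (illustrative usage sketch, not part of the original
 * source): allocating record data, e.g. for a directory entry, might
 * look like:
 *
 *	data_buffer = NULL;
 *	data = hammer_alloc_data(trans, bytes, HAMMER_RECTYPE_DIRENTRY,
 *				 &data_offset, &data_buffer, hint, &error);
 *	if (error == 0) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, bytes);
 *		hammer_modify_buffer_done(data_buffer);
 *		hammer_rel_buffer(data_buffer, 0);
 *	}
 */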

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}