/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include <sys/nlookup.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                        hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
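
/*
 * Illustrative sketch (not part of the original source): RB_GENERATE2
 * emits typed lookup/insert functions keyed on the field named in its
 * final argument, which is what allows the direct keyed lookups used
 * throughout this file, e.g.:
 *
 *      volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *      buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *                         buf_offset & ~HAMMER_BUFMASK64);
 *
 * Both calls appear in the functions below; the masking of buf_offset
 * reflects that buffers are indexed by their buffer-aligned zoneX_offset.
 */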
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time or via the
 * hammer volume-add command; hammer_get_volume() will not load a new
 * volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded at this point.
 */
int
hammer_install_volume(hammer_mount_t hmp, const char *volname,
                      struct vnode *devvp, void *data)
{
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct hammer_volume_ondisk *img;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;
        int i;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;
        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;
        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        /*
         * Initialize the volume header with data if the data is specified.
         */
        if (ronly == 0 && data) {
                img = (struct hammer_volume_ondisk *)data;
                if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
                        hkprintf("Formatting of valid HAMMER volume "
                                "%s denied. Erase with dd!\n", volname);
                        error = EFTYPE;
                        goto late_failure;
                }
                bcopy(img, ondisk, sizeof(*img));
        }

        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                hkprintf("volume %s has an invalid header\n",
                        volume->vol_name);
                for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
                        kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
                        if (i != (int)sizeof(ondisk->vol_signature) - 1)
                                kprintf(" ");
                }
                kprintf("\n");
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->vol_flags = ondisk->vol_flags;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                hkprintf("volume %s's fsid does not match other volumes\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                hkprintf("volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }
        HAMMER_VOLUME_NUMBER_ADD(hmp, volume);
        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        HAMMER_BUFFERS_PER_BIGBLOCK;
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        HAMMER_BUFFERS_PER_BIGBLOCK;
        }
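
        /*
         * Worked example (illustrative, not in the original source,
         * assuming the standard HAMMER geometry of 16KB filesystem
         * buffers and 8MB big-blocks): f_blocks is accounted in
         * filesystem buffers, so HAMMER_BUFFERS_PER_BIGBLOCK is
         * 8MB / 16KB = 512 and a volume holding N big-blocks
         * contributes N * 512 to f_blocks.
         */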
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
                vn_unlock(volume->devvp);
                hammer_free_volume(volume);
        }
        return (error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure (aborting the scan).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data)
{
        hammer_mount_t hmp = volume->io.hmp;
        struct buf *bp = NULL;
        struct hammer_volume_ondisk *img;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        int error;

        /*
         * Clear the volume header with data if the data is specified.
         */
        if (ronly == 0 && data && volume->devvp) {
                img = (struct hammer_volume_ondisk *)data;
                error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
                if (error || bp->b_bcount < sizeof(*img)) {
                        hmkprintf(hmp, "Failed to read volume header: %d\n",
                                error);
                        brelse(bp);
                } else {
                        bcopy(img, bp->b_data, sizeof(*img));
                        error = bwrite(bp);
                        if (error)
                                hmkprintf(hmp, "Failed to clear volume "
                                        "header: %d\n", error);
                }
        }
        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;

        /*
         * Clean up the persistent ref ioerror might have on the volume.
         */
        if (volume->io.ioerror)
                hammer_io_clear_error_noassert(&volume->io);

        /*
         * This should release the bp.  Releasing the volume with flush set
         * implies the interlock is set.
         */
        hammer_ref_interlock_true(&volume->io.lock);
        hammer_rel_volume(volume, 1);
        KKASSERT(volume->io.bp == NULL);

        /*
         * There should be no references on the volume.
         */
        KKASSERT(hammer_norefs(&volume->io.lock));
        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly || volume->io.ioerror) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                        vn_unlock(volume->devvp);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                        vn_unlock(volume->devvp);
                }
        }
        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        HAMMER_VOLUME_NUMBER_DEL(hmp, volume);
        hammer_free_volume(volume);
        return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }

        /*
         * Reference the volume, load/check the data on the 0->1 transition.
         * hammer_load_volume() will dispose of the interlock on return,
         * and also clean up the ref count on error.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                *errorp = hammer_load_volume(volume);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                error = hammer_load_volume(volume);
        } else {
                KKASSERT(volume->ondisk);
                error = 0;
        }
        return (error);
}
/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                *errorp = hammer_load_volume(volume);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return(volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0) {
                        volume->ondisk = (void *)volume->io.bp->b_data;
                        hammer_ref_interlock_done(&volume->io.lock);
                } else {
                        hammer_rel_volume(volume, 1);
                }
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
        struct buf *bp;

        if (hammer_rel_interlock(&volume->io.lock, locked)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                volume->ondisk = NULL;
                bp = hammer_io_release(&volume->io, locked);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                hammer_rel_interlock_done(&volume->io.lock, locked);
                if (bp)
                        brelse(bp);
        }
}
int
hammer_mountcheck_volumes(hammer_mount_t hmp)
{
        hammer_volume_t vol;
        int i;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

int
hammer_get_installed_volumes(hammer_mount_t hmp)
{
        int i;
        int ret = 0;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
                ret++;
        return(ret);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
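
/*
 * Illustrative sketch (not part of the original source): a zone-X
 * buffer offset and its zone-2 alias differ only in the zone field
 * encoded in the top bits of hammer_off_t, which is why the read-only
 * recovery path below can probe for an alias with:
 *
 *      buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *                         hammer_xlate_to_zone2(buf_offset));
 *
 * The RB tree is keyed on zoneX_offset, so the same physical buffer can
 * only appear under one key at a time; maintaining the proper zone
 * everywhere is what keeps those keys consistent.
 */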
/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via aliases in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static int
hammer_direct_zone(hammer_off_t buf_offset)
{
        int zone = HAMMER_ZONE_DECODE(buf_offset);

        return(hammer_is_direct_mapped_index(zone));
}
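
/*
 * Usage sketch (illustrative, not in the original source): the only
 * caller is the read-only alias check in hammer_get_buffer() below,
 * i.e.
 *
 *      if (hmp->ronly && hammer_direct_zone(buf_offset)) {
 *              ... probe rb_bufs_root for the zone-2 alias ...
 *      }
 *
 * It simply reports whether the zone decoded from buf_offset names a
 * direct-mapped zone.
 */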
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.  Shortcut the operation if the
                 * ondisk structure is valid.
                 */
found_aliased:
                if (hammer_ref_interlock(&buffer->io.lock) == 0) {
                        hammer_io_advance(&buffer->io);
                        KKASSERT(buffer->ondisk);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * 0->1 transition or deferred 0->1 transition (CHECK),
                 * interlock now held.  Shortcut if ondisk is already
                 * assigned.
                 */
                atomic_add_int(&hammer_count_refedbufs, 1);
                if (buffer->ondisk) {
                        hammer_io_advance(&buffer->io);
                        hammer_ref_interlock_done(&buffer->io.lock);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_root can be modified via a biodone() interrupt
                 * so the io_token must be held.
                 */
                if (buffer->io.mod_root == &hmp->lose_root) {
                        lwkt_gettoken(&hmp->io_token);
                        if (buffer->io.mod_root == &hmp->lose_root) {
                                RB_REMOVE(hammer_mod_rb_tree,
                                          buffer->io.mod_root, &buffer->io);
                                buffer->io.mod_root = NULL;
                                KKASSERT(buffer->io.modified == 0);
                        }
                        lwkt_reltoken(&hmp->io_token);
                }
                goto found;
        } else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
                /*
                 * If this is a read-only mount there could be an alias
                 * in the raw zone.  If there is we use that buffer instead.
                 *
                 * rw mounts will not have aliases.  Also note when going
                 * from ro -> rw the recovered raw buffers are flushed and
                 * reclaimed, so again there will not be any aliases once
                 * the mount is rw.
                 */
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   hammer_xlate_to_zone2(buf_offset));
                if (buffer) {
                        if (hammer_debug_general & 0x0001) {
                                hkrateprintf(&hmp->kdiag,
                                             "recovered aliased %016jx\n",
                                             (intmax_t)buf_offset);
                        }
                        goto found_aliased;
                }
        }
        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }
        /*
         * Handle blockmap offset translations.
         */
        if (hammer_is_zone2_mapped_index(zone)) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                /* Must be zone-2 (not 1 or 4 or 15) */
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);
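
        /*
         * Illustrative note (not in the original source): the three
         * cases above reduce every supported zone to a zone-2 (raw
         * buffer) offset.  For example, a B-Tree offset translates via
         * hammer_blockmap_lookup(), an undo offset translates via
         * hammer_undo_lookup(), and a zone-2 offset passes through
         * unchanged; the raw-volume, freemap and unavail zones (1, 4
         * and 15) are never handed to hammer_get_buffer(), which is
         * what the KKASSERT enforces.
         */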
        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);
        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->node_list);
        hammer_ref_interlock_true(&buffer->io.lock);
        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;                       /* safety */
                if (hammer_rel_interlock(&buffer->io.lock, 1))  /* safety */
                        hammer_rel_interlock_done(&buffer->io.lock, 1);
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        atomic_add_int(&hammer_count_refedbufs, 1);
found:
        /*
         * The buffer is referenced and interlocked.  Load the buffer
         * if necessary.  hammer_load_buffer() deals with the interlock
         * and, if an error is returned, also deals with the ref.
         */
        if (buffer->ondisk == NULL) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp)
                        buffer = NULL;
        } else {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
                *errorp = 0;
        }
        return(buffer);
}
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we
 * can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);
        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (hammer_debug_general & 0x20000) {
                                hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
                                        (intmax_t)buffer->zoneX_offset,
                                        error,
                                        hammer_oneref(&buffer->io.lock));
                        }
                        if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                krateprintf(&hmp->kdiag,
                                        "hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p "
                                        "rep=%d lkrefs=%08x\n",
                                        (long long)base_offset,
                                        buffer, report_conflicts,
                                        (buffer ? buffer->io.lock.refs : -1));
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}
/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;

        if (hammer_debug_io & 0x0004) {
                hdkprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        (long long)buffer->zoneX_offset,
                        (long long)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }
        if (buffer->ondisk == NULL) {
                /*
                 * Issue the read or generate a new buffer.  When reading
                 * the limit argument controls any read-ahead clustering
                 * hammer_io_read() is allowed to do.
                 *
                 * We cannot read-ahead in the large-data zone and we cannot
                 * cross a big-block boundary as the next big-block might
                 * use a different buffer size.
                 */
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
                           HAMMER_ZONE_LARGE_DATA) {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               buffer->io.bytes);
                } else {
                        hammer_off_t limit;

                        limit = (buffer->zone2_offset +
                                 HAMMER_BIGBLOCK_MASK64) &
                                ~HAMMER_BIGBLOCK_MASK64;
                        limit -= buffer->zone2_offset;
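
                        /*
                         * Worked example (illustrative, not in the
                         * original source, assuming 8MB big-blocks):
                         * for a zone2_offset 2MB into a big-block the
                         * two statements above round up to the next
                         * 8MB boundary and subtract back, leaving
                         * limit = 6MB, so read-ahead clustering always
                         * stops at the big-block boundary.
                         */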
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               limit);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        if (error == 0) {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
        } else {
                hammer_rel_buffer(buffer, 1);
        }
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
        struct hammer_volume *volume = (struct hammer_volume *) data;

        /*
         * If volume != NULL we are only interested in unloading buffers
         * associated with a particular volume.
         */
        if (volume != NULL && volume != buffer->io.volume)
                return(0);

        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref.  Expect a 0->1 transition.
         */
        if (buffer->io.ioerror) {
                hammer_io_clear_error_noassert(&buffer->io);
                atomic_add_int(&hammer_count_refedbufs, -1);
        }
        hammer_ref_interlock_true(&buffer->io.lock);
        atomic_add_int(&hammer_count_refedbufs, 1);

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 1);
        return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        hammer_mount_t hmp;
        int error;
        int locked;

        /*
         * Acquire a ref, plus the buffer will be interlocked on the
         * 0->1 transition.
         */
        locked = hammer_ref_interlock(&buffer->io.lock);
        hmp = buffer->io.hmp;

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_root can be modified via
         * a biodone() interrupt.
         *
         * No longer loose.  lose_root requires the io_token.
         */
        if (buffer->io.mod_root == &hmp->lose_root) {
                lwkt_gettoken(&hmp->io_token);
                if (buffer->io.mod_root == &hmp->lose_root) {
                        RB_REMOVE(hammer_mod_rb_tree,
                                  buffer->io.mod_root, &buffer->io);
                        buffer->io.mod_root = NULL;
                }
                lwkt_reltoken(&hmp->io_token);
        }

        if (locked) {
                atomic_add_int(&hammer_count_refedbufs, 1);
                error = hammer_load_buffer(buffer, 0);
                /* NOTE: on error the buffer pointer is stale */
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
                return;

        /*
         * hammer_count_refedbufs accounting.  Decrement if we are in
         * the error path or if CHECK is clear.
         *
         * If we are not in the error path and CHECK is set the caller
         * probably just did a hammer_ref() and didn't account for it,
         * so we don't account for the loss here.
         */
        if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
                atomic_add_int(&hammer_count_refedbufs, -1);

        /*
         * If the caller locked us, or the normal release transitioned
         * from 1->0 (and acquired the lock), attempt to release the
         * io.  If the caller locked us we tell hammer_io_release()
         * to flush (which would be the unload or failure path).
         */
        bp = hammer_io_release(&buffer->io, locked);

        /*
         * If the buffer has no bp association and no refs we can destroy
         * it.
         *
         * NOTE: It is impossible for any associated B-Tree nodes to have
         * refs if the buffer has no additional refs.
         */
        if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
                RB_REMOVE(hammer_buf_rb_tree,
                          &buffer->io.hmp->rb_bufs_root,
                          buffer);
                volume = buffer->io.volume;
                buffer->io.volume = NULL;       /* sanity */
                hammer_rel_volume(volume, 0);
                hammer_io_clear_modlist(&buffer->io);
                hammer_flush_buffer_nodes(buffer);
                KKASSERT(TAILQ_EMPTY(&buffer->node_list));
                freeme = 1;
        }

        /*
         * Cleanup
         */
        hammer_rel_interlock_done(&buffer->io.lock, locked);
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
              int isnew, int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT(HAMMER_ZONE(buf_offset) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
            int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
}
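
/*
 * Usage sketch (illustrative, not in the original source): callers keep
 * a cached struct hammer_buffer pointer, initialized to NULL, across a
 * series of accesses and release it once at the end, e.g.:
 *
 *      struct hammer_buffer *buffer = NULL;
 *      void *ondisk;
 *      int error;
 *
 *      ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 *
 * Repeated calls against the same filesystem buffer then hit the cached
 * *bufferp and avoid the RB-tree lookup in hammer_get_buffer().
 */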
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node_cache structure embedded in the referencing structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * the I/O subsystem.
 */
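
/*
 * Illustrative sketch (not part of the original source): the passive
 * association is what hammer_cache_node()/hammer_uncache_node() below
 * implement.  A holder embeds a hammer_node_cache, points it at a node
 * while holding a ref, and can later try to reacquire through it:
 *
 *      hammer_cache_node(&ip->cache[0], node);
 *      hammer_rel_node(node);
 *      ...
 *      node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 *
 * The ip->cache[] naming here is a hypothetical example of such an
 * embedded hammer_node_cache, not a reference to this file.
 */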
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;
        int doload;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
                doload = hammer_ref_interlock_true(&node->lock);
        } else {
                doload = hammer_ref_interlock(&node->lock);
        }
        if (doload) {
                *errorp = hammer_load_node(trans, node, isnew);
                if (*errorp)
                        node = NULL;
        } else {
                KKASSERT(node->ondisk);
                *errorp = 0;
                hammer_io_advance(&node->buffer->io);
        }
        return(node);
}
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
        hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's node_list and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                hdkprintf("CRC B-TREE NODE @ %016llx/%lu FAILED\n",
                                        (long long)node->node_offset,
                                        sizeof(*node->ondisk));
                                if (hammer_debug_critical)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        if (error) {
                _hammer_rel_node(node, 1);
        } else {
                hammer_ref_interlock_done(&node->lock);
        }
        return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;
        int doload;

        node = cache->node;
        if (node != NULL) {
                doload = hammer_ref_interlock(&node->lock);
                if (doload) {
                        *errorp = hammer_load_node(trans, node, 0);
                        if (*errorp)
                                node = NULL;
                } else {
                        KKASSERT(node->ondisk);
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                                _hammer_rel_node(node, 0);
                                node = NULL;
                        } else {
                                *errorp = 0;
                        }
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
static void
_hammer_rel_node(hammer_node_t node, int locked)
{
        hammer_buffer_t buffer;

        /*
         * Deref the node.  If this isn't the 1->0 transition we're basically
         * done.  If locked is non-zero this function will just deref the
         * locked node and return 1, otherwise it will deref the locked
         * node and either lock and return 1 on the 1->0 transition or
         * not lock and return 0.
         */
        if (hammer_rel_interlock(&node->lock, locked) == 0)
                return;

        /*
         * Either locked was non-zero and we are interlocked, or the
         * hammer_rel_interlock() call returned non-zero and we are
         * interlocked on the 1->0 transition.
         *
         * The ref-count must still be decremented if locked != 0 so
         * the cleanup required still varies a bit.
         *
         * hammer_flush_node() when called with 1 or 2 will dispose of
         * the lock and possible ref-count.
         */
        if (node->ondisk == NULL) {
                hammer_flush_node(node, locked + 1);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC) {
                hammer_rel_interlock_done(&node->lock, locked);
                return;
        }

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                TAILQ_REMOVE(&buffer->node_list, node, entry);
                node->buffer = NULL;
                hammer_rel_interlock_done(&node->lock, locked);
        } else {
                hammer_flush_node(node, locked + 1);
                /* node is stale */
        }
        hammer_rel_buffer(buffer, 0);
}
void
hammer_rel_node(hammer_node_t node)
{
        _hammer_rel_node(node, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node, 0);
        }
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;
        int dofree;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }

        /*
         * NOTE: refs is predisposed if another thread is blocking and
         *	 will be larger than 0 in that case.  We aren't MPSAFE
         *	 here.
         */
        if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->node_list, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                dofree = 1;
        } else {
                dofree = 0;
        }

        /*
         * Deal with the interlock if locked == 1 or locked == 2.
         */
        if (locked)
                hammer_rel_interlock_done(&node->lock, locked - 1);

        /*
         * Destroy if requested
         */
        if (dofree) {
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->node_list)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (hammer_try_interlock_norefs(&node->lock)) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        _hammer_rel_node(node, 1);
                } else {
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->node_list, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            hint, errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  uint16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp,
                  hammer_off_t hint, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data directly from the blockmap.
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                case HAMMER_RECTYPE_SNAPSHOT:
                case HAMMER_RECTYPE_CONFIG:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        /*
                         * Only mirror-write comes here.  The regular
                         * allocation path uses the blockmap reservation.
                         */
                        zone = hammer_data_zone_index(data_len);
                        if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
                                /* round to buffer boundary */
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                        }
                        break;
                default:
                        hpanic("rec_type %04x unknown", rec_type);
                        zone = HAMMER_ZONE_UNAVAIL_INDEX; /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
                                                      hint, errorp);
        } else {
                *data_offsetp = 0;
        }

        data = NULL;
        if (*errorp == 0 && data_bufferp && data_len)
                data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
                                        errorp, data_bufferp);
        return(data);
}
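
/*
 * Usage sketch (illustrative, not in the original source): a typical
 * caller allocates, then wraps its stores in the modify calls the
 * comment above requires, e.g.:
 *
 *      struct hammer_buffer *data_buffer = NULL;
 *      hammer_off_t data_offset;
 *      void *data;
 *
 *      data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *                               &data_offset, &data_buffer, hint, &error);
 *      if (error == 0) {
 *              hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *              bcopy(src, data, data_len);
 *              hammer_modify_buffer_done(data_buffer);
 *      }
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);
 */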
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

struct hammer_sync_info {
        int error;
};

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        if (waitfor == MNT_WAIT) {
                vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
                          hammer_sync_scan2, &info);
        } else {
                vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
                          hammer_sync_scan2, &info);
        }
        return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;
        int flags;

        flags = VMSC_GETVP;
        if (waitfor & MNT_LAZY)
                flags |= VMSC_ONEPASS;

        info.error = 0;
        vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);

        if (info.error == 0 && (waitfor & MNT_WAIT)) {
                vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (ip == NULL)
                return(0);
        if (vp->v_type == VNON || vp->v_type == VBAD) {
                return(0);
        }
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            RB_EMPTY(&vp->v_rbdirty_tree)) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
        if (error)
                info->error = error;
        return(0);
}