/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include <sys/fcntl.h>
#include <sys/nlookup.h>

#include "hammer.h"
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
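
/*
 * Illustrative sketch (not part of the original source): the RB_GENERATE2
 * macros above emit typed red-black tree operations keyed on the named
 * field, so a structure can be located directly by its key without
 * constructing a dummy node, e.g.:
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *			   zoneX_offset);
 */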
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded during mount.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;
	/*
	 * Get the device vnode.  If no device vnode was passed in,
	 * look the volume name up in the namespace.
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name,
				     UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred,
					   &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}
	if (error == 0 && vn_isdisk(volume->devvp, &error)) {
		error = vfs_mountedon(volume->devvp);
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;
	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}
	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
	/*
	 * Cleanup
	 */
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return(error);
}
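
/*
 * Example (sketch, not in the original file): the mount code in
 * hammer_vfsops.c is assumed to install each volume named in the mount
 * arguments, aborting the mount on the first failure.  The names[]
 * array here is hypothetical:
 *
 *	for (i = 0; error == 0 && i < nvolumes; ++i)
 *		error = hammer_install_volume(hmp, names[i], NULL);
 */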
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 for the RB_SCAN to
 * continue, so -1 is returned on failure (terminating the scan).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return(error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
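
/*
 * Usage sketch (illustration only, not in the original file): callers
 * pair every successful get with a release once they are done with the
 * volume's ondisk data:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */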
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
}
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t	zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * a competing release.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			hammer_io_advance(&buffer->io);
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;	/* safety */
		hammer_unref(&buffer->io.lock);	/* safety */
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		} else {
			hammer_io_advance(&buffer->io);
		}
	} else {
		*errorp = 0;
		hammer_io_advance(&buffer->io);
	}
	return(buffer);
}
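
/*
 * Usage sketch (illustration only, not in the original file): most
 * callers go through the hammer_bread()/hammer_bnew() wrappers further
 * below rather than calling hammer_get_buffer() directly:
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer) {
 *		... access buffer->ondisk ...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */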
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && buffer->io.lock.refs != 1) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	if (volume != NULL && volume != buffer->io.volume) {
		/*
		 * We are only interested in unloading buffers of volume,
		 * so skip it.
		 */
		return(0);
	}

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();	/* biodone race against list */
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->io.volume;
				buffer->io.volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed-in cached *bufferp to match against either zoneX or zone2.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
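
/*
 * Usage sketch (illustration only, not in the original file): callers
 * keep a cached buffer pointer across consecutive reads so adjacent
 * accesses hit the shortcut path above, releasing it once at the end:
 *
 *	hammer_buffer_t buffer = NULL;
 *	ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... additional hammer_bread() calls reusing &buffer ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */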
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node_cache structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	} else {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
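
/*
 * Usage sketch (illustration only, not in the original file): the B-Tree
 * cursor code is the main consumer; a node obtained here is released
 * with hammer_rel_node() when the caller moves off it:
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */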
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
			} else {
				*errorp = 0;
			}
		} else {
			*errorp = hammer_load_node(trans, node, 0);
		}
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
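
/*
 * Usage sketch (illustration only; the embedded cache array on the inode
 * is an assumption, not shown in this file): a passive cache survives the
 * node release and can be revalidated later via hammer_ref_node_safe():
 *
 *	hammer_cache_node(&ip->cache[0], node);
 *	hammer_rel_node(node);
 *	...
 *	node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 */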
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data directly from the blockmap.
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
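
/*
 * Usage sketch (illustration only, assuming the hammer_modify_buffer()
 * interlock pair used elsewhere in HAMMER): a record's data is typically
 * allocated and filled in before the B-Tree element referencing it is
 * inserted:
 *
 *	data_buffer = NULL;
 *	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *				 &data_offset, &data_buffer, hint, &error);
 *	if (error == 0) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, data_len);
 *		hammer_modify_buffer_done(data_buffer);
 *	}
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */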
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}