/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.74 2008/07/31 22:30:33 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
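
/*
 * Illustrative sketch (not part of the original file): RB_GENERATE2
 * emits lookup functions keyed on the listed field, so callers can
 * search a tree directly by key, e.g.:
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *
 * which internally applies hammer_vol_rb_compare() against vol_no.  The
 * buffer and node trees are used the same way with their own keys.
 */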
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;
	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;
	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
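		/*
		 * Account for this volume's big-blocks in the mount
		 * statistics.  Assuming the standard HAMMER geometry of
		 * 8MB big-blocks (HAMMER_LARGEBLOCK_SIZE) and 16KB
		 * filesystem buffers (HAMMER_BUFSIZE), each big-block
		 * contributes 8MB / 16KB = 512 buffer-sized blocks to
		 * f_blocks below.
		 */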
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
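
/*
 * Usage sketch (assumed call site, following the RB_SCAN convention used
 * for hammer_unload_volume() below; the actual caller lives in the mount
 * update path):
 *
 *	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
 *		hammer_adjust_volume_mode, NULL);
 */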
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}
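
/*
 * Usage sketch (illustrative): volume references are always paired; for
 * example hammer_del_buffers() later in this file does:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	...
 *	hammer_rel_volume(volume, 0);
 */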
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
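
/*
 * Layout note (summarized from hammer_disk.h): a hammer_off_t encodes the
 * zone in its top 4 bits and, for zone-2 (raw buffer) offsets, the volume
 * number in the next 8 bits, leaving a 52-bit byte offset:
 *
 *	63------60 59------52 51--------------------------------------0
 *	   zone      vol_no                 byte offset
 *
 * HAMMER_ZONE_DECODE() and HAMMER_VOL_DECODE() below extract these fields.
 */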
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * a disassociation.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}
	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}
	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);
	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), M_HAMMER,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;
	buffer->volume = volume;

	hammer_io_init(&buffer->io, hmp, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);
	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}
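
/*
 * Note (illustrative): most consumers do not call hammer_get_buffer()
 * directly; they go through the hammer_bread()/hammer_bnew() wrappers
 * further below, which cache the buffer pointer across calls and return
 * a data pointer offset into the buffer, e.g.:
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 */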
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty hammer
 * buffers must be fully synced to disk before we can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && buffer->io.modified) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && buffer->io.modified) {
				hammer_io_write_interlock(&buffer->io);
				hammer_io_flush(&buffer->io);
				hammer_io_done_interlock(&buffer->io);
				hammer_io_wait(&buffer->io);
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				KKASSERT(buffer->volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			hammer_io_inval(volume, zone2_offset);
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	struct buf *bp = NULL;
	int freeme = 0;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->volume;
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
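
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically cache the buffer pointer across sequential accesses and
 * release it when done:
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */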
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node_cache structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(node, isnew);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	}
	return(node);
}
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0)
				Debugger("CRC FAILED: B-TREE NODE");
			node->flags |= HAMMER_NODE_CRCGOOD;
		}
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			*errorp = 0;
		} else {
			*errorp = hammer_load_node(node, 0);
			if (*errorp) {
				hammer_rel_node(node);
				node = NULL;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	if (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
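
/*
 * Design note (illustrative): the bzero above is bracketed by
 * hammer_modify_node_noundo() and hammer_modify_node_done().  A freshly
 * allocated node has no prior on-media contents that crash recovery
 * could need, so no UNDO records are generated for its initialization.
 */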
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned buffer.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone,
						      data_len, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
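
/*
 * Sizing note (illustrative): with the standard 16KB HAMMER_BUFSIZE,
 * records of up to HAMMER_BUFSIZE / 2 = 8KB go to the small-data zone,
 * while larger allocations are rounded up to a 16KB multiple and placed
 * in the large-data zone; e.g. a 10000 byte record is rounded to 16384
 * bytes.
 */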
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT);
	if (error)
		info->error = error;
	return(0);
}