/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.61 2008/06/20 21:24:53 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
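
/*
 * Illustrative sketch (not part of the original source): how the
 * generated lookup functions above are typically invoked.  The
 * rb_inos_root field name and the hammer_inode_info usage are
 * assumptions for illustration only.
 */
#if 0
static void
example_rb_lookups(hammer_mount_t hmp, hammer_off_t buf_offset,
		   hammer_inode_info_t info)
{
	hammer_buffer_t buffer;
	hammer_inode_t ip;

	/* keyed lookup generated by RB_GENERATE2() */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);

	/* extended-info lookup generated by RB_GENERATE_XLOOKUP() */
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, info);
}
#endif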
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded during mount.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;

	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);

	if (error == 0 && vn_isdisk(volume->devvp, &error)) {
		error = vfs_mountedon(volume->devvp);
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;
	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}
	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
	if (volume->io.hmp->ronly) {
		/* do not call vinvalbuf */
		VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
		VOP_CLOSE(volume->devvp, FREAD|FWRITE);
	} else {
		/* do not call vinvalbuf */
		VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
		VOP_CLOSE(volume->devvp, FREAD);
	}
	vn_unlock(volume->devvp);
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan; -1 is returned on failure, which aborts the scan.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	volume->io.waitdep = 1;
	hammer_io_release(&volume->io, 1);
	hammer_io_clear_modlist(&volume->io);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an
 * exclusive lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	crit_exit();
}
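
/*
 * Illustrative sketch (not part of the original source): the get/rel
 * pairing callers use.  volume->ondisk is only guaranteed valid while
 * the reference obtained from hammer_get_volume() is held.
 */
#if 0
static void
example_volume_access(struct hammer_mount *hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* ... inspect volume->ondisk ... */
		hammer_rel_volume(volume, 0);
	}
}
#endif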
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * a disassociation.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
			crit_exit();
		}
		goto found;
	}
	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}
	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);
	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;
	buffer->volume = volume;

	hammer_io_init(&buffer->io, hmp, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			KKASSERT(buffer->zone2_offset == zone2_offset);
			hammer_io_clear_modify(&buffer->io, 1);
			buffer->io.reclaim = 1;
			KKASSERT(buffer->volume == volume);
			if (buffer->io.lock.refs == 0)
				hammer_unload_buffer(buffer, NULL);
		} else {
			hammer_io_inval(volume, zone2_offset);
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	int freeme = 0;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->volume;
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}
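
/*
 * Illustrative sketch (not part of the original source): the analogous
 * get/rel discipline for buffers.  A zone-X offset is passed in;
 * hammer_get_buffer() performs the blockmap translation itself.
 */
#if 0
static void
example_buffer_access(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_buffer_t buffer;
	int error;

	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE, 0, &error);
	if (buffer) {
		/* ... access buffer->ondisk while referenced ... */
		hammer_rel_buffer(buffer, 0);
	}
}
#endif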
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
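
/*
 * Illustrative sketch (not part of the original source): the *bufferp
 * convention.  Initializing the buffer pointer to NULL and releasing it
 * after the last access lets consecutive reads reuse the cached buffer
 * when they fall within the same HAMMER_BUFSIZE-aligned window, per the
 * masking done in _hammer_bread().
 */
#if 0
static void
example_bread(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, buf_offset, &error, &buffer);
	if (error == 0) {
		/* ... use data ... */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif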
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
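
/*
 * Illustrative sketch (not part of the original source): hammer_bnew()
 * is used when the caller intends to overwrite the entire buffer, so the
 * garbage contents must be initialized before use.
 */
#if 0
static void
example_bnew(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	struct hammer_buffer *buffer = NULL;
	void *ptr;
	int error;

	ptr = hammer_bnew(hmp, buf_offset, &error, &buffer);
	if (error == 0)
		bzero(ptr, HAMMER_BUFSIZE);	/* contents start as garbage */
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif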
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the other structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk)
		*errorp = 0;
	else
		*errorp = hammer_load_node(node, isnew);
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
				       (node->node_offset & HAMMER_BUFMASK));
			if (isnew == 0 &&
			    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
				if (hammer_crc_test_btree(node->ondisk) == 0)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk)
			*errorp = 0;
		else
			*errorp = hammer_load_node(node, 0);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
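
/*
 * Illustrative sketch (not part of the original source): node references
 * follow the same discipline as volumes and buffers.  node->ondisk is
 * only valid while the node is referenced.
 */
#if 0
static void
example_node_access(hammer_mount_t hmp, hammer_off_t node_offset)
{
	hammer_node_t node;
	int error;

	node = hammer_get_node(hmp, node_offset, 0, &error);
	if (node) {
		/* ... examine node->ondisk ... */
		hammer_rel_node(node);
	}
}
#endif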
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	if (cache->node == node)
		return;
	if (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
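
/*
 * Illustrative sketch (not part of the original source): a passive cache
 * embedded in another structure.  Reacquisition via hammer_ref_node_safe()
 * and the exact struct hammer_node_cache usage are assumptions based on
 * the declarations in this file.
 */
#if 0
static void
example_node_cache(struct hammer_mount *hmp, hammer_node_cache_t cache,
		   hammer_node_t node)
{
	int error;

	hammer_cache_node(cache, node);	/* remember node with 0 refs held */
	hammer_rel_node(node);		/* node may now be released safely */

	node = hammer_ref_node_safe(hmp, cache, &error);	/* reacquire */
	if (error == 0)
		hammer_rel_node(node);
	hammer_uncache_node(cache);
}
#endif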
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * data buffer.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone,
						      data_len, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	KKASSERT(*errorp == 0);
	return(data);
}
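
/*
 * Illustrative sketch (not part of the original source): allocating and
 * filling a data record.  hammer_modify_buffer()/hammer_modify_buffer_done()
 * are assumed here by analogy with the hammer_modify_node*() calls used
 * in hammer_alloc_btree() above.
 */
#if 0
static void
example_alloc_data(hammer_transaction_t trans, const void *src,
		   int32_t data_len)
{
	hammer_buffer_t data_buffer = NULL;
	hammer_off_t data_offset;
	void *data;
	int error;

	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, &error);
	if (error == 0) {
		hammer_modify_buffer(trans, data_buffer, data, data_len);
		bcopy(src, data, data_len);
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
}
#endif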
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}