/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.60 2008/06/20 05:38:26 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

#include "hammer.h"
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
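/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * the RB_GENERATE2 macros above emit typed lookup functions keyed on the
 * named field, so callers can resolve structures directly from a key.
 * The two calls below mirror how this file itself uses the generated
 * lookups; hmp is assumed to be a valid hammer_mount.
 */
#if 0
static void
example_rb_lookups(hammer_mount_t hmp, int32_t vol_no, hammer_off_t buf_offset)
{
        hammer_volume_t volume;
        hammer_buffer_t buffer;

        /* keyed on vol_no via RB_GENERATE2(..., int32_t, vol_no) */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);

        /* keyed on zoneX_offset via RB_GENERATE2(..., hammer_off_t, zoneX_offset) */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
}
#endif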
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
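/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * the mount path is expected to call hammer_install_volume() once per
 * volume before any hammer_get_volume() calls are made.  The loop shape
 * and the 'volpaths'/'nvols' names are assumptions for illustration.
 */
#if 0
static int
example_install_all(struct hammer_mount *hmp, char **volpaths, int nvols)
{
        int error = 0;
        int i;

        for (i = 0; i < nvols && error == 0; ++i)
                error = hammer_install_volume(hmp, volpaths[i]);
        return(error);
}
#endif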
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;
        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return (error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;
        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;
        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
                goto late_failure;
        }
        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
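                /*
                 * f_blocks is accounted in HAMMER_BUFSIZE buffers; each
                 * big-block therefore contributes
                 * HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE buffers to the
                 * total.
                 */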
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}
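/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * callbacks such as hammer_adjust_volume_mode() and hammer_unload_volume()
 * are run across every installed volume via RB_SCAN, e.g. from the
 * remount and unmount paths.  A NULL scan-compare callback visits all
 * nodes; the calling context here is an assumption for illustration.
 */
#if 0
static void
example_scan_volumes(struct hammer_mount *hmp)
{
        /* adjust every volume after a ro/rw remount */
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_adjust_volume_mode, NULL);

        /* unload every volume at unmount time */
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_unload_volume, NULL);
}
#endif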
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure (which aborts the scan).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        volume->io.waitdep = 1;
        hammer_io_release(&volume->io, 1);
        hammer_io_clear_modlist(&volume->io);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }
        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an
 * exclusive lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
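/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * the expected pairing.  hammer_get_volume() returns a referenced volume
 * (or NULL on error) and every successful get must be balanced by a
 * hammer_rel_volume() call.
 */
#if 0
static void
example_volume_ref_cycle(hammer_mount_t hmp, int32_t vol_no)
{
        hammer_volume_t volume;
        int error;

        volume = hammer_get_volume(hmp, vol_no, &error);
        if (volume) {
                /* ... use volume->ondisk ... */
                hammer_rel_volume(volume, 0);   /* 0: no flush requested */
        }
}
#endif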
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
}
/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
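/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * a zone-X buffer offset carries its zone in the high bits.
 * hammer_get_buffer() keys the RB tree on the zone-X offset but performs
 * I/O on the translated zone-2 (raw buffer) offset.  This helper simply
 * restates the translation logic used in the function body below.
 */
#if 0
static hammer_off_t
example_translate(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp)
{
        int zone = HAMMER_ZONE_DECODE(buf_offset);

        if (zone >= HAMMER_ZONE_BTREE_INDEX)
                return(hammer_blockmap_lookup(hmp, buf_offset, errorp));
        if (zone == HAMMER_ZONE_UNDO_INDEX)
                return(hammer_undo_lookup(hmp, buf_offset, errorp));
        *errorp = 0;
        return(buf_offset);             /* already a raw zone-2 offset */
}
#endif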
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * hammer_io_inval().
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }
        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  Inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }
        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);
        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;
        buffer->volume = volume;

        hammer_io_init(&buffer->io, hmp, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);
        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        KKASSERT(buffer->zone2_offset == zone2_offset);
                        hammer_io_clear_modify(&buffer->io, 1);
                        buffer->io.reclaim = 1;
                        KKASSERT(buffer->volume == volume);
                        if (buffer->io.lock.refs == 0)
                                hammer_unload_buffer(buffer, NULL);
                } else {
                        hammer_io_inval(volume, zone2_offset);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        int freeme = 0;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup.
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->volume;
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
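/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * typical *bufferp usage.  The caller seeds a NULL buffer pointer, makes
 * any number of hammer_bread() calls (each call releases the prior buffer
 * as needed), and releases the final buffer itself when done.
 */
#if 0
static void
example_bread(hammer_mount_t hmp, hammer_off_t buf_offset)
{
        hammer_buffer_t buffer = NULL;
        void *data;
        int error;

        data = hammer_bread(hmp, buf_offset, &error, &buffer);
        if (data) {
                /* ... inspect the returned data ... */
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
}
#endif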
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
              int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
            int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the other structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
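/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * the node get/release cycle.  hammer_get_node() returns a referenced
 * node (loading its on-disk data if necessary) and hammer_rel_node()
 * drops the reference, possibly leaving the node passively cached for
 * later reacquisition.  node_offset must be a zone-3 (B-Tree) offset.
 */
#if 0
static void
example_node_cycle(hammer_mount_t hmp, hammer_off_t node_offset)
{
        hammer_node_t node;
        int error;

        node = hammer_get_node(hmp, node_offset, 0, &error);
        if (node) {
                /* ... examine node->ondisk ... */
                hammer_rel_node(node);
        }
}
#endif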
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node, isnew);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0)
                                Debugger("CRC FAILED: B-TREE NODE");
                        node->flags |= HAMMER_NODE_CRCGOOD;
                }
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk) {
                        *errorp = 0;
                } else {
                        *errorp = hammer_load_node(node, 0);
                        if (*errorp) {
                                hammer_rel_node(node);
                                node = NULL;
                        }
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}
/*
 * Free the on-media space associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
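/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * a structure that wants to remember a node embeds a hammer_node_cache
 * and registers it.  The cache association survives the node's refs
 * dropping to zero and is torn down with hammer_uncache_node().
 */
#if 0
static void
example_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        hammer_cache_node(cache, node); /* remember the node */
        hammer_rel_node(node);          /* node may now be passively cached */
        /* ... later ... */
        hammer_uncache_node(cache);     /* forget it again */
}
#endif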
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}
/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned buffer.
 */
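/*
 * Illustrative sketch (not part of the original file, compile-disabled):
 * allocating record data under a transaction and modifying it.  The
 * hammer_modify_buffer()/hammer_modify_buffer_done() bracketing shown
 * here follows the convention stated above; the exact arguments are an
 * assumption for illustration.
 */
#if 0
static void
example_alloc_data(hammer_transaction_t trans, int32_t len)
{
        hammer_buffer_t data_buffer = NULL;
        hammer_off_t data_offset;
        void *data;
        int error;

        data = hammer_alloc_data(trans, len, HAMMER_RECTYPE_DATA,
                                 &data_offset, &data_buffer, &error);
        if (data) {
                hammer_modify_buffer(trans, data_buffer, NULL, 0);
                /* ... fill in the allocated data ... */
                hammer_modify_buffer_done(data_buffer);
        }
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
}
#endif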
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_PSEUDO_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}
/*
 * Sync dirty buffers to the media and clean up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;

        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (waitfor == MNT_WAIT)
                hammer_flusher_sync(hmp);
        else
                hammer_flusher_async(hmp);
        return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, info->waitfor);
        if (error)
                info->error = error;
        return(0);
}