/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.50 2008/06/07 07:41:51 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zone2_offset < buf2->zone2_offset)
		return(-1);
	if (buf1->zone2_offset > buf2->zone2_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
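
/*
 * Illustrative sketch (not part of the original file): RB_GENERATE2
 * emits a keyed lookup that is invoked through RB_LOOKUP, so a volume
 * can be found directly by volume number without a dummy search key.
 */
static __inline hammer_volume_t
example_lookup_volume(hammer_mount_t hmp, int32_t vol_no)
{
	/* keyed lookup generated by RB_GENERATE2 above */
	return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}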
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded during mount.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;
	RB_INIT(&volume->rb_bufs_root);

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
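
/*
 * Usage sketch (illustrative only; the helper and variable names are
 * hypothetical): mount code installs each volume named in the mount
 * arguments in turn, aborting on the first error.
 */
static __inline int
example_install_all(struct hammer_mount *hmp, char **volnames, int nvols)
{
	int error = 0;
	int i;

	for (i = 0; i < nvols && error == 0; ++i)
		error = hammer_install_volume(hmp, volnames[i]);
	return(error);
}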
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Unload buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	volume->io.waitdep = 1;
	hammer_io_release(&volume->io, 1);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
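
/*
 * Usage sketch (illustrative only): a reference obtained from
 * hammer_get_root_volume() must be balanced by hammer_rel_volume().
 */
static __inline int
example_root_volume_access(hammer_mount_t hmp)
{
	hammer_volume_t root_volume;
	int error;

	root_volume = hammer_get_root_volume(hmp, &error);
	if (error == 0) {
		/* root_volume->ondisk is valid while the ref is held */
		hammer_rel_volume(root_volume, 0);
	}
	return(error);
}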
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zoneX_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	zoneX_offset = buf_offset;
	zone = HAMMER_ZONE_DECODE(buf_offset);

	/*
	 * What is the buffer class?
	 */
	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	default:
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		buf_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	}

	/*
	 * Locate the buffer given its zone-2 offset.
	 */
	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * NOTE: buf_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT(buf_offset < volume->maxbuf_off);

	/*
	 * Locate and lock the buffer structure, creating one if necessary.
	 */
again:
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer == NULL) {
		++hammer_count_buffers;
		buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
		buffer->zone2_offset = buf_offset;
		buffer->volume = volume;

		hammer_io_init(&buffer->io, hmp, iotype);
		buffer->io.offset = volume->ondisk->vol_buf_beg +
				    (buf_offset & HAMMER_OFF_SHORT_MASK);
		TAILQ_INIT(&buffer->clist);
		hammer_ref(&buffer->io.lock);

		/*
		 * Insert the buffer into the RB tree and handle late
		 * collisions.
		 */
		if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
			hammer_unref(&buffer->io.lock);
			--hammer_count_buffers;
			kfree(buffer, M_HAMMER);
			goto again;
		}
		hammer_ref(&volume->io.lock);
	} else {
		hammer_ref(&buffer->io.lock);

		/*
		 * The buffer is no longer loose if it has a ref.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		if (buffer->io.lock.refs == 1)
			hammer_io_reinit(&buffer->io, iotype);
		else
			KKASSERT(buffer->io.type == iotype);
	}

	/*
	 * Cache the blockmap translation
	 */
	if ((zoneX_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_RAW_BUFFER)
		buffer->zoneX_offset = zoneX_offset;

	/*
	 * Deal with on-disk info
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_rel_volume(volume, 0);
	return(buffer);
}
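
/*
 * Illustrative sketch (not part of the original file): how the lookup
 * above picks apart a hammer_off_t.  The zone lives in the top bits and,
 * for a raw zone-2 offset, the volume number sits below it; the low bits
 * are masked down to the HAMMER_BUFSIZE buffer boundary.
 */
static __inline void
example_decode_offset(hammer_off_t buf_offset)
{
	int zone = HAMMER_ZONE_DECODE(buf_offset);	/* buffer class */
	int vol_no = HAMMER_VOL_DECODE(buf_offset);	/* zone-2 only */
	hammer_off_t base = buf_offset & ~HAMMER_BUFMASK64; /* buffer base */

	(void)zone; (void)vol_no; (void)base;
}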
/*
 * Clear the cached zone-X translation for a buffer.
 */
void
hammer_clrxlate_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume == NULL)
		return;
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer)
		buffer->zoneX_offset = 0;
	hammer_rel_volume(volume, 0);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx\n",
			buffer->zoneX_offset, buffer->zone2_offset);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	hammer_ref(&buffer->io.lock);

	/*
	 * No longer loose
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	int freeme = 0;

	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);
			hammer_flush_buffer_nodes(buffer);
			KKASSERT(TAILQ_EMPTY(&buffer->clist));

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 */
				volume = buffer->volume;
				RB_REMOVE(hammer_buf_rb_tree,
					  &volume->rb_bufs_root, buffer);
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	if (freeme) {
		KKASSERT(buffer->io.mod_list == NULL);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}
/*
 * Remove the zoneX translation cache for a buffer given its zone-2 offset.
 */
void
hammer_uncache_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	int vol_no;
	int error;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(volume != NULL);
	KKASSERT(buf_offset < volume->maxbuf_off);

	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer)
		buffer->zoneX_offset = 0;
	hammer_rel_volume(volume, 0);
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
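
/*
 * Usage sketch (illustrative only; not part of the original file): the
 * caller-supplied *bufferp caches the underlying hammer_buffer across
 * hammer_bread() calls and must be released when the caller is finished
 * with the mapped data.
 */
static __inline void
example_bread_usage(hammer_mount_t hmp, hammer_off_t offset)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, offset, &error, &buffer);
	if (data) {
		/* data is valid while the cached buffer ref is held */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}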
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	    struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
/*
 * Invalidate HAMMER_BUFSIZE bytes at zone2_offset.  This is used to
 * make sure that we do not have the related buffer cache buffer at
 * the device layer because it is going to be aliased in a high level
 * buffer.
 */
void
hammer_binval(hammer_mount_t hmp, hammer_off_t zone2_offset)
{
	hammer_volume_t volume;
	int vol_no;
	int error;

	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		hammer_io_inval(volume, zone2_offset);
		hammer_rel_volume(volume, 0);
	}
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->hmp = hmp;
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(node, isnew);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	}
	return(node);
}
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
				       (node->node_offset & HAMMER_BUFMASK));
			if (isnew == 0 &&
			    hammer_crc_test_btree(node->ondisk) == 0) {
				Debugger("CRC FAILED: B-TREE NODE");
			}
		}
	}
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
		     int *errorp)
{
	hammer_node_t node;

	node = *cache;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk)
			*errorp = 0;
		else
			*errorp = hammer_load_node(node, 0);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
	hammer_node_t old;

	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;

	/*
	 * Cache the node.  If we previously cached a different node we
	 * have to give HAMMER a chance to destroy it.
	 */
again:
	if (node->cache1 != cache) {
		if (node->cache2 != cache) {
			if ((old = *cache) != NULL) {
				KKASSERT(node->lock.refs != 0);
				hammer_uncache_node(cache);
				goto again;
			}
			if (node->cache2)
				*node->cache2 = NULL;
			node->cache2 = node->cache1;
			node->cache1 = cache;
			*cache = node;
		} else {
			struct hammer_node **tmp;

			tmp = node->cache1;
			node->cache1 = node->cache2;
			node->cache2 = tmp;
		}
	}
}
void
hammer_uncache_node(struct hammer_node **cache)
{
	hammer_node_t node;

	if ((node = *cache) != NULL) {
		*cache = NULL;
		if (node->cache1 == cache) {
			node->cache1 = node->cache2;
			node->cache2 = NULL;
		} else if (node->cache2 == cache) {
			node->cache2 = NULL;
		} else {
			panic("hammer_uncache_node: missing cache linkage");
		}
		if (node->cache1 == NULL && node->cache2 == NULL)
			hammer_flush_node(node);
	}
}
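
/*
 * Usage sketch (illustrative only): a structure such as an inode or
 * cursor keeps a passive cache pointer; cachep here stands in for such
 * a field.  No node reference is held while the node is merely cached.
 */
static __inline void
example_cache_usage(hammer_node_t node, struct hammer_node **cachep)
{
	hammer_cache_node(node, cachep);	/* passive association */
	/* ... much later, break the association ... */
	hammer_uncache_node(cachep);
}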
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	if (node->cache1)
		*node->cache1 = NULL;
	if (node->cache2)
		*node->cache2 = NULL;
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/
/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
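
/*
 * Usage sketch (illustrative only): a freshly allocated B-Tree node
 * comes back referenced with a zero-filled ondisk structure; the caller
 * initializes it and eventually drops the reference.
 */
static __inline hammer_node_t
example_alloc_node(hammer_transaction_t trans, int *errorp)
{
	hammer_node_t node;

	node = hammer_alloc_btree(trans, errorp);
	if (node) {
		/* node->ondisk is zero'd and ready to initialize */
	}
	return(node);
}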
/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 *
 * The caller is responsible for calling hammer_modify_*() prior to making
 * any additional modifications to either the returned record buffer or the
 * returned data buffer.
 */
hammer_record_ondisk_t
hammer_alloc_record(hammer_transaction_t trans,
		    hammer_off_t *rec_offp, u_int16_t rec_type,
		    struct hammer_buffer **rec_bufferp,
		    int32_t data_len, void **datap,
		    hammer_off_t *data_offp,
		    struct hammer_buffer **data_bufferp, int *errorp)
{
	hammer_record_ondisk_t rec;
	hammer_off_t rec_offset;
	hammer_off_t data_offset;
	int32_t reclen;

	if (datap)
		*datap = NULL;

	/*
	 * Allocate the record
	 */
	rec_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_RECORD_INDEX,
					   HAMMER_RECORD_SIZE, errorp);
	if (*errorp)
		return(NULL);
	if (data_offp)
		*data_offp = 0;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_bufferp == NULL) {
			switch(rec_type) {
			case HAMMER_RECTYPE_DATA:
				reclen = offsetof(struct hammer_data_record,
						  data[0]);
				break;
			case HAMMER_RECTYPE_DIRENTRY:
				reclen = offsetof(struct hammer_entry_record,
						  name[0]);
				break;
			default:
				panic("hammer_alloc_record: illegal "
				      "in-band data");
				/* NOT REACHED */
				reclen = 0;
				break;
			}
			KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
			data_offset = rec_offset + reclen;
		} else if (data_len < HAMMER_BUFSIZE) {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		} else {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		}
	} else {
		data_offset = 0;
	}
	if (*errorp) {
		hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
		return(NULL);
	}

	/*
	 * Basic return values.
	 *
	 * Note that because this is a 'new' buffer, there is no need to
	 * generate UNDO records for it.
	 */
	*rec_offp = rec_offset;
	rec = hammer_bread(trans->hmp, rec_offset, errorp, rec_bufferp);
	hammer_modify_buffer(trans, *rec_bufferp, NULL, 0);
	bzero(rec, sizeof(*rec));
	KKASSERT(*errorp == 0);
	rec->base.data_off = data_offset;
	rec->base.data_len = data_len;
	hammer_modify_buffer_done(*rec_bufferp);

	if (data_bufferp) {
		if (data_len) {
			*datap = hammer_bread(trans->hmp, data_offset, errorp,
					      data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			*datap = NULL;
		}
	} else if (data_len) {
		KKASSERT(data_offset + data_len - rec_offset <=
			 HAMMER_RECORD_SIZE);
		if (datap) {
			*datap = (void *)((char *)rec +
					  (int32_t)(data_offset - rec_offset));
		}
	} else {
		KKASSERT(datap == NULL);
	}
	KKASSERT(*errorp == 0);
	return(rec);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_len < HAMMER_BUFSIZE) {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
		} else {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
		}
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread(trans->hmp, *data_offsetp, errorp,
					    data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	KKASSERT(*errorp == 0);
	return(data);
}
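
/*
 * Usage sketch (illustrative only; names hypothetical): allocate a data
 * chunk, fill it under the modify/modify_done protocol, and release the
 * caching buffer pointer when done.
 */
static __inline int
example_alloc_and_copy(hammer_transaction_t trans, const void *src,
		       int32_t len, hammer_off_t *offp)
{
	struct hammer_buffer *data_buffer = NULL;
	void *data;
	int error;

	data = hammer_alloc_data(trans, len, offp, &data_buffer, &error);
	if (error == 0 && data) {
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		bcopy(src, data, len);
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return(error);
}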
/*
 * Sync dirty buffers to the media and clean up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;

	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (waitfor == MNT_WAIT)
		hammer_flusher_sync(hmp);
	else
		hammer_flusher_async(hmp);
	return(info.error);
}
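
/*
 * Usage sketch (illustrative only): a VFS sync entry point would call
 * hammer_sync_hmp(), blocking until the flusher completes when MNT_WAIT
 * is specified.
 */
static __inline int
example_vfs_sync(hammer_mount_t hmp)
{
	return(hammer_sync_hmp(hmp, MNT_WAIT));
}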
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}