/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.34 2008/03/19 20:18:17 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zone2_offset < buf2->zone2_offset)
                return(-1);
        if (buf1->zone2_offset > buf2->zone2_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
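/*
 * Usage sketch (editorial, not in the original source): the generated
 * lookup functions are keyed on the field named in each RB_GENERATE2()
 * expansion, e.g.
 *
 *      volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *      buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
 *                         zone2_offset);
 *
 * while inode lookups go through the INFO variant,
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */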
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        volume->hmp = hmp;
        hammer_io_init(&volume->io, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        RB_INIT(&volume->rb_bufs_root);

        hmp->mp->mnt_stat.f_blocks += volume->nblocks;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }
        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the scan,
 * so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        /*
         * Sync clusters, sync volume
         */
        hmp->mp->mnt_stat.f_blocks -= volume->nblocks;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Unload buffers.
         */
        RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
                hammer_unload_buffer, NULL);
        hammer_io_waitdep(&volume->io);

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        hammer_io_release(&volume->io, 2);
        /*
         * There should be no references on the volume, no clusters, and
         * no buffers.
         */
        KKASSERT(volume->io.lock.refs == 0);
        KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

        volume->ondisk = NULL;
        /*
         * Release the device vnode
         */
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }
        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        struct hammer_volume_ondisk *ondisk;
        int error;

        hammer_lock_ex(&volume->io.lock);
        KKASSERT(volume->io.loading == 0);
        volume->io.loading = 1;

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io);
                if (error) {
                        volume->io.loading = 0;
                        hammer_unlock(&volume->io.lock);
                        return (error);
                }
                volume->ondisk = ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        volume->io.loading = 0;
        hammer_unlock(&volume->io.lock);
        return(0);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        if (volume->io.lock.refs == 1) {
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io, flush);
                } else if (flush) {
                        hammer_io_flush(&volume->io);
                }
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
}
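/*
 * Usage sketch (editorial, not in the original source): volume references
 * obtained above are always paired with a release, e.g.
 *
 *      volume = hammer_get_volume(hmp, vol_no, &error);
 *      if (volume) {
 *              ...access volume->ondisk...
 *              hammer_rel_volume(volume, 0);
 *      }
 */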
/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zoneX_offset;
        int vol_no;
        int zone;

        zoneX_offset = buf_offset;
        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
                KKASSERT(*errorp == 0);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                buf_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
                KKASSERT(*errorp == 0);
        }
        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        /*
         * NOTE: buf_offset and maxbuf_off are both full offset
         * specifications.
         */
        KKASSERT(buf_offset < volume->maxbuf_off);

        /*
         * Locate and lock the buffer structure, creating one if necessary.
         */
again:
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
                           buf_offset);
        if (buffer == NULL) {
                ++hammer_count_buffers;
                buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
                buffer->zone2_offset = buf_offset;
                buffer->volume = volume;
                hammer_io_init(&buffer->io, HAMMER_STRUCTURE_BUFFER);
                buffer->io.offset = volume->ondisk->vol_buf_beg +
                                    (buf_offset & HAMMER_OFF_SHORT_MASK);
                TAILQ_INIT(&buffer->clist);
                hammer_ref(&buffer->io.lock);

                /*
                 * Insert the buffer into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
                        hammer_unref(&buffer->io.lock);
                        --hammer_count_buffers;
                        kfree(buffer, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&buffer->io.lock);
        }

        /*
         * Cache the blockmap translation
         */
        if ((zoneX_offset & HAMMER_ZONE_RAW_BUFFER) != HAMMER_ZONE_RAW_BUFFER)
                buffer->zoneX_offset = zoneX_offset;

        /*
         * Deal with on-disk info
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        hammer_rel_volume(volume, 0);
        return(buffer);
}
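/*
 * Editorial note: because of the zone test above, callers may pass either
 * a blockmap/undo (zone-X) offset or a raw zone-2 offset; both resolve to
 * the same hammer_buffer.  The zone-X alias is remembered in
 * buffer->zoneX_offset so hammer_bread() can match against either form.
 */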
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        void *ondisk;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        hammer_lock_ex(&buffer->io.lock);
        KKASSERT(buffer->io.loading == 0);
        buffer->io.loading = 1;

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io);
                }
                if (error) {
                        buffer->io.loading = 0;
                        hammer_unlock(&buffer->io.lock);
                        return (error);
                }
                buffer->ondisk = ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        if (error == 0 && isnew) {
                hammer_modify_buffer(NULL, buffer, NULL, 0);
                /* additional initialization goes here */
        }
        buffer->io.loading = 0;
        hammer_unlock(&buffer->io.lock);
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        hammer_ref(&buffer->io.lock);
        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;

        if (buffer->io.lock.refs == 1) {
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        hammer_io_release(&buffer->io, flush);

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                volume = buffer->volume;
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &volume->rb_bufs_root, buffer);
                                buffer->volume = NULL; /* sanity */
                                --hammer_count_buffers;
                                kfree(buffer, M_HAMMER);
                                hammer_rel_volume(volume, 0);
                                return;
                        }
                } else if (flush) {
                        hammer_io_flush(&buffer->io);
                }
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
             struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
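/*
 * Usage sketch (editorial, not in the original source): callers typically
 * thread one buffer pointer through a series of hammer_bread() calls and
 * release it when finished, e.g.
 *
 *      hammer_buffer_t buffer = NULL;
 *      void *data;
 *
 *      data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */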
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
            struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
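/*
 * Usage sketch (editorial; the cache field name is hypothetical): a
 * structure holding a passive node association revalidates it on use,
 *
 *      node = hammer_ref_node_safe(hmp, &ip->cache, &error);
 *      if (node) {
 *              ...use node->ondisk...
 *              hammer_rel_node(node);
 *      }
 */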
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->hmp = hmp;
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        *errorp = hammer_load_node(node);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}
/*
 * Reference an already-referenced node.
 */
int
hammer_ref_node(hammer_node_t node)
{
        int error;

        KKASSERT(node->lock.refs > 0);
        hammer_ref(&node->lock);
        if ((error = hammer_load_node(node)) != 0)
                hammer_rel_node(node);
        return(error);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node)
{
        hammer_buffer_t buffer;
        int error;

        error = 0;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                } else {
                        buffer = hammer_get_buffer(node->hmp,
                                                   node->node_offset, 0,
                                                   &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error == 0) {
                        node->ondisk = (void *)((char *)buffer->ondisk +
                               (node->node_offset & HAMMER_BUFMASK));
                }
        }
        hammer_unlock(&node->lock);
        return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
                     int *errorp)
{
        hammer_node_t node;

        if ((node = *cache) != NULL)
                hammer_ref(&node->lock);
        if (node) {
                *errorp = hammer_load_node(node);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
        hammer_node_t old;

        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;

        /*
         * Cache the node.  If we previously cached a different node we
         * have to give HAMMER a chance to destroy it.
         */
        if (node->cache1 != cache) {
                if (node->cache2 != cache) {
                        if ((old = *cache) != NULL) {
                                KKASSERT(node->lock.refs != 0);
                                hammer_uncache_node(cache);
                        }
                        if (node->cache2)
                                *node->cache2 = NULL;
                        node->cache2 = node->cache1;
                        node->cache1 = cache;
                        *cache = node;
                } else {
                        struct hammer_node **tmp;

                        tmp = node->cache1;
                        node->cache1 = node->cache2;
                        node->cache2 = tmp;
                }
        }
}
void
hammer_uncache_node(struct hammer_node **cache)
{
        hammer_node_t node;

        if ((node = *cache) != NULL) {
                *cache = NULL;
                if (node->cache1 == cache) {
                        node->cache1 = node->cache2;
                        node->cache2 = NULL;
                } else if (node->cache2 == cache) {
                        node->cache2 = NULL;
                } else {
                        panic("hammer_uncache_node: missing cache linkage");
                }
                if (node->cache1 == NULL && node->cache2 == NULL)
                        hammer_flush_node(node);
        }
}
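/*
 * Editorial sketch (the cache field name is hypothetical): a structure
 * that installs a passive cache slot must clear it before it is
 * destroyed, keeping the slot and the node's back-pointers consistent:
 *
 *      hammer_cache_node(node, &ip->cache);
 *      ...
 *      hammer_uncache_node(&ip->cache);        (before freeing ip)
 */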
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        if (node->cache1)
                *node->cache1 = NULL;
        if (node->cache2)
                *node->cache2 = NULL;
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->lock.refs == 0 && node->ondisk == NULL);
                hammer_ref(&node->lock);
                node->flags |= HAMMER_NODE_FLUSH;
                hammer_rel_node(node);
        }
}
/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/
/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 */
void *
hammer_alloc_record(hammer_transaction_t trans,
                    hammer_off_t *rec_offp, u_int16_t rec_type,
                    struct hammer_buffer **rec_bufferp,
                    int32_t data_len, void **datap,
                    struct hammer_buffer **data_bufferp, int *errorp)
{
        hammer_record_ondisk_t rec;
        hammer_off_t rec_offset;
        hammer_off_t data_offset;
        int32_t reclen;

        if (datap)
                *datap = NULL;

        /*
         * Allocate the record
         */
        rec_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_RECORD_INDEX,
                                           HAMMER_RECORD_SIZE, errorp);
        if (*errorp)
                return(NULL);

        /*
         * Allocate data
         */
        if (data_len) {
                if (data_bufferp == NULL) {
                        switch(rec_type) {
                        case HAMMER_RECTYPE_DATA:
                                reclen = offsetof(struct hammer_data_record,
                                                  data[0]);
                                break;
                        case HAMMER_RECTYPE_DIRENTRY:
                                reclen = offsetof(struct hammer_entry_record,
                                                  name[0]);
                                break;
                        default:
                                panic("hammer_alloc_record: illegal "
                                      "in-band data");
                                /* NOT REACHED */
                                reclen = 0;
                                break;
                        }
                        KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
                        data_offset = rec_offset + reclen;
                } else if (data_len < HAMMER_BUFSIZE) {
                        data_offset = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_SMALL_DATA_INDEX,
                                                data_len, errorp);
                } else {
                        data_offset = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_LARGE_DATA_INDEX,
                                                data_len, errorp);
                }
        } else {
                data_offset = 0;
        }
        if (*errorp) {
                hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
                return(NULL);
        }

        /*
         * Basic return values.
         */
        *rec_offp = rec_offset;
        rec = hammer_bread(trans->hmp, rec_offset, errorp, rec_bufferp);
        hammer_modify_buffer(trans, *rec_bufferp, NULL, 0);
        bzero(rec, sizeof(*rec));
        KKASSERT(*errorp == 0);
        rec->base.data_off = data_offset;
        rec->base.data_len = data_len;

        if (data_bufferp) {
                if (data_len) {
                        *datap = hammer_bread(trans->hmp, data_offset, errorp,
                                              data_bufferp);
                        KKASSERT(*errorp == 0);
                        hammer_modify_buffer(trans, *data_bufferp, NULL, 0);
                } else {
                        *datap = NULL;
                }
        } else if (data_len) {
                KKASSERT(data_offset + data_len - rec_offset <=
                         HAMMER_RECORD_SIZE);
                if (datap) {
                        *datap = (void *)((char *)rec +
                                          (int32_t)(data_offset - rec_offset));
                }
        } else {
                KKASSERT(datap == NULL);
        }
        KKASSERT(*errorp == 0);
        return(rec);
}
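/*
 * Usage sketch (editorial; the local variable names are hypothetical):
 * an in-band directory entry passes data_bufferp == NULL and receives a
 * pointer into the record itself,
 *
 *      rec = hammer_alloc_record(trans, &rec_offset, HAMMER_RECTYPE_DIRENTRY,
 *                                &rec_buffer, nlen, &data, NULL, &error);
 *
 * while out-of-band data supplies a separate data_bufferp instead.
 */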
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;

        /*
         * Allocate data
         */
        if (data_len) {
                if (data_len < HAMMER_BUFSIZE) {
                        *data_offsetp = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_SMALL_DATA_INDEX,
                                                data_len, errorp);
                } else {
                        *data_offsetp = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_LARGE_DATA_INDEX,
                                                data_len, errorp);
                }
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread(trans->hmp, *data_offsetp, errorp,
                                            data_bufferp);
                        KKASSERT(*errorp == 0);
                        hammer_modify_buffer(trans, *data_bufferp, NULL, 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}
/*
 * Sync dirty buffers to the media
 */

static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;

        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);

        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_sync_volume, &info);
        return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, info->waitfor);
        if (error)
                info->error = error;
        return(0);
}
int
hammer_sync_volume(hammer_volume_t volume, void *data)
{
        struct hammer_sync_info *info = data;

        hammer_ref(&volume->io.lock);
        RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
                hammer_sync_buffer, info);
        hammer_rel_volume(volume, 1);
        return(0);
}
int
hammer_sync_buffer(hammer_buffer_t buffer, void *data __unused)
{
        hammer_ref(&buffer->io.lock);
        hammer_rel_buffer(buffer, 1);
        return(0);
}
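/*
 * Usage sketch (editorial, not in the original source): the filesystem's
 * VFS sync entry point would invoke this as
 *
 *      error = hammer_sync_hmp(hmp, MNT_WAIT);
 *
 * with waitfor being the usual MNT_WAIT/MNT_NOWAIT value passed through
 * to VOP_FSYNC().
 */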