/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.28 2008/02/08 08:30:59 dillon Exp $
 */

/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node);
static hammer_off_t hammer_advance_fifo(hammer_volume_t volume,
			hammer_off_t off, int32_t bytes);
static hammer_off_t hammer_alloc_fifo(hammer_mount_t hmp, int32_t rec_len,
			int32_t data_len, struct hammer_buffer **rec_bufferp,
			u_int16_t hdr_type, int can_cross,
			struct hammer_buffer **data2_bufferp, int *errorp);

/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->buf_offset < buf2->buf_offset)
		return(-1);
	if (buf1->buf_offset > buf2->buf_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, buf_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, buf_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
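
#if 0
/*
 * Illustrative sketch, not part of the original file: the generated
 * keyed lookups are invoked through the RB_LOOKUP macro elsewhere in
 * this file, exactly as shown here.  'hmp' is an assumed, hypothetical
 * hammer_mount pointer.
 */
static hammer_volume_t
example_volume_lookup(hammer_mount_t hmp, int32_t vol_no)
{
	/* keyed lookup generated by RB_GENERATE2 above */
	return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif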

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() during the mount are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	volume->hmp = hmp;
	hammer_io_init(&volume->io, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	RB_INIT(&volume->rb_bufs_root);
	RB_INIT(&volume->rb_nods_root);

	hmp->mp->mnt_stat.f_blocks += volume->nblocks;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
		goto late_failure;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
	}

late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
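
#if 0
/*
 * Usage sketch, not part of the original file: volumes are installed
 * once, at mount time, typically by iterating the volume paths handed
 * to the VFS mount code.  'volpaths' and 'nvols' are hypothetical names.
 */
static int
example_install_all(struct hammer_mount *hmp, char **volpaths, int nvols)
{
	int error = 0;
	int i;

	for (i = 0; error == 0 && i < nvols; ++i)
		error = hammer_install_volume(hmp, volpaths[i]);
	return(error);
}
#endif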

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Sync clusters, sync volume
	 */
	hmp->mp->mnt_stat.f_blocks -= volume->nblocks;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Unload all in-memory buffers associated with the volume.
	 * (Earlier HAMMER revisions also unloaded clusters and
	 * super-clusters here.)
	 */
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	hammer_io_waitdep(&volume->io);

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	hammer_io_release(&volume->io, 2);

	/*
	 * There should be no references on the volume and no in-memory
	 * buffers remaining.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

	volume->ondisk = NULL;

	if (volume->devvp) {
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		if (vn_isdisk(volume->devvp, NULL) &&
		    volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == volume->hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	struct hammer_volume_ondisk *ondisk;
	int error;

	hammer_lock_ex(&volume->io.lock);
	KKASSERT(volume->io.loading == 0);
	volume->io.loading = 1;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io);
		if (error) {
			volume->io.loading = 0;
			hammer_unlock(&volume->io.lock);
			return(error);
		}
		volume->ondisk = ondisk = (void *)volume->io.bp->b_data;
	}
	volume->io.loading = 0;
	hammer_unlock(&volume->io.lock);
	return(0);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	if (volume->io.lock.refs == 1) {
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		} else if (flush) {
			hammer_io_flush(&volume->io);
		}
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Note that a buffer holds a reference to its associated
 * volume.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int32_t vol_no;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * NOTE: buf_offset and maxbuf_off are both full offset
	 * specifications.
	 */
	KKASSERT(buf_offset < volume->maxbuf_off);

	/*
	 * Locate and lock the buffer structure, creating one if necessary.
	 */
again:
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer == NULL) {
		++hammer_count_buffers;
		buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
		buffer->buf_offset = buf_offset;
		buffer->volume = volume;
		hammer_io_init(&buffer->io, HAMMER_STRUCTURE_BUFFER);
		buffer->io.offset = volume->ondisk->vol_buf_beg +
				    (buf_offset & HAMMER_OFF_SHORT_MASK);
		TAILQ_INIT(&buffer->clist);
		hammer_ref(&buffer->io.lock);

		/*
		 * Insert the buffer into the RB tree and handle late
		 * collisions.
		 */
		if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
			hammer_unref(&buffer->io.lock);
			--hammer_count_buffers;
			kfree(buffer, M_HAMMER);
			goto again;
		}
		hammer_ref(&volume->io.lock);
	} else {
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * Deal with on-disk info
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_rel_volume(volume, 0);
	return(buffer);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	void *ondisk;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	hammer_lock_ex(&buffer->io.lock);
	KKASSERT(buffer->io.loading == 0);
	buffer->io.loading = 1;

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io);
		}
		if (error) {
			buffer->io.loading = 0;
			hammer_unlock(&buffer->io.lock);
			return (error);
		}
		buffer->ondisk = ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0 && isnew) {
		hammer_modify_buffer(buffer, NULL, 0);
		/* additional initialization goes here */
	}
	buffer->io.loading = 0;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	hammer_ref(&buffer->io.lock);
	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
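
#if 0
/*
 * Sketch, not part of the original file: a caller holding a specially
 * handled pointer such as cursor->buffer must treat the pointer as stale
 * when hammer_ref_buffer() fails, because the failed-load path above
 * releases the structure.  'example_cursor' is a hypothetical stand-in
 * for such a caller structure.
 */
struct example_cursor {
	hammer_buffer_t buffer;
};

static int
example_recover_buffer(struct example_cursor *cursor)
{
	int error;

	error = hammer_ref_buffer(cursor->buffer);
	if (error)
		cursor->buffer = NULL;	/* structure may be destroyed */
	return(error);
}
#endif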

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;

	if (buffer->io.lock.refs == 1) {
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				volume = buffer->volume;
				RB_REMOVE(hammer_buf_rb_tree,
					  &volume->rb_bufs_root, buffer);
				buffer->volume = NULL; /* sanity */
				--hammer_count_buffers;
				kfree(buffer, M_HAMMER);
				hammer_rel_volume(volume, 0);
				return;
			}
		} else if (flush) {
			hammer_io_flush(&buffer->io);
		}
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || buffer->buf_offset != buf_offset) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
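
#if 0
/*
 * Usage sketch, not part of the original file: callers cache the last
 * buffer in a local and release it when done, exactly as
 * hammer_free_fifo() does further below.
 */
static void
example_peek(hammer_mount_t hmp, hammer_off_t off)
{
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	int error;

	head = hammer_bread(hmp, off, &error, &buffer);
	if (head)
		kprintf("fifo head type %04x\n", head->hdr_type);
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif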

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	    struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || buffer->buf_offset != buf_offset) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

/************************************************************************
 *				B-TREE NODES				*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset, int *errorp)
{
	hammer_volume_t volume;
	hammer_node_t node;
	int32_t vol_no;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(node_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &volume->rb_nods_root,
			 node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->volume = volume;	/* not directly referenced */
		if (RB_INSERT(hammer_nod_rb_tree, &volume->rb_nods_root,
			      node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	*errorp = hammer_load_node(node);
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	hammer_rel_volume(volume, 0);
	return(node);
}

/*
 * Reference an already-referenced node.
 */
int
hammer_ref_node(hammer_node_t node)
{
	int error;

	KKASSERT(node->lock.refs > 0);
	hammer_ref(&node->lock);
	if ((error = hammer_load_node(node)) != 0)
		hammer_rel_node(node);
	return(error);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node)
{
	hammer_buffer_t buffer;
	int error;

	error = 0;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
		} else {
			buffer = hammer_get_buffer(node->volume->hmp,
						   node->node_offset, 0,
						   &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
				(node->node_offset & HAMMER_BUFMASK));
		}
	}
	hammer_unlock(&node->lock);
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
		     int *errorp)
{
	hammer_node_t node;

	if ((node = *cache) != NULL)
		hammer_ref(&node->lock);
	if (node) {
		*errorp = hammer_load_node(node);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & (HAMMER_NODE_DELETED|HAMMER_NODE_FLUSH)) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node if it has been marked for deletion.  We mark
	 * it as being free.  Note that the disk space is physically
	 * freed when the fifo cycles back through the node.
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		hammer_free_fifo(node->volume->hmp, node->node_offset);

	/*
	 * Destroy the node.  Record pertinent data because the node
	 * becomes stale the instant we flush it.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
	hammer_node_t old;

	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;

	/*
	 * Cache the node.  If we previously cached a different node we
	 * have to give HAMMER a chance to destroy it.
	 */
again:
	if (node->cache1 != cache) {
		if (node->cache2 != cache) {
			if ((old = *cache) != NULL) {
				KKASSERT(node->lock.refs != 0);
				hammer_uncache_node(cache);
				goto again;
			}
			if (node->cache2)
				*node->cache2 = NULL;
			node->cache2 = node->cache1;
			node->cache1 = cache;
			*cache = node;
		} else {
			struct hammer_node **tmp;

			tmp = node->cache1;
			node->cache1 = node->cache2;
			node->cache2 = tmp;
		}
	}
}
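
#if 0
/*
 * Usage sketch, not part of the original file: passively cache a node in
 * a caller-owned pointer and reacquire it later without a full B-Tree
 * lookup.  'cache' is the caller's hammer_node back-pointer, e.g. one
 * embedded in an inode.
 */
static hammer_node_t
example_reacquire(struct hammer_mount *hmp, hammer_node_t node,
		  struct hammer_node **cache, int *errorp)
{
	hammer_cache_node(node, cache);		/* 0-ref passive cache */
	hammer_rel_node(node);			/* caller may drop its ref */

	/* ... later: returns NULL on failure, caller falls back to lookup */
	return(hammer_ref_node_safe(hmp, cache, errorp));
}
#endif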

void
hammer_uncache_node(struct hammer_node **cache)
{
	hammer_node_t node;

	if ((node = *cache) != NULL) {
		*cache = NULL;
		if (node->cache1 == cache) {
			node->cache1 = node->cache2;
			node->cache2 = NULL;
		} else if (node->cache2 == cache) {
			node->cache2 = NULL;
		} else {
			panic("hammer_uncache_node: missing cache linkage");
		}
		if (node->cache1 == NULL && node->cache2 == NULL)
			hammer_flush_node(node);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	if (node->cache1)
		*node->cache1 = NULL;
	if (node->cache2)
		*node->cache2 = NULL;
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		RB_REMOVE(hammer_nod_rb_tree, &node->volume->rb_nods_root,
			  node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->lock.refs == 0 && node->ondisk == NULL);
		hammer_ref(&node->lock);
		node->flags |= HAMMER_NODE_FLUSH;
		hammer_rel_node(node);
	}
}

/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_mount_t hmp, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_alloc_fifo(hmp, sizeof(struct hammer_node_ondisk),
					0, &buffer, HAMMER_HEAD_TYPE_BTREE,
					0, NULL, errorp);
	if (*errorp == 0)
		node = hammer_get_node(hmp, node_offset, errorp);
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * The core record (rec_len) cannot cross a buffer boundary.  The record +
 * data is only allowed to cross a buffer boundary for HAMMER_RECTYPE_DATA
 * records.
 */
hammer_off_t
hammer_alloc_record(hammer_mount_t hmp,
		    hammer_off_t *rec_offp, u_int8_t rec_type,
		    int32_t rec_len, struct hammer_buffer **rec_bufferp,
		    hammer_off_t *data_offp, int32_t data_len,
		    void **data1p, void **data2p, int32_t *data2_index,
		    struct hammer_buffer **data2_bufferp,
		    int *errorp)
{
	int32_t aligned_rec_len, n;
	hammer_off_t rec_offset;
	hammer_record_ondisk_t rec;
	int can_cross;

	aligned_rec_len = (rec_len + HAMMER_HEAD_ALIGN_MASK) &
			  ~HAMMER_HEAD_ALIGN_MASK;
	can_cross = (rec_type == HAMMER_RECTYPE_DATA);

	rec_offset = hammer_alloc_fifo(hmp, aligned_rec_len, data_len,
				       rec_bufferp, HAMMER_HEAD_TYPE_RECORD,
				       can_cross, data2_bufferp, errorp);
	if (*errorp)
		return(0);

	/*
	 * Basic return values.
	 */
	*rec_offp = rec_offset;
	if (data_offp)
		*data_offp = rec_offset + aligned_rec_len;
	rec = (void *)((char *)(*rec_bufferp)->ondisk +
		       ((int32_t)rec_offset & HAMMER_BUFMASK));
	rec->base.data_off = rec_offset + aligned_rec_len;
	rec->base.data_len = data_len;
	if (data1p)
		*data1p = (void *)((char *)rec + aligned_rec_len);
	if (data2_index) {
		n = ((int32_t)rec_offset & HAMMER_BUFMASK) +
		    aligned_rec_len + data_len;
		if (n > HAMMER_BUFSIZE) {
			*data2_index = data_len - (n - HAMMER_BUFSIZE);
			KKASSERT(can_cross != 0);
			*data2p = (*data2_bufferp)->ondisk;
		} else {
			*data2_index = data_len;
			*data2p = NULL;
		}
	} else {
		KKASSERT(data2p == NULL);
	}
	return(rec_offset);
}
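
#if 0
/*
 * Usage sketch, not part of the original file: store record data that may
 * have been split across two buffers.  'src' is a hypothetical
 * caller-supplied byte buffer holding data_len bytes.
 */
static int
example_write_data(hammer_mount_t hmp, void *src,
		   int32_t rec_len, int32_t data_len)
{
	struct hammer_buffer *rec_buffer = NULL;
	struct hammer_buffer *data2_buffer = NULL;
	hammer_off_t rec_off, data_off;
	void *data1, *data2;
	int32_t data2_index;
	int error;

	hammer_alloc_record(hmp, &rec_off, HAMMER_RECTYPE_DATA,
			    rec_len, &rec_buffer, &data_off, data_len,
			    &data1, &data2, &data2_index,
			    &data2_buffer, &error);
	if (error == 0) {
		/* first data2_index bytes fit in the record's buffer */
		bcopy(src, data1, data2_index);
		/* the remainder, if any, continues in the second buffer */
		if (data2_index < data_len) {
			bcopy((char *)src + data2_index, data2,
			      data_len - data2_index);
		}
		hammer_rel_buffer(rec_buffer, 0);
		if (data2_buffer)
			hammer_rel_buffer(data2_buffer, 0);
	}
	return(error);
}
#endif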

/*
 * Generate an undo fifo entry and return the buffer to the caller (XXX).
 * The caller must create a dependency to ensure that the undo record is
 * flushed before the modified buffer is flushed.
 */
int
hammer_generate_undo(hammer_mount_t hmp, hammer_off_t off, void *base, int len)
{
	hammer_off_t rec_offset;
	hammer_fifo_undo_t undo;
	hammer_buffer_t buffer = NULL;
	int error;

	rec_offset = hammer_alloc_fifo(hmp, sizeof(*undo), len,
				       &buffer, HAMMER_HEAD_TYPE_UNDO,
				       0, NULL, &error);
	if (error == 0) {
		undo = (void *)((char *)buffer->ondisk +
				((int32_t)rec_offset & HAMMER_BUFMASK));
		undo->undo_offset = off;
		bcopy(base, undo + 1, len);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
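
#if 0
/*
 * Usage sketch, not part of the original file: record the pre-image of a
 * span of media before overwriting it so recovery can roll it back.
 * 'ptr', 'disk_off' and 'newdata' are hypothetical caller-side names for
 * the bytes about to be modified, their media offset, and the new
 * contents.
 */
static int
example_modify(hammer_mount_t hmp, hammer_off_t disk_off,
	       void *ptr, void *newdata, int len)
{
	int error;

	error = hammer_generate_undo(hmp, disk_off, ptr, len);
	if (error == 0)
		bcopy(newdata, ptr, len);	/* undo exists; safe now */
	return(error);
}
#endif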

/*
 * Allocate space from the FIFO.  The first rec_len bytes will be zero'd.
 * The entire space is marked modified (the caller should not remark it as
 * that will cause unnecessary undo records to be added).
 */
static
hammer_off_t
hammer_alloc_fifo(hammer_mount_t hmp, int32_t rec_len, int32_t data_len,
		  struct hammer_buffer **rec_bufferp, u_int16_t hdr_type,
		  int can_cross,
		  struct hammer_buffer **data2_bufferp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_volume_t end_volume;
	hammer_volume_ondisk_t ondisk;
	hammer_fifo_head_t head;
	hammer_off_t end_off = 0;
	hammer_off_t tmp_off = 0;
	int32_t end_vol_no;
	int32_t tmp_vol_no;
	int32_t xoff;
	int32_t aligned_bytes;
	int must_pad;

	aligned_bytes = (rec_len + data_len + HAMMER_HEAD_ALIGN_MASK) &
			~HAMMER_HEAD_ALIGN_MASK;

	root_volume = hammer_get_root_volume(hmp, errorp);
	while (root_volume) {
		hammer_modify_volume(root_volume, NULL, 0);
		ondisk = root_volume->ondisk;

		end_off = ondisk->vol0_fifo_end;
		end_vol_no = HAMMER_VOL_DECODE(end_off);

		end_volume = hammer_get_volume(hmp, end_vol_no, errorp);
		if (*errorp)
			break;

		/*
		 * Check to see if we ran out of space.  Include some extra
		 * room.
		 *
		 * vol0_fifo_end cannot be advanced into the same buffer
		 * that vol0_fifo_beg resides in.  This allows us to
		 * instantiate a new buffer without reading it in.
		 */
		tmp_off = ondisk->vol0_fifo_beg & ~HAMMER_BUFMASK64;
		tmp_vol_no = HAMMER_VOL_DECODE(tmp_off);
		if ((tmp_off & HAMMER_OFF_SHORT_MASK) == 0) {
			if (end_vol_no + 1 == tmp_vol_no) {
				tmp_vol_no = end_vol_no;
				tmp_off = end_volume->maxbuf_off;
			} else if (end_vol_no + 1 == hmp->nvolumes &&
				   tmp_vol_no == 0) {
				tmp_vol_no = end_vol_no;
				tmp_off = end_volume->maxbuf_off;
			}
		}
		hammer_rel_volume(end_volume, 0);

		/*
		 * XXX dummy head at end of fifo
		 */
		if (end_vol_no == tmp_vol_no &&
		    end_off < tmp_off &&
		    end_off + aligned_bytes + sizeof(*head) >= tmp_off) {
			*errorp = ENOSPC;
			break;
		}

		if ((int32_t)end_off & HAMMER_BUFMASK)
			head = hammer_bread(hmp, end_off, errorp, rec_bufferp);
		else
			head = hammer_bnew(hmp, end_off, errorp, rec_bufferp);
		if (*errorp)
			break;

		/*
		 * Load the buffer, retry if someone else squeaked in
		 * while we were blocked.
		 */
		if (ondisk->vol0_fifo_end != end_off)
			continue;

		/*
		 * Ok, we're gonna do something.  Modify the buffer
		 */
		hammer_modify_buffer(*rec_bufferp, NULL, 0);
		if (ondisk->vol0_fifo_end != end_off)
			continue;
		xoff = (int32_t)end_off & HAMMER_BUFMASK;

		/*
		 * The non-data portion of the fifo record cannot cross
		 * a buffer boundary.
		 *
		 * The entire record cannot cross a buffer boundary if
		 * can_cross is 0.
		 *
		 * It is illegal for a record to cross a volume boundary.
		 *
		 * It is illegal for a record to cross a recovery boundary
		 * (this is so recovery code is guaranteed a record rather
		 * than data at certain points).
		 *
		 * Add a pad record and loop if it does.
		 */
		must_pad = 0;
		if (xoff + rec_len > HAMMER_BUFSIZE)
			must_pad = 1;
		if (can_cross == 0) {
			if (xoff + aligned_bytes > HAMMER_BUFSIZE)
				must_pad = 1;
		} else {
			if (xoff + aligned_bytes > HAMMER_BUFSIZE &&
			    (end_off + aligned_bytes) >=
			    (*rec_bufferp)->volume->maxbuf_off) {
				must_pad = 1;
			}
			if ((end_off ^ (end_off + aligned_bytes)) &
			    HAMMER_OFF_SHORT_REC_MASK) {
				must_pad = 1;
			}
		}
		if (must_pad) {
			must_pad = HAMMER_BUFSIZE - xoff;
			head->hdr_signature = HAMMER_HEAD_SIGNATURE;
			head->hdr_type = HAMMER_HEAD_TYPE_PAD;
			head->hdr_fwd_link = must_pad;
			head->hdr_seq = 0;	/* XXX seq */
			KKASSERT((must_pad & 7) == 0);
			ondisk->vol0_fifo_end =
				hammer_advance_fifo((*rec_bufferp)->volume,
						    end_off, must_pad);
			continue;
		}

		/*
		 * If the record + data crosses into a second buffer,
		 * instantiate that buffer and mark it modified as well.
		 */
		if (xoff + aligned_bytes > HAMMER_BUFSIZE) {
			KKASSERT(xoff + aligned_bytes <= HAMMER_BUFSIZE * 2);
			hammer_bnew(hmp, end_off + (HAMMER_BUFSIZE - xoff),
				    errorp, data2_bufferp);
			hammer_modify_buffer(*data2_bufferp, NULL, 0);
		}

		bzero(head, rec_len);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = hdr_type;
		head->hdr_fwd_link = aligned_bytes / 64;
		head->hdr_rev_link = -1;	/* XXX */
		head->hdr_seq = 0;		/* XXX */
		ondisk->vol0_fifo_end =
			hammer_advance_fifo((*rec_bufferp)->volume,
					    end_off, aligned_bytes);
		break;
	}
	if (root_volume)
		hammer_rel_volume(root_volume, 0);
	if (*errorp)
		end_off = 0;
	return(end_off);
}

/*
 * Mark a fifo record as having been freed.  XXX needs undo.
 */
void
hammer_free_fifo(hammer_mount_t hmp, hammer_off_t fifo_offset)
{
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	int error;

	head = hammer_bread(hmp, fifo_offset, &error, &buffer);
	if (head) {
		hammer_modify_buffer(buffer, &head->hdr_type,
				     sizeof(head->hdr_type));
		head->hdr_type |= HAMMER_HEAD_TYPEF_FREED;
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}

/*
 * Attempt to rewind the FIFO.
 *
 * This routine is allowed to do nothing.
 */
void
hammer_unwind_fifo(hammer_mount_t hmp, hammer_off_t rec_offset)
{
}

/*
 * Advance the FIFO a certain number of bytes.
 */
static
hammer_off_t
hammer_advance_fifo(hammer_volume_t volume, hammer_off_t off, int32_t bytes)
{
	int32_t vol_no;

	off += bytes;
	KKASSERT(off <= volume->maxbuf_off);
	KKASSERT((off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	if (off == volume->maxbuf_off) {
		vol_no = volume->vol_no + 1;
		if (vol_no == volume->hmp->nvolumes)
			vol_no = 0;
		off = HAMMER_ENCODE_RAW_BUFFER(vol_no, 0);
	}
	return(off);
}

/*
 * Sync dirty buffers to the media
 */

static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;

	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);

	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_sync_volume, &info);
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}

int
hammer_sync_volume(hammer_volume_t volume, void *data)
{
	struct hammer_sync_info *info = data;

	hammer_ref(&volume->io.lock);
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
		hammer_sync_buffer, info);
	hammer_rel_volume(volume, 1);
	return(0);
}

int
hammer_sync_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_rel_buffer(buffer, 1);
	return(0);
}

/*
 * Generic fifo head initialization.
 */
void
hammer_init_fifo(hammer_fifo_head_t head, u_int16_t type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = type;
	head->hdr_rev_link = 0;
	head->hdr_fwd_link = 0;
}