/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.32 2008/02/24 20:08:50 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node);
static hammer_off_t hammer_advance_fifo(hammer_volume_t volume,
			hammer_off_t off, int32_t bytes);
static hammer_off_t hammer_alloc_fifo(hammer_mount_t hmp, int32_t rec_len,
			int32_t data_len, struct hammer_buffer **rec_bufferp,
			u_int16_t hdr_type, int can_cross,
			struct hammer_buffer **data2_bufferp, int *errorp);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zone2_offset < buf2->zone2_offset)
		return(-1);
	if (buf1->zone2_offset > buf2->zone2_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
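/*
 * Editor's illustration (not part of the original source): the generated
 * keyed lookup functions are used elsewhere in this file roughly as
 * follows.  The variable names here are hypothetical:
 *
 *	hammer_volume_t volume;
 *	hammer_buffer_t buffer;
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
 *			   zone2_offset);
 */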
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() at this point are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	volume->hmp = hmp;
	hammer_io_init(&volume->io, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	RB_INIT(&volume->rb_bufs_root);

	hmp->mp->mnt_stat.f_blocks += volume->nblocks;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Sync clusters, sync volume
	 */
	hmp->mp->mnt_stat.f_blocks -= volume->nblocks;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Unload the buffers held by this volume.
	 */
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
			hammer_unload_buffer, NULL);
	hammer_io_waitdep(&volume->io);

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	hammer_io_release(&volume->io, 2);

	/*
	 * There should be no references on the volume and no remaining
	 * buffers.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	struct hammer_volume_ondisk *ondisk;
	int error;

	hammer_lock_ex(&volume->io.lock);
	KKASSERT(volume->io.loading == 0);
	volume->io.loading = 1;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io);
		if (error) {
			volume->io.loading = 0;
			hammer_unlock(&volume->io.lock);
			return(error);
		}
		volume->ondisk = ondisk = (void *)volume->io.bp->b_data;
	}
	volume->io.loading = 0;
	hammer_unlock(&volume->io.lock);
	return(0);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	if (volume->io.lock.refs == 1) {
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		} else if (flush) {
			hammer_io_flush(&volume->io);
		}
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
}
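/*
 * Editor's illustration (not part of the original source): the reference
 * pattern the volume API expects.  A caller holds a ref across any use of
 * volume->ondisk and always pairs the get with a release:
 *
 *	hammer_volume_t volume;
 *	int error;
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */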
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zoneX_offset;
	int32_t vol_no;
	int zone;

	zoneX_offset = buf_offset;
	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	}
	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * NOTE: buf_offset and maxbuf_off are both full offset
	 * specifications.
	 */
	KKASSERT(buf_offset < volume->maxbuf_off);

	/*
	 * Locate and lock the buffer structure, creating one if necessary.
	 */
again:
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer == NULL) {
		++hammer_count_buffers;
		buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
		buffer->zone2_offset = buf_offset;
		buffer->volume = volume;
		hammer_io_init(&buffer->io, HAMMER_STRUCTURE_BUFFER);
		buffer->io.offset = volume->ondisk->vol_buf_beg +
				    (buf_offset & HAMMER_OFF_SHORT_MASK);
		TAILQ_INIT(&buffer->clist);
		hammer_ref(&buffer->io.lock);

		/*
		 * Insert the buffer into the RB tree and handle late
		 * collisions.
		 */
		if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
			hammer_unref(&buffer->io.lock);
			--hammer_count_buffers;
			kfree(buffer, M_HAMMER);
			goto again;
		}
		hammer_ref(&volume->io.lock);
	} else {
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * Cache the blockmap translation
	 */
	if ((zoneX_offset & HAMMER_ZONE_RAW_BUFFER) != HAMMER_ZONE_RAW_BUFFER)
		buffer->zoneX_offset = zoneX_offset;

	/*
	 * Deal with on-disk info
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_rel_volume(volume, 0);
	return(buffer);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	void *ondisk;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	hammer_lock_ex(&buffer->io.lock);
	KKASSERT(buffer->io.loading == 0);
	buffer->io.loading = 1;

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io);
		}
		if (error) {
			buffer->io.loading = 0;
			hammer_unlock(&buffer->io.lock);
			return(error);
		}
		buffer->ondisk = ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0 && isnew) {
		hammer_modify_buffer(buffer, NULL, 0);
		/* additional initialization goes here */
	}
	buffer->io.loading = 0;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or reachable via a
 * specially handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	hammer_ref(&buffer->io.lock);
	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;

	if (buffer->io.lock.refs == 1) {
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				volume = buffer->volume;
				RB_REMOVE(hammer_buf_rb_tree,
					  &volume->rb_bufs_root, buffer);
				buffer->volume = NULL; /* sanity */
				--hammer_count_buffers;
				kfree(buffer, M_HAMMER);
				hammer_rel_volume(volume, 0);
				return;
			}
		} else if (flush) {
			hammer_io_flush(&buffer->io);
		}
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
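/*
 * Editor's illustration (not part of the original source): the *bufferp
 * caching idiom.  A cursor-style caller reuses one buffer pointer across
 * many reads and releases it when done; the offset name is hypothetical:
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(hmp, some_offset, &error, &buffer);
 *	... further hammer_bread() calls reuse or replace buffer ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */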
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	    struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
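/*
 * Editor's note (sketch, not part of the original source): hammer_bnew()
 * is for buffers about to be completely overwritten, e.g. when the fifo
 * instantiates a fresh buffer.  The caller must initialize the returned
 * memory before relying on its contents, as hammer_alloc_fifo() below
 * does:
 *
 *	head = hammer_bnew(hmp, end_off, errorp, rec_bufferp);
 *	bzero(head, rec_len);
 */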
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the other structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->hmp = hmp;
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	*errorp = hammer_load_node(node);
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
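/*
 * Editor's illustration (not part of the original source): typical node
 * use.  The B-Tree code acquires a node, works on node->ondisk, and
 * releases it; the structure may then persist in the passive cache:
 *
 *	hammer_node_t node;
 *	int error;
 *
 *	node = hammer_get_node(hmp, node_offset, &error);
 *	if (node) {
 *		... examine node->ondisk, or hammer_modify_node() ...
 *		hammer_rel_node(node);
 *	}
 */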
/*
 * Reference an already-referenced node.
 */
int
hammer_ref_node(hammer_node_t node)
{
	int error;

	KKASSERT(node->lock.refs > 0);
	hammer_ref(&node->lock);
	if ((error = hammer_load_node(node)) != 0)
		hammer_rel_node(node);
	return (error);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node)
{
	hammer_buffer_t buffer;
	int error;

	error = 0;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
		} else {
			buffer = hammer_get_buffer(node->hmp,
						   node->node_offset, 0,
						   &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
			       (node->node_offset & HAMMER_BUFMASK));
		}
	}
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
		     int *errorp)
{
	hammer_node_t node;

	if ((node = *cache) != NULL)
		hammer_ref(&node->lock);
	if (node) {
		*errorp = hammer_load_node(node);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & (HAMMER_NODE_DELETED|HAMMER_NODE_FLUSH)) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node if it has been marked for deletion.  We mark
	 * it as being free.  Note that the disk space is physically
	 * freed when the fifo cycles back through the node.
	 */
	if (node->flags & HAMMER_NODE_DELETED) {
		hammer_blockmap_free(node->hmp, node->node_offset,
				     sizeof(*node->ondisk));
	}

	/*
	 * Destroy the node.  Record pertinent data because the node
	 * becomes stale the instant we flush it.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
	hammer_node_t old;

	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;

	/*
	 * Cache the node.  If we previously cached a different node we
	 * have to give HAMMER a chance to destroy it.
	 */
	if (node->cache1 != cache) {
		if (node->cache2 != cache) {
			if ((old = *cache) != NULL) {
				KKASSERT(node->lock.refs != 0);
				hammer_uncache_node(cache);
			}
			if (node->cache2)
				*node->cache2 = NULL;
			node->cache2 = node->cache1;
			node->cache1 = cache;
			*cache = node;
		} else {
			struct hammer_node **tmp;

			tmp = node->cache1;
			node->cache1 = node->cache2;
			node->cache2 = tmp;
		}
	}
}
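/*
 * Editor's note (not part of the original source): the two cache pointers
 * above implement a tiny two-entry LRU per node.  Caching through a new
 * slot shifts cache1 into cache2 and installs the new slot as cache1;
 * re-caching through the slot already held in cache2 simply swaps the
 * two, keeping the most recently used slot in cache1.
 */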
void
hammer_uncache_node(struct hammer_node **cache)
{
	hammer_node_t node;

	if ((node = *cache) != NULL) {
		*cache = NULL;
		if (node->cache1 == cache) {
			node->cache1 = node->cache2;
			node->cache2 = NULL;
		} else if (node->cache2 == cache) {
			node->cache2 = NULL;
		} else {
			panic("hammer_uncache_node: missing cache linkage");
		}
		if (node->cache1 == NULL && node->cache2 == NULL)
			hammer_flush_node(node);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	if (node->cache1)
		*node->cache1 = NULL;
	if (node->cache2)
		*node->cache2 = NULL;
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->lock.refs == 0 && node->ondisk == NULL);
		hammer_ref(&node->lock);
		node->flags |= HAMMER_NODE_FLUSH;
		hammer_rel_node(node);
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/
/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_mount_t hmp, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(hmp, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(hmp, node_offset, errorp);
		hammer_modify_node(node);
		bzero(node->ondisk, sizeof(*node->ondisk));
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again, unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 */
hammer_record_ondisk_t
hammer_alloc_record(hammer_mount_t hmp,
		    hammer_off_t *rec_offp, u_int8_t rec_type,
		    struct hammer_buffer **rec_bufferp,
		    int32_t data_len, void **datap,
		    struct hammer_buffer **data_bufferp, int *errorp)
{
	hammer_record_ondisk_t rec;
	hammer_off_t rec_offset;
	hammer_off_t data_offset;
	int32_t reclen;

	/*
	 * Allocate the record
	 */
	rec_offset = hammer_blockmap_alloc(hmp, HAMMER_ZONE_RECORD_INDEX,
					   HAMMER_RECORD_SIZE, errorp);
	if (*errorp)
		return(NULL);

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_bufferp == NULL) {
			switch(rec_type) {
			case HAMMER_RECTYPE_DATA:
				reclen = offsetof(struct hammer_data_record,
						  data[0]);
				break;
			case HAMMER_RECTYPE_DIRENTRY:
				reclen = offsetof(struct hammer_entry_record,
						  den_name[0]);
				break;
			default:
				panic("hammer_alloc_record: illegal "
				      "in-band data");
				/* NOT REACHED */
				reclen = 0;
				break;
			}
			KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
			data_offset = rec_offset + reclen;
		} else if (data_len < HAMMER_BUFSIZE) {
			data_offset = hammer_blockmap_alloc(hmp,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
		} else {
			data_offset = hammer_blockmap_alloc(hmp,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
		}
	} else {
		data_offset = 0;
	}
	if (*errorp) {
		hammer_blockmap_free(hmp, rec_offset, HAMMER_RECORD_SIZE);
		return(NULL);
	}

	/*
	 * Basic return values.
	 */
	*rec_offp = rec_offset;
	rec = hammer_bread(hmp, rec_offset, errorp, rec_bufferp);
	hammer_modify_buffer(*rec_bufferp, NULL, 0);
	bzero(rec, sizeof(*rec));
	KKASSERT(*errorp == 0);
	rec->base.data_off = data_offset;
	rec->base.data_len = data_len;

	if (data_bufferp) {
		if (data_len) {
			*datap = hammer_bread(hmp, data_offset, errorp,
					      data_bufferp);
			KKASSERT(*errorp == 0);
			hammer_modify_buffer(*data_bufferp, NULL, 0);
		} else {
			*datap = NULL;
		}
	} else if (data_len) {
		KKASSERT(data_offset + data_len - rec_offset <=
			 HAMMER_RECORD_SIZE);
		if (datap) {
			*datap = (void *)((char *)rec +
					  (int32_t)(data_offset - rec_offset));
		}
	} else {
		KKASSERT(datap == NULL);
	}
	KKASSERT(*errorp == 0);
	return(rec);
}
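/*
 * Editor's illustration (not part of the original source): where record
 * data lands, using hypothetical sizes.  A short directory entry name
 * fits in-band directly after the record header (data_bufferp == NULL);
 * a 4KB chunk (data_len < HAMMER_BUFSIZE) is allocated from the
 * small-data zone; a 64KB chunk comes from the large-data zone.  In all
 * cases rec->base.data_off / rec->base.data_len point the record at its
 * data:
 *
 *	in-band:	data_offset = rec_offset + reclen
 *	small data:	data_offset = blockmap alloc, ZONE_SMALL_DATA
 *	large data:	data_offset = blockmap alloc, ZONE_LARGE_DATA
 */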
/*
 * Generate an undo fifo entry and return the buffer to the caller (XXX).
 * The caller must create a dependency to ensure that the undo record is
 * flushed before the modified buffer is flushed.
 */
int
hammer_generate_undo(hammer_mount_t hmp, hammer_off_t off, void *base, int len)
{
	hammer_off_t rec_offset;
	hammer_fifo_undo_t undo;
	hammer_buffer_t buffer = NULL;
	int error;

	rec_offset = hammer_alloc_fifo(hmp, sizeof(*undo), len,
				       &buffer, HAMMER_HEAD_TYPE_UNDO,
				       0, NULL, &error);
	if (error == 0) {
		undo = (void *)((char *)buffer->ondisk +
				((int32_t)rec_offset & HAMMER_BUFMASK));
		undo->undo_offset = off;
		bcopy(base, undo + 1, len);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
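/*
 * Editor's illustration (not part of the original source): on-media
 * layout of the undo entry allocated above.  The saved image of the
 * modified range is copied immediately after the hammer_fifo_undo
 * header, which is why the bcopy() targets (undo + 1):
 *
 *	+--------------------+------------------------+-----------+
 *	| hammer_fifo_undo   | len bytes of original  | fifo tail |
 *	| (undo_offset=off)  | data copied from *base |           |
 *	+--------------------+------------------------+-----------+
 */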
/*
 * Allocate space from the FIFO.  The first rec_len bytes will be zero'd.
 * The entire space is marked modified (the caller should not re-mark it,
 * as that will cause unnecessary undo records to be added).
 */
static
hammer_off_t
hammer_alloc_fifo(hammer_mount_t hmp, int32_t rec_len, int32_t data_len,
		  struct hammer_buffer **rec_bufferp, u_int16_t hdr_type,
		  int can_cross,
		  struct hammer_buffer **data2_bufferp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_volume_t end_volume;
	hammer_volume_ondisk_t ondisk;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t end_off = 0;
	hammer_off_t tmp_off = 0;
	int32_t end_vol_no;
	int32_t tmp_vol_no;
	int32_t xoff;
	int32_t aligned_bytes;
	int must_pad;

	aligned_bytes = (rec_len + data_len + HAMMER_TAIL_ONDISK_SIZE +
			 HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

	root_volume = hammer_get_root_volume(hmp, errorp);
	if (root_volume)
		hammer_modify_volume(root_volume, NULL, 0);

	while (root_volume) {
		ondisk = root_volume->ondisk;

		end_off = ondisk->vol0_fifo_end;
		end_vol_no = HAMMER_VOL_DECODE(end_off);

		end_volume = hammer_get_volume(hmp, end_vol_no, errorp);
		if (*errorp)
			goto done;

		/*
		 * Check to see if we ran out of space.  Include some extra
		 * room.
		 *
		 * vol0_fifo_end cannot be advanced into the same buffer
		 * that vol0_fifo_beg resides in.  This allows us to
		 * instantiate a new buffer without reading it in.
		 */
		tmp_off = ondisk->vol0_fifo_beg & ~HAMMER_BUFMASK64;
		tmp_vol_no = HAMMER_VOL_DECODE(tmp_off);
		if ((tmp_off & HAMMER_OFF_SHORT_MASK) == 0) {
			if (end_vol_no + 1 == tmp_vol_no) {
				tmp_vol_no = end_vol_no;
				tmp_off = end_volume->maxbuf_off;
			} else if (end_vol_no + 1 == hmp->nvolumes &&
				   tmp_vol_no == 0) {
				tmp_vol_no = end_vol_no;
				tmp_off = end_volume->maxbuf_off;
			}
		}
		hammer_rel_volume(end_volume, 0);

		/*
		 * XXX dummy head at end of fifo
		 */
		if (end_vol_no == tmp_vol_no &&
		    end_off < tmp_off &&
		    end_off + aligned_bytes + sizeof(*head) >= tmp_off) {
			*errorp = ENOSPC;
			goto done;
		}

		if ((int32_t)end_off & HAMMER_BUFMASK)
			head = hammer_bread(hmp, end_off, errorp, rec_bufferp);
		else
			head = hammer_bnew(hmp, end_off, errorp, rec_bufferp);
		if (*errorp)
			goto done;

		/*
		 * Load the buffer, retry if someone else squeaked in
		 * while we were blocked.
		 */
		if (ondisk->vol0_fifo_end != end_off)
			continue;

		/*
		 * Ok, we're going to do something.  Modify the buffer.
		 */
		hammer_modify_buffer(*rec_bufferp, NULL, 0);
		if (ondisk->vol0_fifo_end != end_off)
			continue;
		xoff = (int32_t)end_off & HAMMER_BUFMASK;

		/*
		 * The non-data portion of the fifo record cannot cross
		 * a buffer boundary.
		 *
		 * The entire record cannot cross a buffer boundary if
		 * can_cross is 0.
		 *
		 * The entire record cannot cover more than two whole buffers
		 * regardless.  Even if the data portion is 16K, this case
		 * can occur due to the addition of the fifo_tail.
		 *
		 * It is illegal for a record to cross a volume boundary.
		 *
		 * It is illegal for a record to cross a recovery boundary
		 * (this is so recovery code is guaranteed a record rather
		 * than data at certain points).
		 *
		 * Add a pad record and loop if it does.
		 */
		must_pad = 0;
		if (xoff + rec_len > HAMMER_BUFSIZE)
			must_pad = 1;
		if (can_cross == 0) {
			if (xoff + aligned_bytes > HAMMER_BUFSIZE)
				must_pad = 1;
		} else {
			if (xoff + aligned_bytes > HAMMER_BUFSIZE &&
			    (end_off + aligned_bytes) >=
			    (*rec_bufferp)->volume->maxbuf_off) {
				must_pad = 1;
			}
			if ((end_off ^ (end_off + aligned_bytes)) &
			    HAMMER_OFF_SHORT_REC_MASK) {
				must_pad = 1;
			}
			if (xoff + aligned_bytes - HAMMER_BUFSIZE >
			    HAMMER_BUFSIZE) {
				KKASSERT(xoff != 0);
				must_pad = 1;
			}
		}

		/*
		 * Pad to end of the buffer if necessary.  PADs can be
		 * squeezed into as little as 8 bytes (hence our alignment
		 * requirement).  The crc, reserved, and sequence number
		 * fields are not used, but initialize them anyway if there
		 * is enough room.
		 */
		if (must_pad) {
			xoff = HAMMER_BUFSIZE - xoff;
			head->hdr_signature = HAMMER_HEAD_SIGNATURE;
			head->hdr_type = HAMMER_HEAD_TYPE_PAD;
			head->hdr_size = xoff;
			if (xoff >= HAMMER_HEAD_ONDISK_SIZE +
				    HAMMER_TAIL_ONDISK_SIZE) {
				head->hdr_crc = 0;
				head->hdr_reserved02 = 0;
				head->hdr_seq = 0;
			}

			tail = (void *)((char *)head + xoff -
					HAMMER_TAIL_ONDISK_SIZE);
			if ((void *)head != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = xoff;
			}
			KKASSERT((xoff & HAMMER_HEAD_ALIGN_MASK) == 0);
			ondisk->vol0_fifo_end =
				hammer_advance_fifo((*rec_bufferp)->volume,
						    end_off, xoff);
			continue;
		}

		if (xoff + aligned_bytes > HAMMER_BUFSIZE) {
			xoff = xoff + aligned_bytes - HAMMER_BUFSIZE;
			KKASSERT(xoff <= HAMMER_BUFSIZE);
			tail = hammer_bnew(hmp, end_off + aligned_bytes -
						HAMMER_TAIL_ONDISK_SIZE,
					   errorp, data2_bufferp);
			hammer_modify_buffer(*data2_bufferp, NULL, 0);
			if (*errorp)
				goto done;

			/*
			 * Retry if someone else appended to the fifo while
			 * we were blocked.
			 */
			if (ondisk->vol0_fifo_end != end_off)
				continue;
		} else {
			tail = (void *)((char *)head + aligned_bytes -
					HAMMER_TAIL_ONDISK_SIZE);
		}

		bzero(head, rec_len);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = hdr_type;
		head->hdr_size = aligned_bytes;
		head->hdr_crc = 0;
		head->hdr_seq = root_volume->ondisk->vol0_next_seq++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = hdr_type;
		tail->tail_size = aligned_bytes;

		ondisk->vol0_fifo_end =
			hammer_advance_fifo((*rec_bufferp)->volume,
					    end_off, aligned_bytes);
		break;
	}
done:
	if (root_volume)
		hammer_rel_volume(root_volume, 0);
	if (*errorp)
		end_off = 0;
	return(end_off);
}
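/*
 * Editor's worked example (not part of the original source): the
 * alignment arithmetic above, assuming for illustration an 8-byte fifo
 * tail (HAMMER_TAIL_ONDISK_SIZE == 8) and an 8-byte alignment
 * (HAMMER_HEAD_ALIGN_MASK == 7), consistent with PADs being squeezable
 * into 8 bytes.  For rec_len = 24 and data_len = 100:
 *
 *	24 + 100 + 8 = 132  ->  (132 + 7) & ~7 = 136 bytes reserved
 *
 * so the record, its data, and the fifo tail always end on an alignment
 * boundary, leaving any residue representable as a PAD record.
 */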
/*
 * Mark a fifo record as having been freed.  XXX needs undo.
 */
void
hammer_free_fifo(hammer_mount_t hmp, hammer_off_t fifo_offset)
{
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	int error;

	head = hammer_bread(hmp, fifo_offset, &error, &buffer);
	if (head) {
		hammer_modify_buffer(buffer, &head->hdr_type,
				     sizeof(head->hdr_type));
		head->hdr_type |= HAMMER_HEAD_FLAG_FREE;
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
/*
 * Attempt to rewind the FIFO.
 *
 * This routine is allowed to do nothing.
 */
void
hammer_unwind_fifo(hammer_mount_t hmp, hammer_off_t rec_offset)
{
}
/*
 * Advance the FIFO a certain number of bytes.
 */
static
hammer_off_t
hammer_advance_fifo(hammer_volume_t volume, hammer_off_t off, int32_t bytes)
{
	int32_t vol_no;

	off += bytes;
	KKASSERT(off <= volume->maxbuf_off);
	KKASSERT((off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	if (off == volume->maxbuf_off) {
		vol_no = volume->vol_no + 1;
		if (vol_no == volume->hmp->nvolumes)
			vol_no = 0;
		off = HAMMER_ENCODE_RAW_BUFFER(vol_no, 0);
	}
	return(off);
}
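/*
 * Editor's illustration (not part of the original source): the wrap case.
 * Advancing past the last buffer of the last volume returns the fifo to
 * raw-buffer offset 0 of volume 0, so with two volumes:
 *
 *	off == volume[1]->maxbuf_off  ->  HAMMER_ENCODE_RAW_BUFFER(0, 0)
 *
 * i.e. the fifo forms one logical ring spanning all volumes in vol_no
 * order.
 */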
/*
 * Sync dirty buffers to the media
 */

static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;

	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);

	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_sync_volume, &info);
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}
int
hammer_sync_volume(hammer_volume_t volume, void *data)
{
	struct hammer_sync_info *info = data;

	hammer_ref(&volume->io.lock);
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
		hammer_sync_buffer, info);
	hammer_rel_volume(volume, 1);
	return(0);
}
int
hammer_sync_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_rel_buffer(buffer, 1);
	return(0);
}
/*
 * Generic fifo head initialization.  Sets up the header signature and
 * record type; the caller fills in the remaining header fields.
 */
void
hammer_init_fifo(hammer_fifo_head_t head, u_int16_t type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = type;
}