/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.4 2007/11/19 00:53:40 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_supercl(hammer_supercl_t supercl, int isnew);
static int hammer_load_cluster(hammer_cluster_t cluster, int isnew);
static int hammer_load_buffer(hammer_buffer_t buffer, u_int64_t buf_type);
static void hammer_remove_node_clist(hammer_buffer_t buffer,
                        hammer_node_t node);
static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
                        u_int64_t type);
static void alloc_new_buffer(hammer_cluster_t cluster,
                        hammer_alist_t live, u_int64_t type, int32_t nelements,
                        int start,
                        int *errorp, struct hammer_buffer **bufferp);
static void readhammerbuf(hammer_volume_t vol, void *data,
                        int64_t offset);
static void writehammerbuf(hammer_volume_t vol, const void *data,
                        int64_t offset);
static int64_t calculate_cluster_offset(hammer_volume_t vol, int32_t clu_no);
static int64_t calculate_supercl_offset(hammer_volume_t vol, int32_t scl_no);
struct hammer_alist_config Buf_alist_config;
struct hammer_alist_config Vol_normal_alist_config;
struct hammer_alist_config Vol_super_alist_config;
struct hammer_alist_config Supercl_alist_config;
struct hammer_alist_config Clu_master_alist_config;
struct hammer_alist_config Clu_slave_alist_config;
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_scl_rb_compare(hammer_supercl_t cl1, hammer_supercl_t cl2)
{
        if (cl1->scl_no < cl2->scl_no)
                return(-1);
        if (cl1->scl_no > cl2->scl_no)
                return(1);
        return(0);
}

static int
hammer_clu_rb_compare(hammer_cluster_t cl1, hammer_cluster_t cl2)
{
        if (cl1->clu_no < cl2->clu_no)
                return(-1);
        if (cl1->clu_no > cl2->clu_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->buf_no < buf2->buf_no)
                return(-1);
        if (buf1->buf_no > buf2->buf_no)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_clu_rb_tree_RB_LOOKUP(root, clu_no).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                    hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_scl_rb_tree, hammer_supercl, rb_node,
             hammer_scl_rb_compare, int32_t, scl_no);
RB_GENERATE2(hammer_clu_rb_tree, hammer_cluster, rb_node,
             hammer_clu_rb_compare, int32_t, clu_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, int32_t, buf_no);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, int32_t, node_offset);
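/*
 * Illustrative use of the generated lookups (the callers sketched here
 * are assumptions, not code from this file):
 *
 *      volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *      ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info);
 *
 * The first form is the normal keyed lookup produced by RB_GENERATE2,
 * the second is the extended INFO lookup produced by RB_GENERATE_XLOOKUP.
 */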
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;

        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        volume->hmp = hmp;
        volume->io.type = HAMMER_STRUCTURE_VOLUME;
        volume->io.offset = 0LL;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0)
                vn_isdisk(volume->devvp, &error);
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_OPEN(volume->devvp, (ronly ? FREAD : FREAD|FWRITE),
                                 FSCRED, NULL);
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return (error);
        }

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->head.buf_type != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->cluster_base = ondisk->vol_beg;
        volume->vol_clsize = ondisk->vol_clsize;
        volume->vol_flags = ondisk->vol_flags;
        RB_INIT(&volume->rb_clus_root);
        RB_INIT(&volume->rb_scls_root);

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume and load the root cluster.  HAMMER special
         * cases rootvol and rootcl and will not deallocate the structures.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->rootcl = hammer_get_cluster(volume,
                                                 ondisk->vol0_root_clu_no,
                                                 &error, 0);
                hammer_rel_cluster(hmp->rootcl, 0);
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
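/*
 * Expected mount-time usage (illustrative sketch, not code from this
 * file): the mount path calls hammer_install_volume() once for each
 * volume making up the filesystem and fails the mount if any call
 * returns non-zero or if no volume ends up installing hmp->rootvol.
 */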
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->hmp;
        hammer_cluster_t rootcl;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Sync clusters, sync volume
         */

        /*
         * Clean up the root cluster, which is held unlocked in the root
         * volume.
         */
        hammer_ref(&volume->io.lock);
        if (hmp->rootvol == volume) {
                if ((rootcl = hmp->rootcl) != NULL) {
                        hammer_ref(&rootcl->io.lock);
                        hmp->rootcl = NULL;
                        hammer_rel_cluster(rootcl, 1);
                }
                hmp->rootvol = NULL;
        }
        KKASSERT(volume->io.lock.refs == 1);
        hammer_io_release(&volume->io, 1);
        volume->ondisk = NULL;
        if (volume->devvp) {
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        struct hammer_volume_ondisk *ondisk;
        int error;

        hammer_lock_ex(&volume->io.lock);
        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io);
                if (error) {
                        hammer_unlock(&volume->io.lock);
                        return (error);
                }
                volume->ondisk = ondisk = (void *)volume->io.bp->b_data;

                /*
                 * Configure the volume's A-lists.  These are used to
                 * allocate clusters.
                 */
                if (volume->vol_flags & HAMMER_VOLF_USINGSUPERCL) {
                        volume->alist.config = &Vol_super_alist_config;
                        volume->alist.meta = ondisk->vol_almeta.super;
                        volume->alist.info = volume;
                } else {
                        volume->alist.config = &Vol_normal_alist_config;
                        volume->alist.meta = ondisk->vol_almeta.normal;
                        volume->alist.info = NULL;
                }
                hammer_alist_init(&volume->alist);
        } else {
                error = 0;
        }
        hammer_unlock(&volume->io.lock);
        return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume().
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        if (hammer_islastref(&volume->io.lock)) {
                hammer_lock_ex(&volume->io.lock);
                if (hammer_islastref(&volume->io.lock)) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io, flush);
                }
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
}
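/*
 * Note on the test/lock/retest pattern above, which recurs in the
 * super-cluster, cluster, buffer and node release paths below: the
 * first hammer_islastref() test is only an optimistic hint.  Another
 * thread can gain a reference between that test and the
 * hammer_lock_ex(), so only the second test, made while holding the
 * exclusive lock, is authoritative.
 */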
/************************************************************************
 *                              SUPER-CLUSTERS                          *
 ************************************************************************
 *
 * Manage super-clusters.  Note that a supercl holds a reference to its
 * associated volume.
 */
hammer_supercl_t
hammer_get_supercl(hammer_volume_t volume, int32_t scl_no,
                   int *errorp, int isnew)
{
        hammer_supercl_t supercl;

        /*
         * Locate and lock the super-cluster structure, creating one
         * if necessary.
         */
again:
        supercl = RB_LOOKUP(hammer_scl_rb_tree, &volume->rb_scls_root, scl_no);
        if (supercl == NULL) {
                supercl = kmalloc(sizeof(*supercl), M_HAMMER, M_WAITOK|M_ZERO);
                supercl->scl_no = scl_no;
                supercl->volume = volume;
                supercl->io.offset = calculate_supercl_offset(volume, scl_no);
                supercl->io.type = HAMMER_STRUCTURE_SUPERCL;
                hammer_ref(&supercl->io.lock);

                /*
                 * Insert the super-cluster into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_scl_rb_tree, &volume->rb_scls_root, supercl)) {
                        hammer_unref(&supercl->io.lock);
                        kfree(supercl, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&supercl->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (supercl->ondisk == NULL || isnew) {
                *errorp = hammer_load_supercl(supercl, isnew);
                if (*errorp) {
                        hammer_rel_supercl(supercl, 1);
                        supercl = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(supercl);
}
static int
hammer_load_supercl(hammer_supercl_t supercl, int isnew)
{
        struct hammer_supercl_ondisk *ondisk;
        hammer_volume_t volume = supercl->volume;
        int error;

        hammer_lock_ex(&supercl->io.lock);
        if (supercl->ondisk == NULL) {
                if (isnew)
                        error = hammer_io_new(volume->devvp, &supercl->io);
                else
                        error = hammer_io_read(volume->devvp, &supercl->io);
                if (error) {
                        hammer_unlock(&supercl->io.lock);
                        return (error);
                }
                supercl->ondisk = ondisk = (void *)supercl->io.bp->b_data;

                supercl->alist.config = &Supercl_alist_config;
                supercl->alist.meta = ondisk->scl_meta;
                supercl->alist.info = NULL;

                /*
                 * If this is a new super-cluster we have to initialize
                 * various ondisk structural elements.  The caller is
                 * responsible for the remainder.
                 */
                if (isnew) {
                        struct hammer_alist_live dummy;

                        dummy.config = &Buf_alist_config;
                        dummy.meta = ondisk->head.buf_almeta;
                        dummy.info = NULL;
                        initbuffer(&dummy, &ondisk->head, HAMMER_FSBUF_SUPERCL);
                        hammer_alist_init(&supercl->alist);
                }
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &supercl->io);
        } else {
                error = 0;
        }
        hammer_unlock(&supercl->io.lock);
        return (error);
}
/*
 * Release a super-cluster.  We have to deal with several places where
 * another thread can ref the super-cluster.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.
 */
void
hammer_rel_supercl(hammer_supercl_t supercl, int flush)
{
        hammer_volume_t volume;

        if (hammer_islastref(&supercl->io.lock)) {
                hammer_lock_ex(&supercl->io.lock);
                if (hammer_islastref(&supercl->io.lock)) {
                        supercl->ondisk = NULL;
                        hammer_io_release(&supercl->io, flush);
                        if (supercl->io.bp == NULL &&
                            hammer_islastref(&supercl->io.lock)) {
                                volume = supercl->volume;
                                RB_REMOVE(hammer_scl_rb_tree,
                                          &volume->rb_scls_root, supercl);
                                supercl->volume = NULL; /* sanity */
                                kfree(supercl, M_HAMMER);
                                hammer_rel_volume(volume, 0);
                                return;
                        }
                }
                hammer_unlock(&supercl->io.lock);
        }
        hammer_unref(&supercl->io.lock);
}
/************************************************************************
 *                              CLUSTERS                                *
 ************************************************************************
 *
 * Manage clusters.  Note that a cluster holds a reference to its
 * associated volume.
 */
hammer_cluster_t
hammer_get_cluster(hammer_volume_t volume, int32_t clu_no,
                   int *errorp, int isnew)
{
        hammer_cluster_t cluster;

again:
        cluster = RB_LOOKUP(hammer_clu_rb_tree, &volume->rb_clus_root, clu_no);
        if (cluster == NULL) {
                cluster = kmalloc(sizeof(*cluster), M_HAMMER, M_WAITOK|M_ZERO);
                cluster->clu_no = clu_no;
                cluster->volume = volume;
                cluster->io.offset = calculate_cluster_offset(volume, clu_no);
                RB_INIT(&cluster->rb_bufs_root);
                RB_INIT(&cluster->rb_nods_root);
                cluster->io.type = HAMMER_STRUCTURE_CLUSTER;
                hammer_ref(&cluster->io.lock);

                /*
                 * Insert the cluster into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_clu_rb_tree, &volume->rb_clus_root, cluster)) {
                        hammer_unref(&cluster->io.lock);
                        kfree(cluster, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&cluster->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL || isnew) {
                *errorp = hammer_load_cluster(cluster, isnew);
                if (*errorp) {
                        hammer_rel_cluster(cluster, 1);
                        cluster = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (cluster);
}
hammer_cluster_t
hammer_get_root_cluster(struct hammer_mount *hmp, int *errorp)
{
        hammer_cluster_t cluster;

        cluster = hmp->rootcl;
        KKASSERT(cluster != NULL);
        hammer_ref(&cluster->io.lock);

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL) {
                *errorp = hammer_load_cluster(cluster, 0);
                if (*errorp) {
                        hammer_rel_cluster(cluster, 1);
                        cluster = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (cluster);
}
static int
hammer_load_cluster(hammer_cluster_t cluster, int isnew)
{
        hammer_volume_t volume = cluster->volume;
        struct hammer_cluster_ondisk *ondisk;
        int error;

        /*
         * Load the cluster's on-disk info
         */
        hammer_lock_ex(&cluster->io.lock);
        if (cluster->ondisk == NULL) {
                if (isnew)
                        error = hammer_io_new(volume->devvp, &cluster->io);
                else
                        error = hammer_io_read(volume->devvp, &cluster->io);
                if (error) {
                        hammer_unlock(&cluster->io.lock);
                        return (error);
                }
                cluster->ondisk = ondisk = (void *)cluster->io.bp->b_data;

                cluster->alist_master.config = &Clu_master_alist_config;
                cluster->alist_master.meta = ondisk->clu_master_meta;
                cluster->alist_btree.config = &Clu_slave_alist_config;
                cluster->alist_btree.meta = ondisk->clu_btree_meta;
                cluster->alist_btree.info = cluster;
                cluster->alist_record.config = &Clu_slave_alist_config;
                cluster->alist_record.meta = ondisk->clu_record_meta;
                cluster->alist_record.info = cluster;
                cluster->alist_mdata.config = &Clu_slave_alist_config;
                cluster->alist_mdata.meta = ondisk->clu_mdata_meta;
                cluster->alist_mdata.info = cluster;

                cluster->clu_btree_beg = ondisk->clu_btree_beg;
                cluster->clu_btree_end = ondisk->clu_btree_end;

                /*
                 * If this is a new cluster we have to initialize
                 * various ondisk structural elements.  The caller is
                 * responsible for the remainder.
                 */
                if (isnew) {
                        struct hammer_alist_live dummy;

                        dummy.config = &Buf_alist_config;
                        dummy.meta = ondisk->head.buf_almeta;
                        dummy.info = NULL;
                        initbuffer(&dummy, &ondisk->head, HAMMER_FSBUF_CLUSTER);

                        hammer_alist_init(&cluster->alist_master);
                        hammer_alist_init(&cluster->alist_btree);
                        hammer_alist_init(&cluster->alist_record);
                        hammer_alist_init(&cluster->alist_mdata);
                }
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &cluster->io);
        } else {
                error = 0;
        }
        hammer_unlock(&cluster->io.lock);
        return (error);
}
/*
 * Reference a cluster that is either already referenced or via a specially
 * handled pointer (aka rootcl).
 */
int
hammer_ref_cluster(hammer_cluster_t cluster)
{
        int error;

        KKASSERT(cluster != NULL);
        hammer_ref(&cluster->io.lock);

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL) {
                error = hammer_load_cluster(cluster, 0);
                if (error)
                        hammer_rel_cluster(cluster, 1);
        } else {
                error = 0;
        }
        return (error);
}
/*
 * Release a cluster.  We have to deal with several places where
 * another thread can ref the cluster.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.
 */
void
hammer_rel_cluster(hammer_cluster_t cluster, int flush)
{
        hammer_node_t node;
        hammer_volume_t volume;

        if (hammer_islastref(&cluster->io.lock)) {
                hammer_lock_ex(&cluster->io.lock);
                if (hammer_islastref(&cluster->io.lock)) {
                        cluster->ondisk = NULL;
                        hammer_io_release(&cluster->io, flush);

                        /*
                         * Clean out the B-Tree node cache, if any, then
                         * clean up the volume ref and free the cluster.
                         *
                         * If the cluster acquires a new reference while we
                         * are trying to clean it out, abort the cleaning.
                         *
                         * There really shouldn't be any nodes at this point
                         * but we allow a node with no buffer association
                         * so handle the case.
                         */
                        while (cluster->io.bp == NULL &&
                               hammer_islastref(&cluster->io.lock) &&
                               (node = RB_ROOT(&cluster->rb_nods_root)) != NULL
                        ) {
                                KKASSERT(node->lock.refs == 0);
                                hammer_flush_node(node);
                        }
                        if (cluster->io.bp == NULL &&
                            hammer_islastref(&cluster->io.lock)) {
                                volume = cluster->volume;
                                RB_REMOVE(hammer_clu_rb_tree,
                                          &volume->rb_clus_root, cluster);
                                cluster->volume = NULL; /* sanity */
                                kfree(cluster, M_HAMMER);
                                hammer_rel_volume(volume, 0);
                                return;
                        }
                }
                hammer_unlock(&cluster->io.lock);
        }
        hammer_unref(&cluster->io.lock);
}
/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Note that a buffer holds a reference to its associated
 * cluster, and its cluster will hold a reference to the cluster's volume.
 *
 * A non-zero buf_type indicates that a new buffer should be created and
 * initialized.
 */
hammer_buffer_t
hammer_get_buffer(hammer_cluster_t cluster, int32_t buf_no,
                  u_int64_t buf_type, int *errorp)
{
        hammer_buffer_t buffer;

        /*
         * Find the buffer.  Note that buffer 0 corresponds to the cluster
         * header and should never be requested.
         */
        KKASSERT(buf_no != 0);

        /*
         * Locate and lock the buffer structure, creating one if necessary.
         */
again:
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &cluster->rb_bufs_root, buf_no);
        if (buffer == NULL) {
                buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
                buffer->buf_no = buf_no;
                buffer->cluster = cluster;
                buffer->volume = cluster->volume;
                buffer->io.offset = cluster->io.offset +
                                    (buf_no * HAMMER_BUFSIZE);
                buffer->io.type = HAMMER_STRUCTURE_BUFFER;
                TAILQ_INIT(&buffer->clist);
                hammer_ref(&buffer->io.lock);

                /*
                 * Insert the buffer into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_buf_rb_tree, &cluster->rb_bufs_root, buffer)) {
                        hammer_unref(&buffer->io.lock);
                        kfree(buffer, M_HAMMER);
                        goto again;
                }
                hammer_ref(&cluster->io.lock);
        } else {
                hammer_ref(&buffer->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (buffer->ondisk == NULL || buf_type) {
                *errorp = hammer_load_buffer(buffer, buf_type);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, u_int64_t buf_type)
{
        hammer_volume_t volume;
        hammer_fsbuf_ondisk_t ondisk;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        hammer_lock_ex(&buffer->io.lock);
        if (buffer->ondisk == NULL) {
                if (buf_type)
                        error = hammer_io_new(volume->devvp, &buffer->io);
                else
                        error = hammer_io_read(volume->devvp, &buffer->io);
                if (error) {
                        hammer_unlock(&buffer->io.lock);
                        return (error);
                }
                buffer->ondisk = ondisk = (void *)buffer->io.bp->b_data;
                buffer->alist.config = &Buf_alist_config;
                buffer->alist.meta = ondisk->head.buf_almeta;

                if (buf_type) {
                        initbuffer(&buffer->alist, &ondisk->head, buf_type);
                }
                buffer->buf_type = ondisk->head.buf_type;
        } else if (buf_type) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        hammer_unlock(&buffer->io.lock);
        return (error);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        hammer_ref(&buffer->io.lock);
        if (buffer->ondisk == NULL) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                } else {
                        KKASSERT(buffer->buf_type ==
                                 buffer->ondisk->head.buf_type);
                }
        } else {
                error = 0;
        }
        return (error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_cluster_t cluster;
        hammer_node_t node;

        if (hammer_islastref(&buffer->io.lock)) {
                hammer_lock_ex(&buffer->io.lock);
                if (hammer_islastref(&buffer->io.lock)) {
                        buffer->ondisk = NULL;
                        hammer_io_release(&buffer->io, flush);

                        /*
                         * Clean out the B-Tree node cache, if any, then
                         * clean up the cluster ref and free the buffer.
                         *
                         * If the buffer acquires a new reference while we
                         * are trying to clean it out, abort the cleaning.
                         */
                        while (buffer->io.bp == NULL &&
                               hammer_islastref(&buffer->io.lock) &&
                               (node = TAILQ_FIRST(&buffer->clist)) != NULL
                        ) {
                                KKASSERT(node->lock.refs == 0);
                                hammer_flush_node(node);
                        }
                        if (buffer->io.bp == NULL &&
                            hammer_islastref(&buffer->io.lock)) {
                                cluster = buffer->cluster;
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &cluster->rb_bufs_root, buffer);
                                buffer->cluster = NULL; /* sanity */
                                kfree(buffer, M_HAMMER);
                                hammer_rel_cluster(cluster, 0);
                                return;
                        }
                }
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 *
 * NOTE: The buffer is referenced and locked.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        node = TAILQ_FIRST(&buffer->clist);
        while (node) {
                buffer->save_scan = TAILQ_NEXT(node, entry);
                if (node->lock.refs == 0)
                        hammer_flush_node(node);
                node = buffer->save_scan;
        }
}
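/*
 * The save_scan field makes the scan above safe against blocking:
 * hammer_flush_node() can block and nodes can be removed from the clist
 * while the loop runs, so the next pointer is parked in
 * buffer->save_scan, where hammer_remove_node_clist() (further below)
 * can fix it up if the saved node is itself removed.
 */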
/************************************************************************
 *                              B-TREE NODES                            *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer.  It can have an active buffer reference
 * even when the node itself has no references.  The node also passively
 * associates itself with its cluster without holding any cluster refs.
 * The cluster ref is indirectly maintained by the active buffer ref when
 * a node is acquired.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_node's long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_cluster_t cluster, int32_t node_offset, int *errorp)
{
        hammer_node_t node;

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &cluster->rb_nods_root,
                         node_offset);
        if (node == NULL) {
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->cluster = cluster;
                if (RB_INSERT(hammer_nod_rb_tree, &cluster->rb_nods_root,
                              node)) {
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        *errorp = hammer_ref_node(node);
        if (*errorp) {
                /*
                 * NOTE: The node pointer may be stale on error return.
                 * In fact, it's probably been destroyed, so return NULL
                 * rather than the stale pointer.
                 */
                node = NULL;
        }
        return(node);
}
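/*
 * Illustrative caller pattern (assumed code, not from this file): a
 * caller holding a referenced cluster might acquire and then passively
 * cache a node as follows, where "ip->cache" stands in for whatever
 * passive cache pointer the caller owns:
 *
 *      node = hammer_get_node(cluster, node_offset, &error);
 *      if (error == 0)
 *              hammer_cache_node(node, &ip->cache);
 *
 * See hammer_cache_node() below for the cache pointer semantics.
 */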
/*
 * Reference the node to prevent disassociations, then associate and
 * load the related buffer.  This routine can also be called to reference
 * a node from a cache pointer.
 *
 * NOTE: Because the caller does not have a ref on the node, the caller's
 * node pointer will be stale if an error is returned.  We may also wind
 * up clearing the related cache pointers.
 *
 * NOTE: The cluster is indirectly referenced by our buffer ref.
 */
int
hammer_ref_node(hammer_node_t node)
{
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        hammer_ref(&node->lock);
        if (node->ondisk == NULL) {
                hammer_lock_ex(&node->lock);
                if (node->ondisk == NULL) {
                        /*
                         * This is a little confusing but the gist is that
                         * node->buffer determines whether the node is on
                         * the buffer's clist and node->ondisk determines
                         * whether the buffer is referenced.
                         */
                        if ((buffer = node->buffer) != NULL) {
                                error = hammer_ref_buffer(buffer);
                        } else {
                                buf_no = node->node_offset / HAMMER_BUFSIZE;
                                buffer = hammer_get_buffer(node->cluster,
                                                           buf_no, 0, &error);
                                if (buffer) {
                                        KKASSERT(error == 0);
                                        TAILQ_INSERT_TAIL(&buffer->clist,
                                                          node, entry);
                                        node->buffer = buffer;
                                }
                        }
                        if (error == 0) {
                                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                        }
                }
                hammer_unlock(&node->lock);
        }
        if (error)
                hammer_rel_node(node);
        return (error);
}
/*
 * Release a hammer_node.  The node retains a passive association with
 * its cluster, buffer and caches.
 *
 * However, to avoid cluttering up kernel memory with tons of B-Tree
 * node cache structures we destroy the node if no passive cache or
 * (instantiated) buffer references exist.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_cluster_t cluster;
        hammer_buffer_t buffer;

        if (hammer_islastref(&node->lock)) {
                cluster = node->cluster;

                /*
                 * Clutter control, this case only occurs after a failed
                 * load since otherwise ondisk will be non-NULL.
                 */
                if (node->cache1 == NULL && node->cache2 == NULL &&
                    node->ondisk == NULL) {
                        RB_REMOVE(hammer_nod_rb_tree, &cluster->rb_nods_root,
                                  node);
                        if ((buffer = node->buffer) != NULL) {
                                node->buffer = NULL;
                                hammer_remove_node_clist(buffer, node);
                        }
                        kfree(node, M_HAMMER);
                        return;
                }

                /*
                 * node->ondisk determines whether we have a buffer reference
                 * to get rid of or not.  Only get rid of the reference if
                 * the kernel tried to flush the buffer.
                 *
                 * NOTE: Once unref'd the node can be physically destroyed,
                 * so our node is stale afterwards.
                 *
                 * This case occurs if the node still has cache references.
                 * We could remove the references and free the structure
                 * but for now we allow them (and the node structure) to
                 * remain intact.
                 */
                if (node->ondisk && hammer_io_checkflush(&node->buffer->io)) {
                        buffer = node->buffer;
                        node->buffer = NULL;
                        node->ondisk = NULL;
                        hammer_remove_node_clist(buffer, node);
                        hammer_unref(&node->lock);
                        hammer_rel_buffer(buffer, 0);
                } else {
                        hammer_unref(&node->lock);
                }
        } else {
                hammer_unref(&node->lock);
        }
}
/*
 * Cache-and-release a hammer_node.  Kinda like catching and releasing a
 * fish, but keeping an eye on him.  The node is passively cached in *cache.
 *
 * NOTE!  HAMMER may NULL *cache at any time, even after you have
 * referenced the node!
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
        if (node->cache1 != cache) {
                if (node->cache2 == cache) {
                        struct hammer_node **tmp;
                        tmp = node->cache1;
                        node->cache1 = node->cache2;
                        node->cache2 = tmp;
                } else {
                        if (node->cache2)
                                *node->cache2 = NULL;
                        node->cache2 = node->cache1;
                        node->cache1 = cache;
                        *cache = node;
                }
        }
        hammer_rel_node(node);
}
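/*
 * The two cache slots above act as a tiny MRU list: cache1 holds the
 * most recent association and cache2 the previous one.  Re-caching via
 * the pointer already in cache2 simply swaps the slots; caching via a
 * new pointer clears the oldest third-party association (through
 * *node->cache2) before shifting cache1 down.
 */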
void
hammer_uncache_node(struct hammer_node **cache)
{
        hammer_node_t node;

        if ((node = *cache) != NULL) {
                *cache = NULL;
                if (node->cache1 == cache) {
                        node->cache1 = node->cache2;
                        node->cache2 = NULL;
                } else if (node->cache2 == cache) {
                        node->cache2 = NULL;
                } else {
                        panic("hammer_uncache_node: missing cache linkage");
                }
                if (node->cache1 == NULL && node->cache2 == NULL &&
                    node->lock.refs == 0) {
                        hammer_flush_node(node);
                }
        }
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * references.  This is typically called from the buffer handling code.
 *
 * The node may have an active buffer reference (ondisk != NULL) even
 * if the node itself has no references.
 *
 * Note that a caller iterating through nodes via a buffer must have its
 * own reference on the buffer or our hammer_rel_buffer() call below may
 * rip it out from under the caller.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        if (node->cache1)
                *node->cache1 = NULL;
        if (node->cache2)
                *node->cache2 = NULL;
        if (node->lock.refs == 0) {
                RB_REMOVE(hammer_nod_rb_tree, &node->cluster->rb_nods_root,
                          node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        hammer_remove_node_clist(buffer, node);
                        if (node->ondisk) {
                                node->ondisk = NULL;
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                kfree(node, M_HAMMER);
        }
}
/*
 * Remove a node from the buffer's clist.  Adjust save_scan as appropriate.
 * This is in its own little routine to properly handle interactions with
 * save_scan, so it is possible to block while scanning a buffer's node list.
 */
static void
hammer_remove_node_clist(hammer_buffer_t buffer, hammer_node_t node)
{
        if (buffer->save_scan == node)
                buffer->save_scan = TAILQ_NEXT(node, entry);
        TAILQ_REMOVE(&buffer->clist, node, entry);
}
/************************************************************************
 *                         A-LIST ALLOCATORS                            *
 ************************************************************************/

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * The passed *bufferp should be initialized to NULL.  On successive calls
 * *bufferp caches the most recent buffer used until put away by the caller.
 * Note that previously returned pointers using the cached buffer become
 * invalid on successive calls which reuse *bufferp.
 *
 * All allocations first attempt to use the block found at the specified
 * iterator.  If that fails the first available block is used.  If that
 * fails a new buffer is allocated and associated with the buffer type
 * A-list and the element is allocated out of the new buffer.
 */
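/*
 * A-list element numbers encode both the buffer and the block within
 * the buffer: elm_no / HAMMER_FSBUF_MAXBLKS yields buf_no and
 * elm_no & HAMMER_FSBUF_BLKMASK the block index.  For example, if
 * HAMMER_FSBUF_MAXBLKS were 256, elm_no 517 would select buf_no 2,
 * block 5.  (The real constant lives in the on-disk headers; 256 is
 * for illustration only.)
 */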
hammer_node_t
hammer_alloc_btree(hammer_cluster_t cluster, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        hammer_node_t node;
        int32_t elm_no;
        int32_t buf_no;
        int32_t node_offset;

        /*
         * Allocate a B-Tree element
         */
        buffer = NULL;
        live = &cluster->alist_btree;
        elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, 1, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES,
                                 cluster->ondisk->idx_index, errorp, &buffer);
                elm_no = hammer_alist_alloc(live, 1);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        if (buffer)
                                hammer_rel_buffer(buffer, 0);
                        return(NULL);
                }
        }
        cluster->ondisk->idx_index = elm_no;

        /*
         * Load and return the B-Tree element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        node_offset = buf_no * HAMMER_BUFSIZE +
                      offsetof(union hammer_fsbuf_ondisk,
                               btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK]);
        node = hammer_get_node(cluster, node_offset, errorp);
        if (node) {
                bzero(node->ondisk, sizeof(*node->ondisk));
        } else {
                hammer_alist_free(live, elm_no, 1);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
void *
hammer_alloc_data(hammer_cluster_t cluster, int32_t bytes,
                  int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t buf_no;
        int32_t nblks;
        void *item;

        /*
         * Allocate a data element.  nblks is a block count, so round the
         * byte count up and convert.
         */
        nblks = ((bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK) /
                HAMMER_DATA_BLKSIZE;
        live = &cluster->alist_mdata;
        elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, nblks, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_DATA, HAMMER_DATA_NODES,
                                 cluster->ondisk->idx_data, errorp, bufferp);
                elm_no = hammer_alist_alloc(live, nblks);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        return(NULL);
                }
        }
        cluster->ondisk->idx_data = elm_no;

        /*
         * Load and return the data element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        buffer = *bufferp;
        if (buffer == NULL || buffer->cluster != cluster ||
            buffer->buf_no != buf_no) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
                *bufferp = buffer;
        }
        KKASSERT(buffer->ondisk->head.buf_type == HAMMER_FSBUF_DATA);
        item = &buffer->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
        bzero(item, nblks * HAMMER_DATA_BLKSIZE);
        *errorp = 0;
        return(item);
}
void *
hammer_alloc_record(hammer_cluster_t cluster,
                    int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t buf_no;
        void *item;

        /*
         * Allocate a record element
         */
        live = &cluster->alist_record;
        elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_rev(live, 1, HAMMER_ALIST_BLOCK_MAX);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES,
                                 cluster->ondisk->idx_record, errorp, bufferp);
                elm_no = hammer_alist_alloc(live, 1);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        return(NULL);
                }
        }
        cluster->ondisk->idx_record = elm_no;

        /*
         * Load and return the record element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        buffer = *bufferp;
        if (buffer == NULL || buffer->cluster != cluster ||
            buffer->buf_no != buf_no) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
                *bufferp = buffer;
        }
        KKASSERT(buffer->ondisk->head.buf_type != 0);
        item = &buffer->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
        bzero(item, sizeof(union hammer_record_ondisk));
        *errorp = 0;
        return(item);
}
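/*
 * Illustrative caller pattern for the allocators above (assumed code,
 * not from this file).  The cached buffer pointer starts NULL and is
 * released exactly once when the caller is done; note that rec1 may
 * point into a buffer *bufferp no longer caches after the second call:
 *
 *      struct hammer_buffer *buffer = NULL;
 *      void *rec1, *rec2;
 *      int error;
 *
 *      rec1 = hammer_alloc_record(cluster, &error, &buffer);
 *      rec2 = hammer_alloc_record(cluster, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */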
/*
 * Free HAMMER elements based on either a hammer_buffer and element pointer
 * or a cluster-relative byte offset.
 */
void
hammer_free_btree_ptr(hammer_buffer_t buffer, hammer_node_ondisk_t node)
{
        int32_t elm_no;
        hammer_alist_t live;

        elm_no = node - &buffer->ondisk->btree.nodes[0];
        KKASSERT(elm_no >= 0 && elm_no < HAMMER_BTREE_NODES);
        elm_no += buffer->buf_no * HAMMER_FSBUF_MAXBLKS;
        live = &buffer->cluster->alist_btree;
        hammer_alist_free(live, elm_no, 1);
}

void
hammer_free_data_ptr(hammer_buffer_t buffer, void *data, int bytes)
{
        int32_t elm_no;
        int32_t nblks;
        hammer_alist_t live;

        elm_no = ((char *)data - (char *)buffer->ondisk->data.data) /
                 HAMMER_DATA_BLKSIZE;
        KKASSERT(elm_no >= 0 && elm_no < HAMMER_DATA_NODES);
        elm_no += buffer->buf_no * HAMMER_FSBUF_MAXBLKS;
        nblks = ((bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK) /
                HAMMER_DATA_BLKSIZE;
        live = &buffer->cluster->alist_mdata;
        hammer_alist_free(live, elm_no, nblks);
}

void
hammer_free_record_ptr(hammer_buffer_t buffer, union hammer_record_ondisk *rec)
{
        int32_t elm_no;
        hammer_alist_t live;

        elm_no = rec - &buffer->ondisk->record.recs[0];
        KKASSERT(elm_no >= 0 && elm_no < HAMMER_RECORD_NODES);
        elm_no += buffer->buf_no * HAMMER_FSBUF_MAXBLKS;
        live = &buffer->cluster->alist_record;
        hammer_alist_free(live, elm_no, 1);
}
void
hammer_free_btree(hammer_cluster_t cluster, int32_t bclu_offset)
{
        const int32_t blksize = sizeof(struct hammer_node_ondisk);
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, btree.nodes[0]);
        live = &cluster->alist_btree;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, 1);
}

void
hammer_free_data(hammer_cluster_t cluster, int32_t bclu_offset, int32_t bytes)
{
        const int32_t blksize = HAMMER_DATA_BLKSIZE;
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t nblks;

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, data.data[0][0]);
        live = &cluster->alist_mdata;
        nblks = ((bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK) /
                HAMMER_DATA_BLKSIZE;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, nblks);
}

void
hammer_free_record(hammer_cluster_t cluster, int32_t bclu_offset)
{
        const int32_t blksize = sizeof(union hammer_record_ondisk);
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, record.recs[0]);
        live = &cluster->alist_record;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, 1);
}
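/*
 * Worked example for the offset-based frees above (numbers illustrative
 * only): with HAMMER_BUFSIZE 16384, a bclu_offset of 32768+N lands in
 * buf_no 2, contributing 2 * HAMMER_FSBUF_MAXBLKS to elm_no; the
 * in-buffer remainder N, less the offsetof() of the element array,
 * divided by the element size, selects the element within that buffer.
 */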
/*
 * Allocate a new filesystem buffer and assign it to the specified
 * filesystem buffer type.  The new buffer will be added to the
 * type-specific A-list and initialized.
 */
static void
alloc_new_buffer(hammer_cluster_t cluster, hammer_alist_t live,
                 u_int64_t type, int32_t nelements,
                 int start, int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t buf_no;

        start = start / HAMMER_FSBUF_MAXBLKS;  /* convert to buf_no */

        if (type == HAMMER_FSBUF_RECORDS) {
                buf_no = hammer_alist_alloc_rev(&cluster->alist_master,
                                                1, start);
                if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                        buf_no = hammer_alist_alloc_rev(&cluster->alist_master,
                                                1, HAMMER_ALIST_BLOCK_MAX);
                }
        } else {
                buf_no = hammer_alist_alloc_fwd(&cluster->alist_master,
                                                1, start);
                if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                        buf_no = hammer_alist_alloc_fwd(&cluster->alist_master,
                                                1, 0);
                }
        }
        KKASSERT(buf_no != HAMMER_ALIST_BLOCK_NONE); /* XXX */

        /*
         * The new buffer must be initialized (type != 0) regardless of
         * whether we already have it cached or not, so don't try to
         * optimize the cached buffer check.  Just call hammer_get_buffer().
         */
        buffer = hammer_get_buffer(cluster, buf_no, type, errorp);
        if (*bufferp)
                hammer_rel_buffer(*bufferp, 0);
        *bufferp = buffer;
}
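/*
 * Design note: record buffers are allocated top-down (alloc_rev) while
 * B-Tree node and data buffers are allocated bottom-up (alloc_fwd), so
 * the record pool and the node/data pools grow toward each other within
 * the cluster instead of interleaving.
 */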
/*
 * Flush various tracking structures to disk
 */
static void
flush_all_volumes(void)
{
        hammer_volume_t vol;

        for (vol = VolBase; vol; vol = vol->next)
                flush_volume(vol);
}

static void
flush_volume(hammer_volume_t vol)
{
        hammer_supercl_t supercl;
        hammer_cluster_t cl;

        for (supercl = vol->supercl_base; supercl; supercl = supercl->next)
                flush_supercl(supercl);
        for (cl = vol->cluster_base; cl; cl = cl->next)
                flush_cluster(cl);
        writehammerbuf(vol, vol->ondisk, 0);
}

static void
flush_supercl(hammer_supercl_t supercl)
{
        int64_t supercl_offset;

        supercl_offset = supercl->scl_offset;
        writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
}

static void
flush_cluster(hammer_cluster_t cl)
{
        hammer_buffer_t buf;
        int64_t cluster_offset;

        for (buf = cl->buffer_base; buf; buf = buf->next)
                flush_buffer(buf);
        cluster_offset = cl->clu_offset;
        writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
}

static void
flush_buffer(hammer_buffer_t buf)
{
        int64_t buffer_offset;

        buffer_offset = buf->buf_offset + buf->cluster->clu_offset;
        writehammerbuf(buf->volume, buf->ondisk, buffer_offset);
}
/*
 * Generic buffer initialization
 */
static void
initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
{
        head->buf_type = type;
        hammer_alist_init(live);
}
/*
 * Calculate the cluster's offset in the volume.  This calculation is
 * slightly more complex when using superclusters because superclusters
 * are grouped in blocks of 16, followed by 16 x N clusters where N
 * is the number of clusters a supercluster can manage.
 */
static int64_t
calculate_cluster_offset(hammer_volume_t volume, int32_t clu_no)
{
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t off;

        if (volume->vol_flags & HAMMER_VOLF_USINGSUPERCL) {
                scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
                            HAMMER_SCL_MAXCLUSTERS;
                scl_group_size =
                            ((int64_t)HAMMER_BUFSIZE *
                             HAMMER_VOL_SUPERCLUSTER_GROUP) +
                            ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                             volume->vol_clsize * HAMMER_SCL_MAXCLUSTERS);
                scl_group_size +=
                            HAMMER_VOL_SUPERCLUSTER_GROUP * HAMMER_BUFSIZE;

                off = volume->cluster_base +
                      scl_group * scl_group_size +
                      (HAMMER_BUFSIZE * HAMMER_VOL_SUPERCLUSTER_GROUP) +
                      ((int64_t)clu_no % ((int64_t)HAMMER_SCL_MAXCLUSTERS *
                       HAMMER_VOL_SUPERCLUSTER_GROUP)) *
                      volume->vol_clsize;
        } else {
                off = volume->cluster_base +
                      (int64_t)clu_no * volume->vol_clsize;
        }
        return(off);
}
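/*
 * Layout sketch for the supercluster case above (illustrative): each
 * scl_group on disk consists of HAMMER_VOL_SUPERCLUSTER_GROUP
 * supercluster headers followed by the full run of clusters those
 * superclusters manage, so a cluster's offset is its group's base,
 * plus the group's header area, plus its index within the group times
 * vol_clsize.
 */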
/*
 * Calculate a super-cluster's offset in the volume.
 */
static int64_t
calculate_supercl_offset(hammer_volume_t volume, int32_t scl_no)
{
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t off;

        KKASSERT(volume->vol_flags & HAMMER_VOLF_USINGSUPERCL);
        scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
        if (scl_group) {
                scl_group_size =
                            ((int64_t)HAMMER_BUFSIZE *
                             HAMMER_VOL_SUPERCLUSTER_GROUP) +
                            ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                             volume->vol_clsize * HAMMER_SCL_MAXCLUSTERS);
                scl_group_size +=
                            HAMMER_VOL_SUPERCLUSTER_GROUP * HAMMER_BUFSIZE;
                off = volume->cluster_base + (scl_group * scl_group_size) +
                      (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) * HAMMER_BUFSIZE;
        } else {
                off = volume->cluster_base + (scl_no * HAMMER_BUFSIZE);
        }
        return(off);
}
/*
 * Setup the parameters for the various A-lists we use in hammer.  The
 * supercluster A-list must be chained to the cluster A-list and cluster
 * slave A-lists are chained to buffer A-lists.
 *
 * See hammer_init_alist_config() below.
 */

/*
 * A-LIST - cluster recursion into a filesystem buffer
 */
static int
buffer_alist_init(void *info, int32_t blk, int32_t radix)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        /*
         * Calculate the buffer number, initialize based on the buffer type.
         * The buffer has already been allocated so assert that it has been
         * initialized.
         */
        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return (error);
}
static int
buffer_alist_destroy(void *info, int32_t blk, int32_t radix)
{
        return (0);
}
static int32_t
buffer_alist_alloc_fwd(void *info, int32_t blk, int32_t radix,
                       int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int32_t r;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);

                r = hammer_alist_alloc_fwd(&buffer->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&buffer->alist);
                hammer_rel_buffer(buffer, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static int32_t
buffer_alist_alloc_rev(void *info, int32_t blk, int32_t radix,
                       int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int32_t r;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);

                r = hammer_alist_alloc_rev(&buffer->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&buffer->alist);
                hammer_rel_buffer(buffer, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static void
buffer_alist_free(void *info, int32_t blk, int32_t radix,
                  int32_t base_blk, int32_t count, int32_t *emptyp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);
                hammer_alist_free(&buffer->alist, base_blk, count);
                *emptyp = hammer_alist_isempty(&buffer->alist);
                hammer_rel_buffer(buffer, 0);
        } else {
                *emptyp = 0;
        }
}
static void
buffer_alist_print(void *info, int32_t blk, int32_t radix, int tab)
{
}
/*
 * A-LIST - super-cluster recursion into a cluster and cluster recursion
 * into a filesystem buffer.  A-List's are mostly self-contained entities,
 * but callbacks must be installed to recurse from one A-List to another.
 *
 * Implementing these callbacks allows us to operate a multi-layered A-List
 * as a single entity.
 */
static int
super_alist_init(void *info, int32_t blk, int32_t radix)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int error = 0;

        /*
         * Calculate the super-cluster number containing the cluster (blk)
         * and obtain the super-cluster buffer.
         */
        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 1);
        if (supercl)
                hammer_rel_supercl(supercl, 0);
        return (error);
}
static int
super_alist_destroy(void *info, int32_t blk, int32_t radix)
{
        return (0);
}
static int32_t
super_alist_alloc_fwd(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int32_t r;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 1);
        if (supercl) {
                r = hammer_alist_alloc_fwd(&supercl->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&supercl->alist);
                hammer_rel_supercl(supercl, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static int32_t
super_alist_alloc_rev(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int32_t r;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 1);
        if (supercl) {
                r = hammer_alist_alloc_rev(&supercl->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&supercl->alist);
                hammer_rel_supercl(supercl, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static void
super_alist_free(void *info, int32_t blk, int32_t radix,
                 int32_t base_blk, int32_t count, int32_t *emptyp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 1);
        if (supercl) {
                hammer_alist_free(&supercl->alist, base_blk, count);
                *emptyp = hammer_alist_isempty(&supercl->alist);
                hammer_rel_supercl(supercl, 0);
        } else {
                *emptyp = 0;
        }
}
static void
super_alist_print(void *info, int32_t blk, int32_t radix, int tab)
{
}
void
hammer_init_alist_config(void)
{
        hammer_alist_config_t config;

        hammer_alist_template(&Buf_alist_config, HAMMER_FSBUF_MAXBLKS,
                              1, HAMMER_FSBUF_METAELMS);
        hammer_alist_template(&Vol_normal_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_VOL_METAELMS_1LYR);
        hammer_alist_template(&Vol_super_alist_config,
                              HAMMER_VOL_MAXSUPERCLUSTERS,
                              HAMMER_SCL_MAXCLUSTERS, HAMMER_VOL_METAELMS_2LYR);
        hammer_alist_template(&Supercl_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_SUPERCL_METAELMS);
        hammer_alist_template(&Clu_master_alist_config, HAMMER_CLU_MAXBUFFERS,
                              1, HAMMER_CLU_MASTER_METAELMS);
        hammer_alist_template(&Clu_slave_alist_config, HAMMER_CLU_MAXBUFFERS,
                              HAMMER_FSBUF_MAXBLKS, HAMMER_CLU_SLAVE_METAELMS);

        config = &Vol_super_alist_config;
        config->bl_radix_init = super_alist_init;
        config->bl_radix_destroy = super_alist_destroy;
        config->bl_radix_alloc_fwd = super_alist_alloc_fwd;
        config->bl_radix_alloc_rev = super_alist_alloc_rev;
        config->bl_radix_free = super_alist_free;
        config->bl_radix_print = super_alist_print;

        config = &Clu_slave_alist_config;
        config->bl_radix_init = buffer_alist_init;
        config->bl_radix_destroy = buffer_alist_destroy;
        config->bl_radix_alloc_fwd = buffer_alist_alloc_fwd;
        config->bl_radix_alloc_rev = buffer_alist_alloc_rev;
        config->bl_radix_free = buffer_alist_free;
        config->bl_radix_print = buffer_alist_print;
}
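/*
 * With the callbacks installed above, a single hammer_alist_alloc()
 * against a volume's two-layer A-list recurses through the supercluster
 * layer transparently, and the cluster slave A-lists likewise recurse
 * into the per-buffer A-lists, so each multi-layered A-list behaves as
 * a single allocator.
 */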