/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.12 2007/12/30 00:47:22 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_supercl(hammer_supercl_t supercl, int isnew);
static int hammer_load_cluster(hammer_cluster_t cluster, int isnew);
static int hammer_load_buffer(hammer_buffer_t buffer, u_int64_t buf_type);
static void hammer_remove_node_clist(hammer_buffer_t buffer,
                        hammer_node_t node);
static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
                        u_int64_t type);
static void alloc_new_buffer(hammer_cluster_t cluster,
                        hammer_alist_t live, u_int64_t type, int32_t nelements,
                        int start, int *errorp, struct hammer_buffer **bufferp);
static void readhammerbuf(hammer_volume_t vol, void *data,
                        int64_t offset);
static void writehammerbuf(hammer_volume_t vol, const void *data,
                        int64_t offset);
static int64_t calculate_cluster_offset(hammer_volume_t vol, int32_t clu_no);
static int64_t calculate_supercl_offset(hammer_volume_t vol, int32_t scl_no);
static int32_t hammer_alloc_master(hammer_cluster_t cluster, int nblks,
                        int32_t start, int isfwd);
static void hammer_adjust_stats(hammer_cluster_t cluster,
                        u_int64_t buf_type, int nblks);
struct hammer_alist_config Buf_alist_config;
struct hammer_alist_config Vol_normal_alist_config;
struct hammer_alist_config Vol_super_alist_config;
struct hammer_alist_config Supercl_alist_config;
struct hammer_alist_config Clu_master_alist_config;
struct hammer_alist_config Clu_slave_alist_config;
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_scl_rb_compare(hammer_supercl_t cl1, hammer_supercl_t cl2)
{
        if (cl1->scl_no < cl2->scl_no)
                return(-1);
        if (cl1->scl_no > cl2->scl_no)
                return(1);
        return(0);
}

static int
hammer_clu_rb_compare(hammer_cluster_t cl1, hammer_cluster_t cl2)
{
        if (cl1->clu_no < cl2->clu_no)
                return(-1);
        if (cl1->clu_no > cl2->clu_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->buf_no < buf2->buf_no)
                return(-1);
        if (buf1->buf_no > buf2->buf_no)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_clu_rb_tree_RB_LOOKUP(root, clu_no).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_scl_rb_tree, hammer_supercl, rb_node,
             hammer_scl_rb_compare, int32_t, scl_no);
RB_GENERATE2(hammer_clu_rb_tree, hammer_cluster, rb_node,
             hammer_clu_rb_compare, int32_t, clu_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, int32_t, buf_no);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, int32_t, node_offset);
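
/*
 * Example (editor's sketch, not part of the original source): the
 * generated lookup functions are used directly elsewhere in this file,
 * e.g. to find a volume by number or an inode by (obj_id, obj_asof):
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info);
 *
 * (rb_inos_root is assumed to be the inode tree head in struct
 * hammer_mount; see hammer.h.)
 */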
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;

        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        volume->hmp = hmp;
        volume->io.type = HAMMER_STRUCTURE_VOLUME;
        volume->io.offset = 0LL;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0)
                vn_isdisk(volume->devvp, &error);
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_OPEN(volume->devvp, (ronly ? FREAD : FREAD|FWRITE),
                                 FSCRED, NULL);
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->head.buf_type != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->cluster_base = ondisk->vol_clo_beg;
        volume->vol_clsize = ondisk->vol_clsize;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        RB_INIT(&volume->rb_clus_root);
        RB_INIT(&volume->rb_scls_root);

        hmp->mp->mnt_stat.f_blocks += volume->nblocks;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume and load the root cluster.  HAMMER special
         * cases rootvol and rootcl and will not deallocate the structures.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->rootcl = hammer_get_cluster(volume,
                                                 ondisk->vol0_root_clu_no,
                                                 &error, 0);
                hammer_rel_cluster(hmp->rootcl, 0);
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so we return -1 on failure to abort it.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->hmp;
        hammer_cluster_t rootcl;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Sync clusters, sync volume
         */
        hmp->mp->mnt_stat.f_blocks -= volume->nblocks;

        /*
         * Clean up the root cluster, which is held unlocked in the root
         * volume.
         */
        if (hmp->rootvol == volume) {
                if ((rootcl = hmp->rootcl) != NULL)
                        hmp->rootcl = NULL;
                hmp->rootvol = NULL;
        }

        /*
         * Unload clusters and super-clusters.  Unloading a super-cluster
         * also unloads related clusters, but the filesystem may not be
         * using super-clusters so unload clusters anyway.
         */
        RB_SCAN(hammer_clu_rb_tree, &volume->rb_clus_root, NULL,
                        hammer_unload_cluster, NULL);
        RB_SCAN(hammer_scl_rb_tree, &volume->rb_scls_root, NULL,
                        hammer_unload_supercl, NULL);

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        hammer_io_release(&volume->io, 1);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        KKASSERT(RB_EMPTY(&volume->rb_clus_root));
        KKASSERT(RB_EMPTY(&volume->rb_scls_root));

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}
static void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        struct hammer_volume_ondisk *ondisk;
        int error;

        hammer_lock_ex(&volume->io.lock);
        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io);
                if (error) {
                        hammer_unlock(&volume->io.lock);
                        return (error);
                }
                volume->ondisk = ondisk = (void *)volume->io.bp->b_data;

                /*
                 * Configure the volume's A-lists.  These are used to
                 * allocate clusters.
                 */
                if (volume->vol_flags & HAMMER_VOLF_USINGSUPERCL) {
                        volume->alist.config = &Vol_super_alist_config;
                        volume->alist.meta = ondisk->vol_almeta.super;
                        volume->alist.info = volume;
                } else {
                        volume->alist.config = &Vol_normal_alist_config;
                        volume->alist.meta = ondisk->vol_almeta.normal;
                        volume->alist.info = NULL;
                }
        } else {
                error = 0;
        }
        hammer_unlock(&volume->io.lock);
        return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        if (volume->io.lock.refs == 1) {
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io, flush);
                }
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
}
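
/*
 * Example (editor's sketch, not part of the original source): the
 * get/rel pairing used by callers of the volume API; on success the
 * volume is returned referenced with ondisk valid and must be released
 * exactly once:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */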
/************************************************************************
 *				SUPER-CLUSTERS				*
 ************************************************************************
 *
 * Manage super-clusters.  Note that a supercl holds a reference to its
 * associated volume.
 */
hammer_supercl_t
hammer_get_supercl(hammer_volume_t volume, int32_t scl_no,
                   int *errorp, int isnew)
{
        hammer_supercl_t supercl;

        /*
         * Locate and lock the super-cluster structure, creating one
         * if necessary.
         */
again:
        supercl = RB_LOOKUP(hammer_scl_rb_tree, &volume->rb_scls_root, scl_no);
        if (supercl == NULL) {
                supercl = kmalloc(sizeof(*supercl), M_HAMMER, M_WAITOK|M_ZERO);
                supercl->scl_no = scl_no;
                supercl->volume = volume;
                supercl->io.offset = calculate_supercl_offset(volume, scl_no);
                supercl->io.type = HAMMER_STRUCTURE_SUPERCL;
                hammer_ref(&supercl->io.lock);

                /*
                 * Insert the super-cluster into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_scl_rb_tree, &volume->rb_scls_root, supercl)) {
                        hammer_unref(&supercl->io.lock);
                        kfree(supercl, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&supercl->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (supercl->ondisk == NULL || isnew) {
                *errorp = hammer_load_supercl(supercl, isnew);
                if (*errorp) {
                        hammer_rel_supercl(supercl, 1);
                        supercl = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(supercl);
}
static int
hammer_load_supercl(hammer_supercl_t supercl, int isnew)
{
        struct hammer_supercl_ondisk *ondisk;
        hammer_volume_t volume = supercl->volume;
        int error;

        hammer_lock_ex(&supercl->io.lock);
        if (supercl->ondisk == NULL) {
                if (isnew)
                        error = hammer_io_new(volume->devvp, &supercl->io);
                else
                        error = hammer_io_read(volume->devvp, &supercl->io);
                if (error) {
                        hammer_unlock(&supercl->io.lock);
                        return (error);
                }
                supercl->ondisk = ondisk = (void *)supercl->io.bp->b_data;

                supercl->alist.config = &Supercl_alist_config;
                supercl->alist.meta = ondisk->scl_meta;
                supercl->alist.info = NULL;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &supercl->io);
        } else {
                error = 0;
        }
        if (error == 0 && isnew) {
                /*
                 * If this is a new super-cluster we have to initialize
                 * various ondisk structural elements.  The caller is
                 * responsible for the remainder.
                 */
                struct hammer_alist_live dummy;

                ondisk = supercl->ondisk;
                dummy.config = &Buf_alist_config;
                dummy.meta = ondisk->head.buf_almeta;
                dummy.info = NULL;
                initbuffer(&dummy, &ondisk->head, HAMMER_FSBUF_SUPERCL);
                hammer_alist_init(&supercl->alist);
        }
        hammer_unlock(&supercl->io.lock);
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_supercl(hammer_supercl_t supercl, void *data __unused)
{
        KKASSERT(supercl->io.lock.refs == 0);
        hammer_ref(&supercl->io.lock);
        hammer_rel_supercl(supercl, 1);
        return(0);
}
/*
 * Release a super-cluster.  We have to deal with several places where
 * another thread can ref the super-cluster.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.
 */
void
hammer_rel_supercl(hammer_supercl_t supercl, int flush)
{
        hammer_volume_t volume;

        if (supercl->io.lock.refs == 1) {
                hammer_lock_ex(&supercl->io.lock);
                if (supercl->io.lock.refs == 1) {
                        hammer_io_release(&supercl->io, flush);
                        if (supercl->io.bp == NULL &&
                            supercl->io.lock.refs == 1) {
                                volume = supercl->volume;
                                RB_REMOVE(hammer_scl_rb_tree,
                                          &volume->rb_scls_root, supercl);
                                supercl->volume = NULL;	/* sanity */
                                kfree(supercl, M_HAMMER);
                                hammer_rel_volume(volume, 0);
                                return;
                        }
                }
                hammer_unlock(&supercl->io.lock);
        }
        hammer_unref(&supercl->io.lock);
}
/************************************************************************
 *				CLUSTERS				*
 ************************************************************************
 *
 * Manage clusters.  Note that a cluster holds a reference to its
 * associated volume.
 */
hammer_cluster_t
hammer_get_cluster(hammer_volume_t volume, int32_t clu_no,
                   int *errorp, int isnew)
{
        hammer_cluster_t cluster;

again:
        cluster = RB_LOOKUP(hammer_clu_rb_tree, &volume->rb_clus_root, clu_no);
        if (cluster == NULL) {
                cluster = kmalloc(sizeof(*cluster), M_HAMMER, M_WAITOK|M_ZERO);
                cluster->clu_no = clu_no;
                cluster->volume = volume;
                cluster->io.offset = calculate_cluster_offset(volume, clu_no);
                cluster->state = HAMMER_CLUSTER_IDLE;
                RB_INIT(&cluster->rb_bufs_root);
                RB_INIT(&cluster->rb_nods_root);
                cluster->io.type = HAMMER_STRUCTURE_CLUSTER;
                hammer_ref(&cluster->io.lock);

                /*
                 * Insert the cluster into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_clu_rb_tree, &volume->rb_clus_root, cluster)) {
                        hammer_unref(&cluster->io.lock);
                        kfree(cluster, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&cluster->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL || isnew) {
                *errorp = hammer_load_cluster(cluster, isnew);
                if (*errorp) {
                        hammer_rel_cluster(cluster, 1);
                        cluster = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (cluster);
}
hammer_cluster_t
hammer_get_root_cluster(struct hammer_mount *hmp, int *errorp)
{
        hammer_cluster_t cluster;

        cluster = hmp->rootcl;
        KKASSERT(cluster != NULL);
        hammer_ref(&cluster->io.lock);

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL) {
                *errorp = hammer_load_cluster(cluster, 0);
                if (*errorp) {
                        hammer_rel_cluster(cluster, 1);
                        cluster = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (cluster);
}
static int
hammer_load_cluster(hammer_cluster_t cluster, int isnew)
{
        hammer_volume_t volume = cluster->volume;
        struct hammer_cluster_ondisk *ondisk;
        int error;

        /*
         * Load the cluster's on-disk info
         */
        hammer_lock_ex(&cluster->io.lock);
        if (cluster->ondisk == NULL) {
                if (isnew)
                        error = hammer_io_new(volume->devvp, &cluster->io);
                else
                        error = hammer_io_read(volume->devvp, &cluster->io);
                if (error) {
                        hammer_unlock(&cluster->io.lock);
                        return (error);
                }
                cluster->ondisk = ondisk = (void *)cluster->io.bp->b_data;

                cluster->alist_master.config = &Clu_master_alist_config;
                cluster->alist_master.meta = ondisk->clu_master_meta;
                cluster->alist_btree.config = &Clu_slave_alist_config;
                cluster->alist_btree.meta = ondisk->clu_btree_meta;
                cluster->alist_btree.info = cluster;
                cluster->alist_record.config = &Clu_slave_alist_config;
                cluster->alist_record.meta = ondisk->clu_record_meta;
                cluster->alist_record.info = cluster;
                cluster->alist_mdata.config = &Clu_slave_alist_config;
                cluster->alist_mdata.meta = ondisk->clu_mdata_meta;
                cluster->alist_mdata.info = cluster;

                if (isnew == 0) {
                        cluster->clu_btree_beg = ondisk->clu_btree_beg;
                        cluster->clu_btree_end = ondisk->clu_btree_end;
                }
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &cluster->io);
        } else {
                error = 0;
        }
        if (error == 0 && isnew) {
                /*
                 * If this is a new cluster we have to initialize
                 * various ondisk structural elements.  The caller is
                 * responsible for the remainder.
                 */
                struct hammer_alist_live dummy;
                hammer_node_t croot;
                hammer_volume_ondisk_t voldisk;
                int32_t nbuffers;

                ondisk = cluster->ondisk;
                voldisk = volume->ondisk;

                dummy.config = &Buf_alist_config;
                dummy.meta = ondisk->head.buf_almeta;
                dummy.info = NULL;
                initbuffer(&dummy, &ondisk->head, HAMMER_FSBUF_CLUSTER);

                hammer_alist_init(&cluster->alist_master);
                hammer_alist_init(&cluster->alist_btree);
                hammer_alist_init(&cluster->alist_record);
                hammer_alist_init(&cluster->alist_mdata);

                ondisk->vol_fsid = voldisk->vol_fsid;
                ondisk->vol_fstype = voldisk->vol_fstype;
                ondisk->clu_id = 0;	/* XXX */
                ondisk->clu_no = cluster->clu_no;
                ondisk->clu_flags = 0;
                ondisk->clu_start = HAMMER_BUFSIZE;
                KKASSERT(voldisk->vol_clo_end > cluster->io.offset);
                if (voldisk->vol_clo_end - cluster->io.offset >
                    voldisk->vol_clsize) {
                        ondisk->clu_limit = voldisk->vol_clsize;
                } else {
                        ondisk->clu_limit = (int32_t)(voldisk->vol_clo_end -
                                                      cluster->io.offset);
                }
                nbuffers = ondisk->clu_limit / HAMMER_BUFSIZE;
                hammer_alist_free(&cluster->alist_master, 1, nbuffers - 1);
                ondisk->idx_data = 1 * HAMMER_FSBUF_MAXBLKS;
                ondisk->idx_index = 0 * HAMMER_FSBUF_MAXBLKS;
                ondisk->idx_record = nbuffers * HAMMER_FSBUF_MAXBLKS;

                /*
                 * Initialize the B-Tree.  We don't know what the caller
                 * intends to do with the cluster so make sure it causes
                 * an assertion if the caller makes no changes.
                 */
                ondisk->clu_btree_parent_vol_no = -2;
                ondisk->clu_btree_parent_clu_no = -2;
                ondisk->clu_btree_parent_offset = -2;
                ondisk->clu_btree_parent_clu_gen = -2;

                croot = hammer_alloc_btree(cluster, &error);
                if (error == 0) {
                        bzero(croot->ondisk, sizeof(*croot->ondisk));
                        croot->ondisk->count = 0;
                        croot->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
                        ondisk->clu_btree_root = croot->node_offset;
                        hammer_modify_cluster(cluster);
                }
        }
        hammer_unlock(&cluster->io.lock);
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_cluster(hammer_cluster_t cluster, void *data __unused)
{
        hammer_ref(&cluster->io.lock);
        RB_SCAN(hammer_buf_rb_tree, &cluster->rb_bufs_root, NULL,
                hammer_unload_buffer, NULL);
        KKASSERT(cluster->io.lock.refs == 1);
        hammer_rel_cluster(cluster, 1);
        return(0);
}
/*
 * Reference a cluster that is either already referenced or via a specially
 * handled pointer (aka rootcl).
 */
int
hammer_ref_cluster(hammer_cluster_t cluster)
{
        int error;

        KKASSERT(cluster != NULL);
        hammer_ref(&cluster->io.lock);

        /*
         * Deal with on-disk info
         */
        if (cluster->ondisk == NULL) {
                error = hammer_load_cluster(cluster, 0);
                if (error)
                        hammer_rel_cluster(cluster, 1);
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Release a cluster.  We have to deal with several places where
 * another thread can ref the cluster.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.
 */
void
hammer_rel_cluster(hammer_cluster_t cluster, int flush)
{
        hammer_node_t node;
        hammer_volume_t volume;

        if (cluster->io.lock.refs == 1) {
                hammer_lock_ex(&cluster->io.lock);
                if (cluster->io.lock.refs == 1) {
                        /*
                         * Release the I/O.  If we or the kernel wants to
                         * flush, this will release the bp.  Otherwise the
                         * bp may be written and flushed passively by the
                         * kernel.
                         */
                        hammer_io_release(&cluster->io, flush);

                        /*
                         * The B-Tree node cache is not counted in the
                         * cluster's reference count.  Clean out the
                         * cache.
                         *
                         * If the cluster acquires a new reference while we
                         * are trying to clean it out, abort the cleaning.
                         *
                         * Any actively referenced nodes will reference the
                         * related buffer and cluster, so a ref count check
                         * should be sufficient.
                         */
                        while (cluster->io.bp == NULL &&
                               cluster->io.lock.refs == 1 &&
                               (node = RB_ROOT(&cluster->rb_nods_root)) != NULL
                        ) {
                                KKASSERT(node->lock.refs == 0);
                                hammer_flush_node(node);
                        }

                        /*
                         * Final cleanup
                         */
                        if (cluster != cluster->volume->hmp->rootcl &&
                            cluster->io.bp == NULL &&
                            cluster->io.lock.refs == 1 &&
                            RB_EMPTY(&cluster->rb_nods_root)) {
                                KKASSERT(RB_EMPTY(&cluster->rb_bufs_root));
                                volume = cluster->volume;
                                RB_REMOVE(hammer_clu_rb_tree,
                                          &volume->rb_clus_root, cluster);
                                cluster->volume = NULL;	/* sanity */
                                kfree(cluster, M_HAMMER);
                                hammer_rel_volume(volume, 0);
                                return;
                        }
                }
                hammer_unlock(&cluster->io.lock);
        }
        hammer_unref(&cluster->io.lock);
}
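
/*
 * Example (editor's sketch, not part of the original source): the same
 * get/rel pairing applies to clusters; on success the cluster is
 * returned referenced and must be released exactly once:
 *
 *	cluster = hammer_get_cluster(volume, clu_no, &error, 0);
 *	if (cluster) {
 *		... use cluster->ondisk ...
 *		hammer_rel_cluster(cluster, 0);
 *	}
 */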
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Note that a buffer holds a reference to its associated
 * cluster, and its cluster will hold a reference to the cluster's volume.
 *
 * A non-zero buf_type indicates that a new buffer should be created and
 * initialized.
 */
hammer_buffer_t
hammer_get_buffer(hammer_cluster_t cluster, int32_t buf_no,
                  u_int64_t buf_type, int *errorp)
{
        hammer_buffer_t buffer;

        /*
         * Find the buffer.  Note that buffer 0 corresponds to the cluster
         * header and should never be requested.
         */
        KKASSERT(buf_no >= cluster->ondisk->clu_start / HAMMER_BUFSIZE &&
                 buf_no < cluster->ondisk->clu_limit / HAMMER_BUFSIZE);

        /*
         * Locate and lock the buffer structure, creating one if necessary.
         */
again:
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &cluster->rb_bufs_root, buf_no);
        if (buffer == NULL) {
                buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
                buffer->buf_no = buf_no;
                buffer->cluster = cluster;
                buffer->volume = cluster->volume;
                buffer->io.offset = cluster->io.offset +
                                    (buf_no * HAMMER_BUFSIZE);
                buffer->io.type = HAMMER_STRUCTURE_BUFFER;
                TAILQ_INIT(&buffer->clist);
                hammer_ref(&buffer->io.lock);

                /*
                 * Insert the buffer into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_buf_rb_tree, &cluster->rb_bufs_root, buffer)) {
                        hammer_unref(&buffer->io.lock);
                        kfree(buffer, M_HAMMER);
                        goto again;
                }
                hammer_ref(&cluster->io.lock);
        } else {
                hammer_ref(&buffer->io.lock);
        }

        /*
         * Deal with on-disk info
         */
        if (buffer->ondisk == NULL || buf_type) {
                *errorp = hammer_load_buffer(buffer, buf_type);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, u_int64_t buf_type)
{
        hammer_volume_t volume;
        hammer_fsbuf_ondisk_t ondisk;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        hammer_lock_ex(&buffer->io.lock);
        if (buffer->ondisk == NULL) {
                if (buf_type)
                        error = hammer_io_new(volume->devvp, &buffer->io);
                else
                        error = hammer_io_read(volume->devvp, &buffer->io);
                if (error) {
                        hammer_unlock(&buffer->io.lock);
                        return (error);
                }
                buffer->ondisk = ondisk = (void *)buffer->io.bp->b_data;
                buffer->alist.config = &Buf_alist_config;
                buffer->alist.meta = ondisk->head.buf_almeta;
                buffer->buf_type = ondisk->head.buf_type;
        } else if (buf_type) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        if (error == 0 && buf_type) {
                ondisk = buffer->ondisk;
                initbuffer(&buffer->alist, &ondisk->head, buf_type);
                buffer->buf_type = ondisk->head.buf_type;
        }
        hammer_unlock(&buffer->io.lock);
        return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 1);
        return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        hammer_ref(&buffer->io.lock);
        if (buffer->ondisk == NULL) {
                error = hammer_load_buffer(buffer, 0);
                if (error)
                        hammer_rel_buffer(buffer, 1);
                /*
                 * NOTE: buffer pointer can become stale after
                 * the above release.
                 */
        } else {
                KKASSERT(buffer->buf_type ==
                         buffer->ondisk->head.buf_type);
                error = 0;
        }
        return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_cluster_t cluster;
        hammer_node_t node;

        if (buffer->io.lock.refs == 1) {
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        hammer_io_release(&buffer->io, flush);

                        /*
                         * Clean out the B-Tree node cache, if any, then
                         * clean up the cluster ref and free the buffer.
                         *
                         * If the buffer acquires a new reference while we
                         * are trying to clean it out, abort the cleaning.
                         */
                        while (buffer->io.bp == NULL &&
                               buffer->io.lock.refs == 1 &&
                               (node = TAILQ_FIRST(&buffer->clist)) != NULL
                        ) {
                                KKASSERT(node->lock.refs == 0);
                                hammer_flush_node(node);
                        }
                        if (buffer->io.bp == NULL &&
                            hammer_islastref(&buffer->io.lock)) {
                                cluster = buffer->cluster;
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &cluster->rb_bufs_root, buffer);
                                buffer->cluster = NULL;	/* sanity */
                                kfree(buffer, M_HAMMER);
                                hammer_rel_cluster(cluster, 0);
                                return;
                        }
                }
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 *
 * NOTE: The buffer is referenced and locked.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        node = TAILQ_FIRST(&buffer->clist);
        while (node) {
                buffer->save_scan = TAILQ_NEXT(node, entry);
                if (node->lock.refs == 0)
                        hammer_flush_node(node);
                node = buffer->save_scan;
        }
}
/************************************************************************
 *				B-TREE NODES				*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer.  It can have an active buffer reference
 * even when the node itself has no references.  The node also passively
 * associates itself with its cluster without holding any cluster refs.
 * The cluster ref is indirectly maintained by the active buffer ref when
 * a node is acquired.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_node's long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
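
/*
 * Example (editor's sketch, not part of the original source): a typical
 * node access through the cache; "cache" stands in for any passive
 * cache pointer (a hypothetical field, e.g. in an inode or cursor):
 *
 *	node = hammer_get_node(cluster, node_offset, &error);
 *	if (node) {
 *		... use node->ondisk ...
 *		hammer_cache_node(node, &cache);
 *		hammer_rel_node(node);
 *	}
 */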
hammer_node_t
hammer_get_node(hammer_cluster_t cluster, int32_t node_offset, int *errorp)
{
        hammer_node_t node;

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &cluster->rb_nods_root,
                         node_offset);
        if (node == NULL) {
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->cluster = cluster;
                if (RB_INSERT(hammer_nod_rb_tree, &cluster->rb_nods_root,
                              node)) {
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        *errorp = hammer_ref_node(node);
        if (*errorp) {
                /*
                 * NOTE: The node pointer may be stale on error return.
                 * In fact, it has probably been destroyed.
                 */
                node = NULL;
        }
        return(node);
}
/*
 * Reference the node to prevent disassociations, then associate and
 * load the related buffer.  This routine can also be called to reference
 * a node from a cache pointer.
 *
 * NOTE: Because the caller does not have a ref on the node, the caller's
 * node pointer will be stale if an error is returned.  We may also wind
 * up clearing the related cache pointers.
 *
 * NOTE: The cluster is indirectly referenced by our buffer ref.
 */
int
hammer_ref_node(hammer_node_t node)
{
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error;

        hammer_ref(&node->lock);
        error = 0;
        if (node->ondisk == NULL) {
                hammer_lock_ex(&node->lock);
                if (node->ondisk == NULL) {
                        /*
                         * This is a little confusing but the gist is that
                         * node->buffer determines whether the node is on
                         * the buffer's clist and node->ondisk determines
                         * whether the buffer is referenced.
                         */
                        if ((buffer = node->buffer) != NULL) {
                                error = hammer_ref_buffer(buffer);
                        } else {
                                buf_no = node->node_offset / HAMMER_BUFSIZE;
                                buffer = hammer_get_buffer(node->cluster,
                                                           buf_no, 0, &error);
                                if (buffer) {
                                        KKASSERT(error == 0);
                                        TAILQ_INSERT_TAIL(&buffer->clist,
                                                          node, entry);
                                        node->buffer = buffer;
                                }
                        }
                        if (error == 0) {
                                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                        }
                }
                hammer_unlock(&node->lock);
        }
        if (error)
                hammer_rel_node(node);
        return (error);
}
/*
 * Release a hammer_node.  The node retains a passive association with
 * its cluster, buffer and caches.
 *
 * However, to avoid cluttering up kernel memory with tons of B-Tree
 * node cache structures we destroy the node if no passive cache or
 * (instantiated) buffer references exist.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_cluster_t cluster;
        hammer_buffer_t buffer;

        if (hammer_islastref(&node->lock)) {
                cluster = node->cluster;
                /*
                 * Clutter control, this case only occurs after a failed
                 * load since otherwise ondisk will be non-NULL.
                 */
                if (node->cache1 == NULL && node->cache2 == NULL &&
                    node->ondisk == NULL) {
                        RB_REMOVE(hammer_nod_rb_tree, &cluster->rb_nods_root,
                                  node);
                        if ((buffer = node->buffer) != NULL) {
                                node->buffer = NULL;
                                hammer_remove_node_clist(buffer, node);
                        }
                        kfree(node, M_HAMMER);
                        return;
                }

                /*
                 * node->ondisk determines whether we have a buffer reference
                 * to get rid of or not.  Only get rid of the reference if
                 * the kernel tried to flush the buffer.
                 *
                 * NOTE: Once unref'd the node can be physically destroyed,
                 * so our node is stale afterwards.
                 *
                 * This case occurs if the node still has cache references.
                 * We could remove the references and free the structure
                 * but for now we allow them (and the node structure) to
                 * remain intact.
                 */
                if (node->ondisk && hammer_io_checkflush(&node->buffer->io)) {
                        buffer = node->buffer;
                        node->buffer = NULL;
                        node->ondisk = NULL;
                        hammer_remove_node_clist(buffer, node);
                        hammer_unref(&node->lock);
                        hammer_rel_buffer(buffer, 0);
                } else {
                        hammer_unref(&node->lock);
                }
        } else {
                hammer_unref(&node->lock);
        }
}
/*
 * Cache-and-release a hammer_node.  Kinda like catching and releasing a
 * fish, but keeping an eye on him.  The node is passively cached in *cache.
 *
 * NOTE!  HAMMER may NULL *cache at any time, even after you have
 * referenced the node!
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
        if (node->cache1 != cache) {
                if (node->cache2 == cache) {
                        struct hammer_node **tmp;
                        tmp = node->cache1;
                        node->cache1 = node->cache2;
                        node->cache2 = tmp;
                } else {
                        if (node->cache2)
                                *node->cache2 = NULL;
                        node->cache2 = node->cache1;
                        node->cache1 = cache;
                        *cache = node;
                }
        }
}
void
hammer_uncache_node(struct hammer_node **cache)
{
        hammer_node_t node;

        if ((node = *cache) != NULL) {
                *cache = NULL;
                if (node->cache1 == cache) {
                        node->cache1 = node->cache2;
                        node->cache2 = NULL;
                } else if (node->cache2 == cache) {
                        node->cache2 = NULL;
                } else {
                        panic("hammer_uncache_node: missing cache linkage");
                }
                if (node->cache1 == NULL && node->cache2 == NULL &&
                    node->lock.refs == 0) {
                        hammer_flush_node(node);
                }
        }
}
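
/*
 * Example (editor's sketch, not part of the original source): clearing
 * a passive cache pointer before tearing down the owning structure;
 * the argument is the same pointer-to-pointer previously passed to
 * hammer_cache_node():
 *
 *	hammer_uncache_node(&cache);
 *
 * If this was the node's last passive association and it holds no
 * active refs, hammer_uncache_node() destroys the node via
 * hammer_flush_node().
 */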
/*
 * Remove a node's cache references and destroy the node if it has no
 * references.  This is typically called from the buffer handling code.
 *
 * The node may have an active buffer reference (ondisk != NULL) even
 * if the node itself has no references.
 *
 * Note that a caller iterating through nodes via a buffer must have its
 * own reference on the buffer or our hammer_rel_buffer() call below may
 * rip it out from under the caller.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        if (node->cache1)
                *node->cache1 = NULL;
        if (node->cache2)
                *node->cache2 = NULL;
        if (node->lock.refs == 0) {
                RB_REMOVE(hammer_nod_rb_tree, &node->cluster->rb_nods_root,
                          node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        hammer_remove_node_clist(buffer, node);
                        if (node->ondisk) {
                                node->ondisk = NULL;
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                kfree(node, M_HAMMER);
        }
}
/*
 * Remove a node from the buffer's clist.  Adjust save_scan as appropriate.
 * This is in its own little routine to properly handle interactions with
 * save_scan, so it is possible to block while scanning a buffer's node list.
 */
static
void
hammer_remove_node_clist(hammer_buffer_t buffer, hammer_node_t node)
{
        if (buffer->save_scan == node)
                buffer->save_scan = TAILQ_NEXT(node, entry);
        TAILQ_REMOVE(&buffer->clist, node, entry);
}
/************************************************************************
 *			A-LIST ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate HAMMER clusters
 */
hammer_cluster_t
hammer_alloc_cluster(hammer_mount_t hmp, hammer_cluster_t cluster_hint,
                     int *errorp)
{
        hammer_volume_t volume;
        hammer_cluster_t cluster;
        int32_t clu_no;
        int32_t clu_hint;
        int32_t vol_beg;
        int32_t vol_no;

        /*
         * Figure out our starting volume and hint.
         */
        if (cluster_hint) {
                vol_beg = cluster_hint->volume->vol_no;
                clu_hint = cluster_hint->clu_no;
        } else {
                vol_beg = hmp->volume_iterator;
                clu_hint = -1;
        }

        /*
         * Loop through volumes looking for a free cluster.  If allocating
         * a new cluster relative to an existing cluster try to find a free
         * cluster on either side (clu_hint >= 0), otherwise just do a
         * forwards iteration.
         */
        vol_no = vol_beg;
        do {
                volume = hammer_get_volume(hmp, vol_no, errorp);
                kprintf("VOLUME %p %d\n", volume, vol_no);
                if (*errorp) {
                        clu_no = HAMMER_ALIST_BLOCK_NONE;
                        break;
                }
                if (clu_hint == -1) {
                        clu_hint = volume->clu_iterator;
                        clu_no = hammer_alist_alloc_fwd(&volume->alist, 1,
                                                        clu_hint);
                        if (clu_no == HAMMER_ALIST_BLOCK_NONE) {
                                clu_no = hammer_alist_alloc_fwd(&volume->alist,
                                                                1, 0);
                        }
                } else {
                        clu_no = hammer_alist_alloc_fwd(&volume->alist, 1,
                                                        clu_hint);
                        if (clu_no == HAMMER_ALIST_BLOCK_NONE) {
                                clu_no = hammer_alist_alloc_rev(&volume->alist,
                                                                1, clu_hint);
                        }
                }
                if (clu_no != HAMMER_ALIST_BLOCK_NONE) {
                        hammer_modify_volume(volume);
                        break;
                }
                hammer_rel_volume(volume, 0);
                volume = NULL;
                *errorp = ENOSPC;
                vol_no = (vol_no + 1) % hmp->nvolumes;
                clu_hint = -1;
        } while (vol_no != vol_beg);

        /*
         * Acquire the cluster.  On success this will force *errorp to 0.
         */
        if (clu_no != HAMMER_ALIST_BLOCK_NONE) {
                kprintf("ALLOC CLUSTER %d\n", clu_no);
                cluster = hammer_get_cluster(volume, clu_no, errorp, 1);
                volume->clu_iterator = clu_no;
                hammer_rel_volume(volume, 0);
        } else {
                cluster = NULL;
        }
        if (cluster)
                hammer_lock_ex(&cluster->io.lock);
        return(cluster);
}
void
hammer_init_cluster(hammer_cluster_t cluster, hammer_base_elm_t left_bound,
                    hammer_base_elm_t right_bound)
{
        hammer_cluster_ondisk_t ondisk = cluster->ondisk;

        ondisk->clu_btree_beg = *left_bound;
        ondisk->clu_btree_end = *right_bound;
        cluster->clu_btree_beg = ondisk->clu_btree_beg;
        cluster->clu_btree_end = ondisk->clu_btree_end;
        hammer_modify_cluster(cluster);
}
/*
 * Deallocate a cluster
 */
void
hammer_free_cluster(hammer_cluster_t cluster)
{
        hammer_alist_free(&cluster->volume->alist, cluster->clu_no, 1);
        hammer_modify_volume(cluster->volume);
}
/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * The passed *bufferp should be initialized to NULL.  On successive calls
 * *bufferp caches the most recent buffer used until put away by the caller.
 * Note that previously returned pointers using the cached buffer become
 * invalid on successive calls which reuse *bufferp.
 *
 * All allocations first attempt to use the block found at the specified
 * iterator.  If that fails the first available block is used.  If that
 * fails a new buffer is allocated and associated with the buffer type
 * A-list and the element is allocated out of the new buffer.  A worked
 * sketch of this pattern follows this comment.
 */
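
/*
 * Example (editor's sketch, not part of the original source): the three
 * allocators below share a three-step pattern.  In outline, with "live"
 * and "idx" standing for the type-specific A-list and iterator:
 *
 *	elm_no = hammer_alist_alloc_fwd(live, count, idx);	// 1: at hint
 *	if (elm_no == HAMMER_ALIST_BLOCK_NONE)
 *		elm_no = hammer_alist_alloc_fwd(live, count, 0);// 2: anywhere
 *	if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
 *		alloc_new_buffer(cluster, live, type, nelements,
 *				 idx, errorp, bufferp);		// 3: new buf
 *		elm_no = hammer_alist_alloc(live, count);
 *	}
 */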
hammer_node_t
hammer_alloc_btree(hammer_cluster_t cluster, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        hammer_node_t node;
        int32_t elm_no;
        int32_t buf_no;
        int32_t node_offset;

        /*
         * Allocate a B-Tree element
         */
        buffer = NULL;
        live = &cluster->alist_btree;
        elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, 1, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES,
                                 cluster->ondisk->idx_index, errorp, &buffer);
                elm_no = hammer_alist_alloc(live, 1);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        if (buffer)
                                hammer_rel_buffer(buffer, 0);
                        hammer_modify_cluster(cluster);
                        return(NULL);
                }
        }
        cluster->ondisk->idx_index = elm_no;
        KKASSERT((elm_no & HAMMER_FSBUF_BLKMASK) < HAMMER_BTREE_NODES);
        hammer_modify_cluster(cluster);

        /*
         * Load and return the B-Tree element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        node_offset = buf_no * HAMMER_BUFSIZE +
                      offsetof(union hammer_fsbuf_ondisk,
                               btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK]);
        node = hammer_get_node(cluster, node_offset, errorp);
        if (node) {
                bzero(node->ondisk, sizeof(*node->ondisk));
        } else {
                hammer_alist_free(live, elm_no, 1);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
void *
hammer_alloc_data(hammer_cluster_t cluster, int32_t bytes,
                  int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t buf_no;
        int32_t nblks;
        void *item;

        /*
         * Deal with large data blocks.  The blocksize is HAMMER_BUFSIZE
         * for these allocations.
         */
        if ((bytes & HAMMER_BUFMASK) == 0) {
                nblks = bytes / HAMMER_BUFSIZE;
                /* only one block allowed for now (so buffer can hold it) */
                KKASSERT(nblks == 1);

                buf_no = hammer_alloc_master(cluster, nblks,
                                             cluster->ondisk->idx_ldata, 1);
                if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        return(NULL);
                }
                hammer_adjust_stats(cluster, HAMMER_FSBUF_DATA, nblks);
                cluster->ondisk->idx_ldata = buf_no;
                buffer = *bufferp;
                *bufferp = hammer_get_buffer(cluster, buf_no, -1, errorp);
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = *bufferp;
                return(buffer->ondisk);
        }

        /*
         * Allocate a data element.  The block size is HAMMER_DATA_BLKSIZE
         * (64 bytes) for these allocations.
         */
        nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
        nblks /= HAMMER_DATA_BLKSIZE;
        live = &cluster->alist_mdata;
        elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, nblks, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_DATA, HAMMER_DATA_NODES,
                                 cluster->ondisk->idx_data, errorp, bufferp);
                elm_no = hammer_alist_alloc(live, nblks);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        hammer_modify_cluster(cluster);
                        return(NULL);
                }
        }
        cluster->ondisk->idx_data = elm_no;
        hammer_modify_cluster(cluster);

        /*
         * Load and return the data element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        buffer = *bufferp;
        if (buffer == NULL || buffer->cluster != cluster ||
            buffer->buf_no != buf_no) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
                *bufferp = buffer;
        }
        KKASSERT(buffer->ondisk->head.buf_type == HAMMER_FSBUF_DATA);
        KKASSERT((elm_no & HAMMER_FSBUF_BLKMASK) < HAMMER_DATA_NODES);
        item = &buffer->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
        bzero(item, nblks * HAMMER_DATA_BLKSIZE);
        *errorp = 0;
        return(item);
}
void *
hammer_alloc_record(hammer_cluster_t cluster,
                    int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t buf_no;
        void *item;

        /*
         * Allocate a record element
         */
        live = &cluster->alist_record;
        elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_rev(live, 1, HAMMER_ALIST_BLOCK_MAX);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES,
                                 cluster->ondisk->idx_record, errorp, bufferp);
                elm_no = hammer_alist_alloc_rev(live, 1, HAMMER_ALIST_BLOCK_MAX);
                kprintf("hammer_alloc_record elm again %08x\n", elm_no);
                if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                        *errorp = ENOSPC;
                        hammer_modify_cluster(cluster);
                        return(NULL);
                }
        }
        cluster->ondisk->idx_record = elm_no;
        hammer_modify_cluster(cluster);

        /*
         * Load and return the record element
         */
        buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
        buffer = *bufferp;
        if (buffer == NULL || buffer->cluster != cluster ||
            buffer->buf_no != buf_no) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
                *bufferp = buffer;
        }
        KKASSERT(buffer->ondisk->head.buf_type == HAMMER_FSBUF_RECORDS);
        KKASSERT((elm_no & HAMMER_FSBUF_BLKMASK) < HAMMER_RECORD_NODES);
        item = &buffer->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
        bzero(item, sizeof(union hammer_record_ondisk));
        *errorp = 0;
        return(item);
}
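
/*
 * Worked example (editor's sketch, not part of the original source):
 * element numbers encode both the buffer and the slot within it.  With
 * HAMMER_FSBUF_MAXBLKS slots per filesystem buffer, an elm_no returned
 * by the record A-list maps back to disk as:
 *
 *	buf_no = elm_no / HAMMER_FSBUF_MAXBLKS;
 *	slot   = elm_no & HAMMER_FSBUF_BLKMASK;
 *	rec    = &buffer->ondisk->record.recs[slot];
 *
 * hammer_free_record_ptr() below performs exactly the inverse mapping.
 */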
void
hammer_free_data_ptr(hammer_buffer_t buffer, void *data, int bytes)
{
        int32_t elm_no;
        int32_t nblks;
        hammer_alist_t live;

        if ((bytes & HAMMER_BUFMASK) == 0) {
                nblks = bytes / HAMMER_BUFSIZE;
                KKASSERT(nblks == 1 && data == (void *)buffer->ondisk);
                hammer_alist_free(&buffer->cluster->alist_master,
                                  buffer->buf_no, nblks);
                hammer_adjust_stats(buffer->cluster, HAMMER_FSBUF_DATA, -nblks);
                hammer_modify_cluster(buffer->cluster);
                return;
        }

        elm_no = ((char *)data - (char *)buffer->ondisk->data.data) /
                 HAMMER_DATA_BLKSIZE;
        KKASSERT(elm_no >= 0 && elm_no < HAMMER_DATA_NODES);
        elm_no += buffer->buf_no * HAMMER_FSBUF_MAXBLKS;
        nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
        nblks /= HAMMER_DATA_BLKSIZE;
        live = &buffer->cluster->alist_mdata;
        hammer_alist_free(live, elm_no, nblks);
        hammer_modify_cluster(buffer->cluster);
}
void
hammer_free_record_ptr(hammer_buffer_t buffer, union hammer_record_ondisk *rec)
{
        int32_t elm_no;
        hammer_alist_t live;

        elm_no = rec - &buffer->ondisk->record.recs[0];
        KKASSERT(elm_no >= 0 && elm_no < HAMMER_RECORD_NODES);
        elm_no += buffer->buf_no * HAMMER_FSBUF_MAXBLKS;
        live = &buffer->cluster->alist_record;
        hammer_alist_free(live, elm_no, 1);
        hammer_modify_cluster(buffer->cluster);
}
void
hammer_free_btree(hammer_cluster_t cluster, int32_t bclu_offset)
{
        const int32_t blksize = sizeof(struct hammer_node_ondisk);
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, btree.nodes[0]);
        live = &cluster->alist_btree;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, 1);
        hammer_modify_cluster(cluster);
}
void
hammer_free_data(hammer_cluster_t cluster, int32_t bclu_offset, int32_t bytes)
{
        const int32_t blksize = HAMMER_DATA_BLKSIZE;
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t buf_no;
        int32_t nblks;

        if ((bytes & HAMMER_BUFMASK) == 0) {
                nblks = bytes / HAMMER_BUFSIZE;
                KKASSERT(nblks == 1 && (bclu_offset & HAMMER_BUFMASK) == 0);
                buf_no = bclu_offset / HAMMER_BUFSIZE;
                hammer_alist_free(&cluster->alist_master, buf_no, nblks);
                hammer_adjust_stats(cluster, HAMMER_FSBUF_DATA, -nblks);
                hammer_modify_cluster(cluster);
                return;
        }

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, data.data[0][0]);
        live = &cluster->alist_mdata;
        nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
        nblks /= HAMMER_DATA_BLKSIZE;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, nblks);
        hammer_modify_cluster(cluster);
}
void
hammer_free_record(hammer_cluster_t cluster, int32_t bclu_offset)
{
        const int32_t blksize = sizeof(union hammer_record_ondisk);
        int32_t fsbuf_offset = bclu_offset & HAMMER_BUFMASK;
        hammer_alist_t live;
        int32_t elm_no;

        elm_no = bclu_offset / HAMMER_BUFSIZE * HAMMER_FSBUF_MAXBLKS;
        fsbuf_offset -= offsetof(union hammer_fsbuf_ondisk, record.recs[0]);
        live = &cluster->alist_record;
        KKASSERT(fsbuf_offset >= 0 && fsbuf_offset % blksize == 0);
        elm_no += fsbuf_offset / blksize;
        hammer_alist_free(live, elm_no, 1);
        hammer_modify_cluster(cluster);
}
/*
 * Allocate a new filesystem buffer and assign it to the specified
 * filesystem buffer type.  The new buffer will be added to the
 * type-specific A-list and initialized.
 */
static void
alloc_new_buffer(hammer_cluster_t cluster, hammer_alist_t live,
                 u_int64_t type, int32_t nelements,
                 int start, int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t buf_no;
        int isfwd;

        start = start / HAMMER_FSBUF_MAXBLKS;	/* convert to buf_no */

        isfwd = (type != HAMMER_FSBUF_RECORDS);
        buf_no = hammer_alloc_master(cluster, 1, start, isfwd);
        if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                *errorp = ENOSPC;
                *bufferp = NULL;
                return;
        }
        hammer_modify_cluster(cluster);

        /*
         * The new buffer must be initialized (type != 0) regardless of
         * whether we already have it cached or not, so don't try to
         * optimize the cached buffer check.  Just call hammer_get_buffer().
         */
        buffer = hammer_get_buffer(cluster, buf_no, type, errorp);
        if (*bufferp)
                hammer_rel_buffer(*bufferp, 0);
        *bufferp = buffer;

        /*
         * Finally, do a meta-free of the buffer's elements into the
         * type-specific A-list and update our statistics to reflect
         * the allocation.
         */
        kprintf("alloc_new_buffer buf_no %d type %016llx nelms %d\n",
                buf_no, type, nelements);
        hammer_alist_free(live, buf_no * HAMMER_FSBUF_MAXBLKS,
                          nelements);
        hammer_adjust_stats(cluster, type, 1);
}
/************************************************************************
 *			SYNCHRONIZATION					*
 ************************************************************************
 *
 * Sync dirty buffers to the media
 */

/*
 * Sync the entire filesystem.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;

        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_sync_volume, &info);
        return(info.error);
}

int
hammer_sync_volume(hammer_volume_t volume, void *data)
{
        struct hammer_sync_info *info = data;

        RB_SCAN(hammer_clu_rb_tree, &volume->rb_clus_root, NULL,
                hammer_sync_cluster, info);
        if (hammer_ref_volume(volume) == 0) {
                hammer_io_flush(&volume->io, info);
                hammer_rel_volume(volume, 0);
        }
        return(0);
}

int
hammer_sync_cluster(hammer_cluster_t cluster, void *data)
{
        struct hammer_sync_info *info = data;

        if (cluster->state != HAMMER_CLUSTER_IDLE) {
                RB_SCAN(hammer_buf_rb_tree, &cluster->rb_bufs_root, NULL,
                        hammer_sync_buffer, info);
                if (hammer_ref_cluster(cluster) == 0) {
                        hammer_io_flush(&cluster->io, info);
                        hammer_rel_cluster(cluster, 0);
                }
        }
        return(0);
}

int
hammer_sync_buffer(hammer_buffer_t buffer, void *data)
{
        struct hammer_sync_info *info = data;

        if (hammer_ref_buffer(buffer) == 0) {
                hammer_lock_ex(&buffer->io.lock);
                hammer_flush_buffer_nodes(buffer);
                hammer_unlock(&buffer->io.lock);
                hammer_io_flush(&buffer->io, info);
                hammer_rel_buffer(buffer, 0);
        }
        return(0);
}
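
/*
 * Example (editor's sketch, not part of the original source): the VFS
 * layer would drive a full sync roughly as follows; hammer_vfs_sync()
 * is assumed to live in the mount code, not in this file:
 *
 *	static int
 *	hammer_vfs_sync(struct mount *mp, int waitfor)
 *	{
 *		struct hammer_mount *hmp = (void *)mp->mnt_data;
 *
 *		return (hammer_sync_hmp(hmp, waitfor));
 *	}
 */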
/*
 * Flush various tracking structures to disk
 */

#if 0
/*
 * Flush various tracking structures to disk
 */
static void
flush_all_volumes(void)
{
        hammer_volume_t vol;

        for (vol = VolBase; vol; vol = vol->next)
                flush_volume(vol);
}

static void
flush_volume(hammer_volume_t vol)
{
        hammer_supercl_t supercl;
        hammer_cluster_t cl;

        for (supercl = vol->supercl_base; supercl; supercl = supercl->next)
                flush_supercl(supercl);
        for (cl = vol->cluster_base; cl; cl = cl->next)
                flush_cluster(cl);
        writehammerbuf(vol, vol->ondisk, 0);
}

static void
flush_supercl(hammer_supercl_t supercl)
{
        int64_t supercl_offset;

        supercl_offset = supercl->scl_offset;
        writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
}

static void
flush_cluster(hammer_cluster_t cl)
{
        hammer_buffer_t buf;
        int64_t cluster_offset;

        for (buf = cl->buffer_base; buf; buf = buf->next)
                flush_buffer(buf);
        cluster_offset = cl->clu_offset;
        writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
}

static void
flush_buffer(hammer_buffer_t buf)
{
        int64_t buffer_offset;

        buffer_offset = buf->buf_offset + buf->cluster->clu_offset;
        writehammerbuf(buf->volume, buf->ondisk, buffer_offset);
}
#endif
/*
 * Generic buffer initialization
 */
static void
initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
{
        head->buf_type = type;
        hammer_alist_init(live);
}
/*
 * Calculate the cluster's offset in the volume.  This calculation is
 * slightly more complex when using superclusters because superclusters
 * are grouped in blocks of 16, followed by 16 x N clusters where N
 * is the number of clusters a supercluster can manage.
 */
static int64_t
calculate_cluster_offset(hammer_volume_t volume, int32_t clu_no)
{
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t off;

        if (volume->vol_flags & HAMMER_VOLF_USINGSUPERCL) {
                scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
                            HAMMER_SCL_MAXCLUSTERS;
                scl_group_size =
                        ((int64_t)HAMMER_BUFSIZE *
                         HAMMER_VOL_SUPERCLUSTER_GROUP) +
                        ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                         volume->vol_clsize * HAMMER_SCL_MAXCLUSTERS);
                scl_group_size +=
                        HAMMER_VOL_SUPERCLUSTER_GROUP * HAMMER_BUFSIZE;

                off = volume->cluster_base +
                      scl_group * scl_group_size +
                      (HAMMER_BUFSIZE * HAMMER_VOL_SUPERCLUSTER_GROUP) +
                      ((int64_t)clu_no % ((int64_t)HAMMER_SCL_MAXCLUSTERS *
                       HAMMER_VOL_SUPERCLUSTER_GROUP))
                      * volume->vol_clsize;
        } else {
                off = volume->cluster_base +
                      (int64_t)clu_no * volume->vol_clsize;
        }
        return(off);
}

/*
 * Calculate a super-cluster's offset in the volume.
 */
static int64_t
calculate_supercl_offset(hammer_volume_t volume, int32_t scl_no)
{
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t off;

        KKASSERT(volume->vol_flags & HAMMER_VOLF_USINGSUPERCL);
        scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
        if (scl_group) {
                scl_group_size =
                        ((int64_t)HAMMER_BUFSIZE *
                         HAMMER_VOL_SUPERCLUSTER_GROUP) +
                        ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                         volume->vol_clsize * HAMMER_SCL_MAXCLUSTERS);
                scl_group_size +=
                        HAMMER_VOL_SUPERCLUSTER_GROUP * HAMMER_BUFSIZE;
                off = volume->cluster_base + (scl_group * scl_group_size) +
                      (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) * HAMMER_BUFSIZE;
        } else {
                off = volume->cluster_base + (scl_no * HAMMER_BUFSIZE);
        }
        return(off);
}
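
/*
 * Worked example (editor's sketch, symbolic because the constants live
 * in hammer_disk.h): with G = HAMMER_VOL_SUPERCLUSTER_GROUP (16, per
 * the comment above) and N = HAMMER_SCL_MAXCLUSTERS, each scl_group on
 * disk spans the group's super-cluster headers plus its G*N clusters:
 *
 *	scl_group_size = G * HAMMER_BUFSIZE		(header run)
 *		       + G * N * vol_clsize		(the clusters)
 *		       + G * HAMMER_BUFSIZE		(second header
 *							 run, as coded)
 *
 * A cluster's offset is then cluster_base + group * scl_group_size +
 * the header area + (cluster index within the group) * vol_clsize.
 * Without superclusters the layout degenerates to the simple
 * cluster_base + clu_no * vol_clsize case.
 */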
static int32_t
hammer_alloc_master(hammer_cluster_t cluster, int nblks,
                    int32_t start, int isfwd)
{
        int32_t buf_no;

        if (isfwd) {
                buf_no = hammer_alist_alloc_fwd(&cluster->alist_master,
                                                nblks, start);
                if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                        buf_no = hammer_alist_alloc_fwd(&cluster->alist_master,
                                                        nblks, 0);
                }
        } else {
                buf_no = hammer_alist_alloc_rev(&cluster->alist_master,
                                                nblks, start);
                if (buf_no == HAMMER_ALIST_BLOCK_NONE) {
                        buf_no = hammer_alist_alloc_rev(&cluster->alist_master,
                                                nblks, HAMMER_ALIST_BLOCK_MAX);
                }
        }
        return(buf_no);
}

/*
 * Recover space from empty record, b-tree, and data a-lists.
 */
/*
 * Adjust allocation statistics
 */
static void
hammer_adjust_stats(hammer_cluster_t cluster, u_int64_t buf_type, int nblks)
{
        switch(buf_type) {
        case HAMMER_FSBUF_BTREE:
                cluster->ondisk->stat_idx_bufs += nblks;
                cluster->volume->ondisk->vol_stat_idx_bufs += nblks;
                cluster->volume->hmp->rootvol->ondisk->vol0_stat_idx_bufs += nblks;
                break;
        case HAMMER_FSBUF_DATA:
                cluster->ondisk->stat_data_bufs += nblks;
                cluster->volume->ondisk->vol_stat_data_bufs += nblks;
                cluster->volume->hmp->rootvol->ondisk->vol0_stat_data_bufs += nblks;
                break;
        case HAMMER_FSBUF_RECORDS:
                cluster->ondisk->stat_rec_bufs += nblks;
                cluster->volume->ondisk->vol_stat_rec_bufs += nblks;
                cluster->volume->hmp->rootvol->ondisk->vol0_stat_rec_bufs += nblks;
                break;
        }
        hammer_modify_cluster(cluster);
        hammer_modify_volume(cluster->volume);
        hammer_modify_volume(cluster->volume->hmp->rootvol);
}
/*
 * Setup the parameters for the various A-lists we use in hammer.  The
 * supercluster A-list must be chained to the cluster A-list and cluster
 * slave A-lists are chained to buffer A-lists.
 *
 * See hammer_init_alist_config() below.
 */
/*
 * A-LIST - cluster recursion into a filesystem buffer
 */
static int
buffer_alist_init(void *info, int32_t blk, int32_t radix)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        /*
         * Calculate the buffer number, initialize based on the buffer type.
         * The buffer has already been allocated so assert that it has been
         * initialized.
         */
        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                hammer_adjust_stats(cluster, buffer->ondisk->head.buf_type, 1);
                hammer_rel_buffer(buffer, 0);
        }
        return (error);
}
static int
buffer_alist_destroy(void *info, int32_t blk, int32_t radix)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        /*
         * Calculate the buffer number and locate the buffer.  The buffer
         * is still allocated at this point, so adjust the statistics
         * before it goes away.
         */
        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                hammer_adjust_stats(cluster, buffer->ondisk->head.buf_type, -1);
                hammer_rel_buffer(buffer, 0);
        }
        return (error);
}
/*
 * Note: atblk can be negative and atblk - blk can go negative.
 */
static int32_t
buffer_alist_alloc_fwd(void *info, int32_t blk, int32_t radix,
                       int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int32_t r;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);

                r = hammer_alist_alloc_fwd(&buffer->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&buffer->alist);
                hammer_modify_buffer(buffer);
                hammer_rel_buffer(buffer, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static int32_t
buffer_alist_alloc_rev(void *info, int32_t blk, int32_t radix,
                       int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int32_t r;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);

                r = hammer_alist_alloc_rev(&buffer->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&buffer->alist);
                hammer_modify_buffer(buffer);
                hammer_rel_buffer(buffer, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
        }
        return(r);
}
static void
buffer_alist_free(void *info, int32_t blk, int32_t radix,
                  int32_t base_blk, int32_t count, int32_t *emptyp)
{
        hammer_cluster_t cluster = info;
        hammer_buffer_t buffer;
        int32_t buf_no;
        int error = 0;

        buf_no = blk / HAMMER_FSBUF_MAXBLKS;
        buffer = hammer_get_buffer(cluster, buf_no, 0, &error);
        if (buffer) {
                KKASSERT(buffer->ondisk->head.buf_type != 0);
                hammer_alist_free(&buffer->alist, base_blk, count);
                *emptyp = hammer_alist_isempty(&buffer->alist);
                /* XXX don't bother updating if the buffer is completely empty? */
                hammer_modify_buffer(buffer);
                hammer_rel_buffer(buffer, 0);
        } else {
                *emptyp = 0;
        }
}
static void
buffer_alist_print(void *info, int32_t blk, int32_t radix, int tab)
{
}
/*
 * A-LIST - super-cluster recursion into a cluster and cluster recursion
 * into a filesystem buffer.  A-List's are mostly self-contained entities,
 * but callbacks must be installed to recurse from one A-List to another.
 *
 * Implementing these callbacks allows us to operate a multi-layered A-List
 * as a single entity.
 */
static int
super_alist_init(void *info, int32_t blk, int32_t radix)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int error = 0;

        /*
         * Calculate the super-cluster number containing the cluster (blk)
         * and obtain the super-cluster buffer.
         */
        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 1);
        if (supercl)
                hammer_rel_supercl(supercl, 0);
        return (error);
}
static int
super_alist_destroy(void *info, int32_t blk, int32_t radix)
{
        return(0);
}
static int32_t
super_alist_alloc_fwd(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int32_t r;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 0);
        if (supercl) {
                r = hammer_alist_alloc_fwd(&supercl->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&supercl->alist);
                hammer_modify_supercl(supercl);
                hammer_rel_supercl(supercl, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
                *fullp = 1;
        }
        return(r);
}
static int32_t
super_alist_alloc_rev(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int32_t r;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 0);
        if (supercl) {
                r = hammer_alist_alloc_rev(&supercl->alist, count, atblk - blk);
                if (r != HAMMER_ALIST_BLOCK_NONE)
                        r += blk;
                *fullp = hammer_alist_isfull(&supercl->alist);
                hammer_modify_supercl(supercl);
                hammer_rel_supercl(supercl, 0);
        } else {
                r = HAMMER_ALIST_BLOCK_NONE;
                *fullp = 1;
        }
        return(r);
}
static void
super_alist_free(void *info, int32_t blk, int32_t radix,
                 int32_t base_blk, int32_t count, int32_t *emptyp)
{
        hammer_volume_t volume = info;
        hammer_supercl_t supercl;
        int32_t scl_no;
        int error = 0;

        scl_no = blk / HAMMER_SCL_MAXCLUSTERS;
        supercl = hammer_get_supercl(volume, scl_no, &error, 0);
        if (supercl) {
                hammer_alist_free(&supercl->alist, base_blk, count);
                *emptyp = hammer_alist_isempty(&supercl->alist);
                hammer_modify_supercl(supercl);
                hammer_rel_supercl(supercl, 0);
        } else {
                *emptyp = 0;
        }
}
static void
super_alist_print(void *info, int32_t blk, int32_t radix, int tab)
{
}
void
hammer_init_alist_config(void)
{
        hammer_alist_config_t config;

        hammer_alist_template(&Buf_alist_config, HAMMER_FSBUF_MAXBLKS,
                              1, HAMMER_FSBUF_METAELMS);
        hammer_alist_template(&Vol_normal_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_VOL_METAELMS_1LYR);
        hammer_alist_template(&Vol_super_alist_config,
                              HAMMER_VOL_MAXSUPERCLUSTERS * HAMMER_SCL_MAXCLUSTERS,
                              HAMMER_SCL_MAXCLUSTERS, HAMMER_VOL_METAELMS_2LYR);
        hammer_alist_template(&Supercl_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_SUPERCL_METAELMS);
        hammer_alist_template(&Clu_master_alist_config, HAMMER_CLU_MAXBUFFERS,
                              1, HAMMER_CLU_MASTER_METAELMS);
        hammer_alist_template(&Clu_slave_alist_config,
                              HAMMER_CLU_MAXBUFFERS * HAMMER_FSBUF_MAXBLKS,
                              HAMMER_FSBUF_MAXBLKS, HAMMER_CLU_SLAVE_METAELMS);

        config = &Vol_super_alist_config;
        config->bl_radix_init = super_alist_init;
        config->bl_radix_destroy = super_alist_destroy;
        config->bl_radix_alloc_fwd = super_alist_alloc_fwd;
        config->bl_radix_alloc_rev = super_alist_alloc_rev;
        config->bl_radix_free = super_alist_free;
        config->bl_radix_print = super_alist_print;

        config = &Clu_slave_alist_config;
        config->bl_radix_init = buffer_alist_init;
        config->bl_radix_destroy = buffer_alist_destroy;
        config->bl_radix_alloc_fwd = buffer_alist_alloc_fwd;
        config->bl_radix_alloc_rev = buffer_alist_alloc_rev;
        config->bl_radix_free = buffer_alist_free;
        config->bl_radix_print = buffer_alist_print;
}
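
/*
 * Example (editor's sketch, not part of the original source): with the
 * callbacks above installed, a single allocation call on a cluster's
 * slave A-list transparently recurses into the underlying filesystem
 * buffer's A-list:
 *
 *	hammer_init_alist_config();	(once, at module initialization)
 *	...
 *	live = &cluster->alist_btree;
 *	elm_no = hammer_alist_alloc_fwd(live, 1, hint);
 *
 * The allocation consults Clu_slave_alist_config, whose
 * bl_radix_alloc_fwd callback (buffer_alist_alloc_fwd above) resolves
 * the target buffer with hammer_get_buffer() and then allocates from
 * that buffer's own A-list.
 */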