2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.9 2008/01/24 02:16:47 dillon Exp $
37 #include <sys/types.h>
46 #include "hammer_util.h"
/*
 * Forward declarations for file-local helpers.
 * NOTE(review): this listing elides several declaration lines (argument
 * lists are cut mid-prototype) -- verify against the full source.
 */
48 static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
50 static void alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
51 u_int64_t type, int32_t nelements);
53 static void readhammerbuf(struct volume_info *vol, void *data,
56 static void writehammerbuf(struct volume_info *vol, const void *data,
/*
 * A-list (allocation bitmap) layout templates, filled in once by
 * init_alist_templates() and shared by every volume/supercluster/cluster.
 */
60 struct hammer_alist_config Buf_alist_config;
61 struct hammer_alist_config Vol_normal_alist_config;
62 struct hammer_alist_config Vol_super_alist_config;
63 struct hammer_alist_config Supercl_alist_config;
64 struct hammer_alist_config Clu_master_alist_config;
65 struct hammer_alist_config Clu_slave_alist_config;
/* Non-zero when the volume set uses super-clusters (see setup_volume()). */
70 int UsingSuperClusters;
/* All volumes opened via setup_volume(), keyed by vol_no. */
73 struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
/*
 * Initialize the six a-list templates used throughout this file:
 * per-buffer, per-volume (1-layer and 2-layer/super), per-supercluster,
 * and the cluster master/slave lists.  Must be called before any
 * setup_volume()/get_*() call that references the templates above.
 * NOTE(review): the return-type line, braces, and some trailing template
 * arguments are elided from this listing.
 */
76 init_alist_templates(void)
79 * Initialize the alist templates we will be using
81 hammer_alist_template(&Buf_alist_config, HAMMER_FSBUF_MAXBLKS,
82 1, HAMMER_FSBUF_METAELMS, 0);
83 hammer_alist_template(&Vol_normal_alist_config, HAMMER_VOL_MAXCLUSTERS,
84 1, HAMMER_VOL_METAELMS_1LYR, 0);
85 hammer_alist_template(&Vol_super_alist_config,
86 HAMMER_VOL_MAXSUPERCLUSTERS * HAMMER_SCL_MAXCLUSTERS,
87 HAMMER_SCL_MAXCLUSTERS, HAMMER_VOL_METAELMS_2LYR,
/* The 2-layer volume template gets super-cluster recursion callbacks. */
89 hammer_super_alist_template(&Vol_super_alist_config);
90 hammer_alist_template(&Supercl_alist_config, HAMMER_VOL_MAXCLUSTERS,
91 1, HAMMER_SUPERCL_METAELMS, 0);
92 hammer_alist_template(&Clu_master_alist_config, HAMMER_CLU_MAXBUFFERS,
93 1, HAMMER_CLU_MASTER_METAELMS, 0);
94 hammer_alist_template(&Clu_slave_alist_config,
95 HAMMER_CLU_MAXBUFFERS * HAMMER_FSBUF_MAXBLKS,
96 HAMMER_FSBUF_MAXBLKS, HAMMER_CLU_SLAVE_METAELMS,
/* The slave template gets per-buffer recursion callbacks. */
98 hammer_buffer_alist_template(&Clu_slave_alist_config);
102 * Lookup the requested information structure and related on-disk buffer.
103 * Missing structures are created.
/*
 * setup_volume() - open (or create, when isnew) the volume backing file
 * `filename`, validate/initialize its on-disk header, and link the new
 * volume_info onto the global VolList.  Exits via err()/errx() on any
 * failure; duplicate volume numbers are fatal.
 * NOTE(review): this listing elides numerous lines (closing braces,
 * else branches, `int n` declaration, the isnew branches, and the
 * return statement) -- do not edit from this copy alone.
 */
107 setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
109 struct volume_info *vol;
110 struct volume_info *scan;
111 struct hammer_volume_ondisk *ondisk;
115 * Allocate the volume structure
/* NOTE(review): malloc/strdup results are not NULL-checked. */
117 vol = malloc(sizeof(*vol));
118 bzero(vol, sizeof(*vol));
119 TAILQ_INIT(&vol->cluster_list);
120 TAILQ_INIT(&vol->supercl_list);
121 vol->name = strdup(filename);
122 vol->fd = open(filename, oflags);
126 err(1, "setup_volume: %s: Open failed", filename);
130 * Read or initialize the volume header
132 vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
134 bzero(ondisk, HAMMER_BUFSIZE);
/* New volume: inherit the global super-cluster policy. */
135 vol->using_supercl = UsingSuperClusters;
/* Existing volume: read and cross-check the on-disk header. */
137 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
138 if (n != HAMMER_BUFSIZE) {
139 err(1, "setup_volume: %s: Read failed at offset 0",
/* Header flags override the caller's vol_no and supercl setting. */
142 if (ondisk->vol_flags & HAMMER_VOLF_USINGSUPERCL)
143 vol->using_supercl = 1;
144 vol_no = ondisk->vol_no;
/* First volume establishes RootVolNo; later ones must agree. */
146 RootVolNo = ondisk->vol_rootvol;
147 } else if (RootVolNo != (int)ondisk->vol_rootvol) {
148 errx(1, "setup_volume: %s: root volume disagreement: "
150 vol->name, RootVolNo, ondisk->vol_rootvol);
153 if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
154 errx(1, "setup_volume: %s: Header does not indicate "
155 "that this is a hammer volume", vol->name);
/* First volume establishes the FSId; later volumes must match it. */
157 if (TAILQ_EMPTY(&VolList)) {
158 Hammer_FSId = vol->ondisk->vol_fsid;
159 } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
/* NOTE(review): message likely should read "does NOT match". */
160 errx(1, "setup_volume: %s: FSId does match other "
161 "volumes!", vol->name);
/* Wire up the cluster a-list: 2-layer template when superclusters are on. */
164 vol->vol_no = vol_no;
165 if (vol->using_supercl) {
166 vol->clu_alist.config = &Vol_super_alist_config;
167 vol->clu_alist.meta = ondisk->vol_almeta.super;
168 vol->clu_alist.info = vol;
170 vol->clu_alist.config = &Vol_normal_alist_config;
171 vol->clu_alist.meta = ondisk->vol_almeta.normal;
173 vol->buf_alist.config = &Buf_alist_config;
174 vol->buf_alist.meta = ondisk->head.buf_almeta;
/* Fresh volume: initialize the a-lists and mark the header dirty. */
177 hammer_alist_init(&vol->clu_alist, 0, 0, HAMMER_ASTATE_ALLOC);
178 initbuffer(&vol->buf_alist, &ondisk->head, HAMMER_FSBUF_VOLUME);
179 vol->cache.modified = 1;
183 * Link the volume structure in
185 TAILQ_FOREACH(scan, &VolList, entry) {
186 if (scan->vol_no == vol_no) {
187 errx(1, "setup_volume %s: Duplicate volume number %d "
188 "against %s", filename, vol_no, scan->name);
191 TAILQ_INSERT_TAIL(&VolList, vol, entry);
/*
 * get_volume() - look up an already-opened volume by number on VolList.
 * Fatal (errx) if the volume was never set up.
 * NOTE(review): the return-type line, loop body/return, and the ref-count
 * bump suggested by the trailing comment are elided from this listing.
 */
196 get_volume(int32_t vol_no)
198 struct volume_info *vol;
200 TAILQ_FOREACH(vol, &VolList, entry) {
201 if (vol->vol_no == vol_no)
205 errx(1, "get_volume: Volume %d does not exist!", vol_no);
207 /* not added to or removed from hammer cache */
/*
 * rel_volume() - drop one reference taken by get_volume().  Volumes are
 * never evicted here; they stay on VolList until flush/exit.
 */
212 rel_volume(struct volume_info *volume)
214 /* not added to or removed from hammer cache */
215 --volume->cache.refs;
/*
 * get_supercl() - look up (or create and load/format) the supercluster
 * containing scl_no for the given volume.  Only valid when the volume
 * uses superclusters.  A non-zero `isnew` formats the supercluster's
 * cluster a-list instead of reading it from disk.
 * NOTE(review): early-return on cache hit, the isnew/else structure, the
 * `int n` declaration, and the trailing ref-count/return lines are elided
 * from this listing.
 */
218 struct supercl_info *
219 get_supercl(struct volume_info *vol, int32_t scl_no, hammer_alloc_state_t isnew)
221 struct hammer_supercl_ondisk *ondisk;
222 struct supercl_info *scl;
224 int64_t scl_group_size;
225 int64_t clusterSize = vol->ondisk->vol_clsize;
228 assert(vol->using_supercl);
/* Cache lookup first. */
230 TAILQ_FOREACH(scl, &vol->supercl_list, entry) {
231 if (scl->scl_no == scl_no)
/* Miss: allocate tracking structure.  NOTE(review): malloc unchecked. */
238 scl = malloc(sizeof(*scl));
239 bzero(scl, sizeof(*scl));
240 scl->scl_no = scl_no;
242 TAILQ_INSERT_TAIL(&vol->supercl_list, scl, entry);
244 scl->cache.u.supercl = scl;
245 hammer_cache_add(&scl->cache, ISSUPERCL);
248 * Calculate the super-cluster's offset in the volume.
250 * The arrangement is [scl * N][N * 32768 clusters], repeat.
253 scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
/* One group = N supercluster headers followed by their clusters. */
254 scl_group_size = ((int64_t)HAMMER_BUFSIZE *
255 HAMMER_VOL_SUPERCLUSTER_GROUP) +
256 ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
257 clusterSize * HAMMER_SCL_MAXCLUSTERS);
258 scl->scl_offset = vol->ondisk->vol_clo_beg +
259 scl_group * scl_group_size +
260 (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) *
/* Bound memory use before loading the ondisk image. */
264 hammer_cache_flush();
265 if ((ondisk = scl->ondisk) == NULL) {
266 scl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
267 scl->clu_alist.config = &Supercl_alist_config;
268 scl->clu_alist.meta = ondisk->scl_meta;
269 scl->buf_alist.config = &Buf_alist_config;
270 scl->buf_alist.meta = ondisk->head.buf_almeta;
/* Existing supercluster: read it from disk. */
272 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
274 if (n != HAMMER_BUFSIZE) {
275 err(1, "get_supercl: %s:%d Read failed "
277 vol->name, scl_no, scl->scl_offset);
/* New supercluster: format in-place and mark dirty. */
282 bzero(ondisk, HAMMER_BUFSIZE);
283 hammer_alist_init(&scl->clu_alist, 0, 0, isnew);
284 initbuffer(&scl->buf_alist, &ondisk->head,
285 HAMMER_FSBUF_SUPERCL);
286 scl->cache.modified = 1;
/*
 * rel_supercl() - drop a supercluster reference.  When the last reference
 * goes away and the cache has flagged the entry for deletion, flush it if
 * modified, unlink it from the volume, and free the ondisk image.
 * NOTE(review): closing braces and the free() of the tracking struct are
 * elided from this listing.
 */
292 rel_supercl(struct supercl_info *supercl)
294 struct volume_info *volume;
296 assert(supercl->cache.refs > 0);
297 if (--supercl->cache.refs == 0) {
298 if (supercl->cache.delete) {
299 volume = supercl->volume;
300 if (supercl->cache.modified)
301 flush_supercl(supercl);
302 TAILQ_REMOVE(&volume->supercl_list, supercl, entry);
303 hammer_cache_del(&supercl->cache);
304 free(supercl->ondisk);
/*
 * get_cluster() - look up (or create and load/format) cluster clu_no on
 * the given volume.  When superclusters are in use, the covering
 * supercluster is referenced as well and the on-media offset math
 * accounts for the interleaved supercluster headers.
 * NOTE(review): this listing elides the cache-hit return, else branches,
 * `int n`, the scl_group_size assignment's left-hand side (around the
 * "357" line), and the trailing ref-count/return lines.
 */
311 struct cluster_info *
312 get_cluster(struct volume_info *vol, int32_t clu_no, hammer_alloc_state_t isnew)
314 struct hammer_cluster_ondisk *ondisk;
315 struct cluster_info *cl;
317 int64_t scl_group_size;
318 int64_t clusterSize = vol->ondisk->vol_clsize;
/* Cache lookup first. */
321 TAILQ_FOREACH(cl, &vol->cluster_list, entry) {
322 if (cl->clu_no == clu_no)
327 * Allocate the cluster
/* NOTE(review): malloc unchecked. */
329 cl = malloc(sizeof(*cl));
330 bzero(cl, sizeof(*cl));
331 TAILQ_INIT(&cl->buffer_list);
334 TAILQ_INSERT_TAIL(&vol->cluster_list, cl, entry);
336 cl->cache.u.cluster = cl;
337 hammer_cache_add(&cl->cache, ISCLUSTER);
/* Hold a ref on the covering supercluster for the cluster's lifetime. */
338 if (vol->using_supercl) {
339 cl->supercl = get_supercl(vol, clu_no / HAMMER_SCL_MAXCLUSTERS, 0);
340 ++cl->supercl->cache.refs;
344 * Calculate the cluster's offset in the volume
346 * The arrangement is [scl * N][N * 32768 clusters], repeat.
349 * Note that the cluster offset calculation is slightly
350 * different from the supercluster offset calculation due
351 * to the way the grouping works.
353 if (vol->using_supercl) {
354 scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
355 HAMMER_SCL_MAXCLUSTERS;
357 ((int64_t)HAMMER_BUFSIZE *
358 HAMMER_VOL_SUPERCLUSTER_GROUP) +
359 ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
360 clusterSize * HAMMER_SCL_MAXCLUSTERS);
361 scl_group_size += HAMMER_VOL_SUPERCLUSTER_GROUP *
364 vol->ondisk->vol_clo_beg +
365 scl_group * scl_group_size +
/* Skip past the group's supercluster headers, then index within. */
366 (HAMMER_BUFSIZE * HAMMER_VOL_SUPERCLUSTER_GROUP) +
367 ((int64_t)clu_no % ((int64_t)HAMMER_SCL_MAXCLUSTERS * HAMMER_VOL_SUPERCLUSTER_GROUP)) *
/* No superclusters: clusters are laid out back-to-back. */
370 cl->clu_offset = vol->ondisk->vol_clo_beg +
371 (int64_t)clu_no * clusterSize;
/* Bound memory use before loading the ondisk image. */
375 hammer_cache_flush();
376 if ((ondisk = cl->ondisk) == NULL) {
377 cl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
/* Master list tracks buffers; the three slave lists track elements
 * within B-Tree / record / data buffers respectively. */
378 cl->alist_master.config = &Clu_master_alist_config;
379 cl->alist_master.meta = ondisk->clu_master_meta;
380 cl->alist_btree.config = &Clu_slave_alist_config;
381 cl->alist_btree.meta = ondisk->clu_btree_meta;
382 cl->alist_btree.info = cl;
383 cl->alist_record.config = &Clu_slave_alist_config;
384 cl->alist_record.meta = ondisk->clu_record_meta;
385 cl->alist_record.info = cl;
386 cl->alist_mdata.config = &Clu_slave_alist_config;
387 cl->alist_mdata.meta = ondisk->clu_mdata_meta;
388 cl->alist_mdata.info = cl;
/* Existing cluster: read its header from disk. */
390 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
392 if (n != HAMMER_BUFSIZE) {
393 err(1, "get_cluster: %s:%d Read failed "
395 vol->name, clu_no, cl->clu_offset);
/* New cluster: format all four a-lists and mark dirty. */
400 bzero(ondisk, HAMMER_BUFSIZE);
401 hammer_alist_init(&cl->alist_master, 0, 0, isnew);
402 hammer_alist_init(&cl->alist_btree, 0, 0, HAMMER_ASTATE_ALLOC);
403 hammer_alist_init(&cl->alist_record, 0, 0, HAMMER_ASTATE_ALLOC);
404 hammer_alist_init(&cl->alist_mdata, 0, 0, HAMMER_ASTATE_ALLOC);
405 cl->cache.modified = 1;
/*
 * rel_cluster() - drop a cluster reference.  On last release of a
 * delete-flagged entry: flush if modified, unlink from the volume's
 * cluster list, free the ondisk image, and release the ref held on the
 * covering supercluster (when superclusters are in use).
 * NOTE(review): closing braces, free() of the tracking struct, and the
 * supercl NULL-check around the final rel_supercl() are elided from this
 * listing.
 */
411 rel_cluster(struct cluster_info *cluster)
413 struct volume_info *volume;
414 struct supercl_info *supercl;
416 assert(cluster->cache.refs > 0);
417 if (--cluster->cache.refs == 0) {
418 if (cluster->cache.delete) {
419 volume = cluster->volume;
420 supercl = cluster->supercl;
421 if (cluster->cache.modified)
422 flush_cluster(cluster);
423 TAILQ_REMOVE(&volume->cluster_list, cluster, entry);
424 hammer_cache_del(&cluster->cache);
425 free(cluster->ondisk);
429 rel_supercl(supercl);
435 * Acquire the specified buffer.
437 * We are formatting a new buffer if buf_type != 0
/*
 * get_buffer() - look up (or create and load/format) filesystem buffer
 * buf_no within cluster cl.  buf_type != 0 formats a new buffer of that
 * type; buf_type == 0 loads an existing one from disk.
 * NOTE(review): the cache-hit return, the buf->cluster assignment (the
 * error path below reads buf->cluster), `int n`, else branches, and the
 * trailing ref-count/return lines are elided from this listing.
 */
440 get_buffer(struct cluster_info *cl, int32_t buf_no, int64_t buf_type)
442 hammer_fsbuf_ondisk_t ondisk;
443 struct buffer_info *buf;
447 * Find the buffer. Note that buffer 0 corresponds to the cluster
448 * header and should never be requested.
/* NOTE(review): buf_no == 0 is documented as illegal but not asserted. */
451 TAILQ_FOREACH(buf, &cl->buffer_list, entry) {
452 if (buf->buf_no == buf_no)
/* Miss: allocate tracking structure.  NOTE(review): malloc unchecked. */
456 buf = malloc(sizeof(*buf));
457 bzero(buf, sizeof(*buf));
458 buf->buf_no = buf_no;
459 buf->buf_offset = cl->clu_offset + buf_no * HAMMER_BUFSIZE;
461 buf->volume = cl->volume;
462 TAILQ_INSERT_TAIL(&cl->buffer_list, buf, entry);
464 buf->cache.u.buffer = buf;
465 hammer_cache_add(&buf->cache, ISBUFFER);
/* Bound memory use before loading the ondisk image. */
468 hammer_cache_flush();
469 if ((ondisk = buf->ondisk) == NULL) {
470 buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
471 buf->alist.config = &Buf_alist_config;
472 buf->alist.meta = ondisk->head.buf_almeta;
/* buf_type == 0: existing buffer, read it from disk. */
474 n = pread(cl->volume->fd, ondisk, HAMMER_BUFSIZE,
476 if (n != HAMMER_BUFSIZE) {
477 err(1, "get_buffer: %s:%d:%d Read failed at "
479 cl->volume->name, buf->cluster->clu_no,
480 buf_no, buf->buf_offset);
/* buf_type != 0: format a fresh buffer and mark dirty. */
485 bzero(ondisk, HAMMER_BUFSIZE);
486 initbuffer(&buf->alist, &ondisk->head, buf_type);
487 buf->cache.modified = 1;
/*
 * rel_buffer() - drop a buffer reference.  On last release of a
 * delete-flagged entry: flush if modified, unlink from the cluster's
 * buffer list, and free the ondisk image.
 * NOTE(review): closing braces, free() of the tracking struct, and the
 * context around the final rel_cluster() call are elided from this
 * listing -- confirm rel_cluster() is intended here (it releases the ref
 * the buffer's creation presumably took on its cluster).
 */
493 rel_buffer(struct buffer_info *buffer)
495 struct cluster_info *cluster;
497 assert(buffer->cache.refs > 0);
498 if (--buffer->cache.refs == 0) {
499 if (buffer->cache.delete) {
500 cluster = buffer->cluster;
501 if (buffer->cache.modified)
502 flush_buffer(buffer);
503 TAILQ_REMOVE(&cluster->buffer_list, buffer, entry);
504 hammer_cache_del(&buffer->cache);
505 free(buffer->ondisk);
507 rel_cluster(cluster);
513 * Retrieve a pointer to a B-Tree node given a cluster offset. The underlying
514 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
/*
 * get_node() - map a cluster-relative byte offset to a pointer inside the
 * containing buffer's in-memory image, verifying the buffer really holds
 * B-Tree nodes.  The referenced buffer is returned via *bufp.
 * NOTE(review): the return-type line and the rel_buffer() of any previous
 * *bufp implied by the comment above are elided from this listing.
 */
517 get_node(struct cluster_info *cl, int32_t offset, struct buffer_info **bufp)
519 struct buffer_info *buf;
523 *bufp = buf = get_buffer(cl, offset / HAMMER_BUFSIZE, 0);
524 if (buf->ondisk->head.buf_type != HAMMER_FSBUF_BTREE) {
525 errx(1, "get_node %d:%d:%d - not a B-Tree node buffer!",
526 cl->volume->vol_no, cl->clu_no, offset);
/* Offset within the 16K buffer selects the node. */
528 return((void *)((char *)buf->ondisk + (offset & HAMMER_BUFMASK)));
532 * Allocate HAMMER elements - btree nodes, data storage, and record elements
/*
 * alloc_btree_element() - allocate one B-Tree node slot in the cluster,
 * returning its cluster-relative byte offset via *offp.  Allocation
 * resumes at the idx_index hint, wraps to 0, and formats a brand-new
 * B-Tree buffer if the cluster's slave list is exhausted.
 * NOTE(review): the return-type line, local declarations for live/elm_no/
 * item, closing braces, and the return of the node pointer are elided
 * from this listing.
 */
535 alloc_btree_element(struct cluster_info *cluster, int32_t *offp)
537 struct buffer_info *buf;
542 live = &cluster->alist_btree;
/* Try from the hint, then from the start, then grow. */
543 elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
544 if (elm_no == HAMMER_ALIST_BLOCK_NONE)
545 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
546 if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
547 alloc_new_buffer(cluster, live,
548 HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES);
549 ++cluster->ondisk->stat_idx_bufs;
550 ++cluster->volume->ondisk->vol_stat_idx_bufs;
551 ++cluster->volume->ondisk->vol0_stat_idx_bufs;
552 elm_no = hammer_alist_alloc(live, 1);
553 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
/* Remember the hint, then locate the node within its buffer. */
555 cluster->ondisk->idx_index = elm_no;
556 buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
557 assert(buf->ondisk->head.buf_type != 0);
558 item = &buf->ondisk->btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK];
559 *offp = buf->buf_no * HAMMER_BUFSIZE +
560 ((char *)item - (char *)buf->ondisk);
/*
 * alloc_data_element() - allocate `bytes` of data storage (rounded up to
 * HAMMER data blocks) in the cluster, returning the cluster-relative
 * byte offset via *offp.
 * NOTE(review): the return-type line, local declarations, closing braces,
 * and the return statement are elided from this listing.
 */
565 alloc_data_element(struct cluster_info *cluster, int32_t bytes, int32_t *offp)
567 struct buffer_info *buf;
570 int32_t nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
574 * Try to allocate a btree-node. If elm_no is HAMMER_ALIST_BLOCK_NONE
575 * and buf is non-NULL we have to initialize a new buffer's a-list.
577 live = &cluster->alist_mdata;
578 elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
579 if (elm_no == HAMMER_ALIST_BLOCK_NONE)
/* NOTE(review): retry requests 1 block, not nblks -- looks like a
 * copy/paste slip from alloc_btree_element; confirm against upstream. */
580 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
581 if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
582 alloc_new_buffer(cluster, live,
583 HAMMER_FSBUF_DATA, HAMMER_DATA_NODES);
584 ++cluster->ondisk->stat_data_bufs;
585 ++cluster->volume->ondisk->vol_stat_data_bufs;
586 ++cluster->volume->ondisk->vol0_stat_data_bufs;
587 elm_no = hammer_alist_alloc(live, nblks);
588 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
/* NOTE(review): updates idx_index but this allocator reads idx_data
 * (line "578" above) -- likely should be idx_data; verify. */
590 cluster->ondisk->idx_index = elm_no;
591 buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
592 assert(buf->ondisk->head.buf_type != 0);
593 item = &buf->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
594 *offp = buf->buf_no * HAMMER_BUFSIZE +
595 ((char *)item - (char *)buf->ondisk);
/*
 * alloc_record_element() - allocate one record slot in the cluster,
 * scanning in reverse from the idx_record hint (records grow downward,
 * opposite to B-Tree/data buffers).  Returns the cluster-relative byte
 * offset via *offp.
 * NOTE(review): the rec_type parameter's declaration line, return-type
 * line, local declarations, closing braces, and the return statement are
 * elided from this listing.
 */
600 alloc_record_element(struct cluster_info *cluster, int32_t *offp,
603 struct buffer_info *buf;
608 live = &cluster->alist_record;
/* Try from the hint (reverse), then from the top, then grow. */
609 elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
610 if (elm_no == HAMMER_ALIST_BLOCK_NONE)
611 elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
612 if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
613 alloc_new_buffer(cluster, live,
614 HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES);
615 ++cluster->ondisk->stat_rec_bufs;
616 ++cluster->volume->ondisk->vol_stat_rec_bufs;
617 ++cluster->volume->ondisk->vol0_stat_rec_bufs;
618 elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
619 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
/* Remember the hint, then locate the record within its buffer. */
621 cluster->ondisk->idx_record = elm_no;
622 buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
623 assert(buf->ondisk->head.buf_type != 0);
624 item = &buf->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
625 *offp = buf->buf_no * HAMMER_BUFSIZE +
626 ((char *)item - (char *)buf->ondisk);
/* NOTE(review): CLUSTER-type records bump stat_records twice -- confirm
 * this double count is intentional. */
627 ++cluster->ondisk->stat_records;
628 if (rec_type == HAMMER_RECTYPE_CLUSTER)
629 ++cluster->ondisk->stat_records;
/*
 * alloc_new_buffer() - grab a free buffer from the cluster's master
 * a-list (reverse scan for record buffers so they grow from the top,
 * forward otherwise), format it as `type`, and release its `nelements`
 * element slots into the slave a-list `live` so callers can allocate
 * from it.
 * NOTE(review): the `static void` line, `int32_t buf_no` declaration,
 * the else line, the forward-scan start argument, closing braces, and the
 * rel_buffer() hinted at by the XXX comment are elided from this listing.
 */
634 alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
635 u_int64_t type, int32_t nelements)
638 struct buffer_info *buf;
640 if (type == HAMMER_FSBUF_RECORDS) {
641 buf_no = hammer_alist_alloc_rev(&cluster->alist_master, 1,
642 HAMMER_ALIST_BLOCK_MAX);
644 buf_no = hammer_alist_alloc_fwd(&cluster->alist_master, 1,
647 assert(buf_no != HAMMER_ALIST_BLOCK_NONE);
648 buf = get_buffer(cluster, buf_no, type);
649 hammer_alist_free(live, buf_no * HAMMER_FSBUF_MAXBLKS, nelements);
650 /* XXX modified bit for multiple gets/rels */
654 * Flush various tracking structures to disk
658 * Flush various tracking structures to disk
/*
 * flush_all_volumes() - write every open volume (and, transitively, its
 * superclusters, clusters, and buffers) back to disk.
 * NOTE(review): the void/brace lines and the flush_volume(vol) loop body
 * are elided from this listing; the duplicated comment line above appears
 * to be an extraction artifact.
 */
661 flush_all_volumes(void)
663 struct volume_info *vol;
665 TAILQ_FOREACH(vol, &VolList, entry)
/*
 * flush_volume() - flush all superclusters and clusters belonging to the
 * volume, then write the volume header itself and clear its dirty bit.
 * NOTE(review): the void/brace lines and the flush_cluster(cl) loop body
 * are elided from this listing.
 */
670 flush_volume(struct volume_info *vol)
672 struct supercl_info *supercl;
673 struct cluster_info *cl;
675 TAILQ_FOREACH(supercl, &vol->supercl_list, entry)
676 flush_supercl(supercl);
677 TAILQ_FOREACH(cl, &vol->cluster_list, entry)
/* Volume header lives at offset 0 of the backing file. */
679 writehammerbuf(vol, vol->ondisk, 0);
680 vol->cache.modified = 0;
/*
 * flush_supercl() - write the supercluster's ondisk image at its
 * precomputed scl_offset and clear its dirty bit.
 */
684 flush_supercl(struct supercl_info *supercl)
686 int64_t supercl_offset;
688 supercl_offset = supercl->scl_offset;
689 writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
690 supercl->cache.modified = 0;
/*
 * flush_cluster() - flush every buffer on the cluster's buffer list, then
 * write the cluster header at clu_offset and clear its dirty bit.
 * NOTE(review): the void/brace lines and the flush_buffer(buf) loop body
 * are elided from this listing.
 */
694 flush_cluster(struct cluster_info *cl)
696 struct buffer_info *buf;
697 int64_t cluster_offset;
699 TAILQ_FOREACH(buf, &cl->buffer_list, entry)
701 cluster_offset = cl->clu_offset;
702 writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
703 cl->cache.modified = 0;
/*
 * flush_buffer() - write a single buffer's ondisk image at its volume
 * offset and clear its dirty bit.
 */
707 flush_buffer(struct buffer_info *buf)
709 writehammerbuf(buf->volume, buf->ondisk, buf->buf_offset);
710 buf->cache.modified = 0;
714 * Generic buffer initialization
/*
 * initbuffer() - stamp a freshly zeroed buffer header with its type and
 * initialize its element a-list to the all-allocated state (the caller
 * frees element ranges afterwards via hammer_alist_free()).
 */
717 initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
719 head->buf_type = type;
720 hammer_alist_init(live, 0, 0, HAMMER_ASTATE_ALLOC);
725 * Core I/O operations
/*
 * readhammerbuf() - read exactly one HAMMER_BUFSIZE block from the
 * volume's backing file at `offset`.  A short read is treated the same
 * as an error and is fatal (err).
 * NOTE(review): the static/void line, `ssize_t n` declaration, and braces
 * are elided from this listing.
 */
728 readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
732 n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
733 if (n != HAMMER_BUFSIZE)
734 err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
/*
 * writehammerbuf() - write exactly one HAMMER_BUFSIZE block to the
 * volume's backing file at `offset`.  A short write is treated the same
 * as an error and is fatal (err).
 * NOTE(review): the static/void line, `ssize_t n` declaration, and braces
 * are elided from this listing.
 */
740 writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
744 n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
745 if (n != HAMMER_BUFSIZE)
746 err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
750 panic(const char *ctl, ...)
755 vfprintf(stderr, ctl, va);
757 fprintf(stderr, "\n");