/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"
static void check_volume(struct volume_info *vol);
static void get_buffer_readahead(struct buffer_info *base);
static __inline int readhammervol(struct volume_info *vol);
static __inline int readhammerbuf(struct buffer_info *buf);
static __inline int writehammervol(struct volume_info *vol);
static __inline int writehammerbuf(struct buffer_info *buf);

int UseReadBehind = -4;
int UseReadAhead = 4;

TAILQ_HEAD(volume_list, volume_info);
static struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static int valid_hammer_volumes;
static int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
	struct buffer_info *buf;
	int hi;

	hi = buffer_hash(buf_offset);
	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
		if (buf->buf_offset == buf_offset)
			return(buf);
	return(NULL);
}
static struct volume_info *
__alloc_volume(const char *volname, int oflags)
{
	struct volume_info *vol;
	int i;

	vol = malloc(sizeof(*vol));
	if (vol == NULL)
		err(1, "alloc_volume");
	bzero(vol, sizeof(*vol));

	vol->vol_no = -1;
	vol->rdonly = (oflags == O_RDONLY);
	vol->name = strdup(volname);
	vol->fd = open(vol->name, oflags);
	if (vol->fd < 0)
		err(1, "alloc_volume: Failed to open %s", vol->name);
	check_volume(vol);

	vol->ondisk = malloc(HAMMER_BUFSIZE);
	if (vol->ondisk == NULL)
		err(1, "alloc_volume");
	bzero(vol->ondisk, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);

	return(vol);
}
static void
__add_volume(struct volume_info *vol)
{
	struct volume_info *scan;
	struct stat st1, st2;

	if (fstat(vol->fd, &st1) != 0)
		errx(1, "add_volume: %s: Failed to stat", vol->name);

	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol->vol_no) {
			errx(1, "add_volume: %s: Duplicate volume number %d "
				"against %s",
				vol->name, vol->vol_no, scan->name);
		}
		if (fstat(scan->fd, &st2) != 0) {
			errx(1, "add_volume: %s: Failed to stat %s",
				vol->name, scan->name);
		}
		if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
			errx(1, "add_volume: %s: Specified more than once",
				vol->name);
		}
	}

	TAILQ_INSERT_TAIL(&VolList, vol, entry);
}
static void
__verify_volume(struct volume_info *vol)
{
	hammer_volume_ondisk_t ondisk = vol->ondisk;

	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		errx(1, "verify_volume: Invalid volume signature %016jx",
			ondisk->vol_signature);
	}
	if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
		errx(1, "verify_volume: Invalid root volume# %d",
			ondisk->vol_rootvol);
	}
	if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
		errx(1, "verify_volume: %s: Header does not indicate "
			"that this is a HAMMER volume", vol->name);
	}
	if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
		errx(1, "verify_volume: %s: FSId does not match other volumes!",
			vol->name);
	}
}
/*
 * Initialize a volume structure and ondisk vol_no field.
 */
struct volume_info *
init_volume(const char *filename, int oflags, int32_t vol_no)
{
	struct volume_info *vol;

	vol = __alloc_volume(filename, oflags);
	vol->vol_no = vol->ondisk->vol_no = vol_no;

	__add_volume(vol);

	return(vol);
}

/*
 * Initialize a volume structure and read ondisk volume header.
 */
struct volume_info *
load_volume(const char *filename, int oflags, int verify)
{
	struct volume_info *vol;
	int n;

	vol = __alloc_volume(filename, oflags);

	n = readhammervol(vol);
	if (n == -1) {
		err(1, "load_volume: %s: Read failed at offset 0", vol->name);
	}
	vol->vol_no = vol->ondisk->vol_no;

	if (valid_hammer_volumes++ == 0)
		Hammer_FSId = vol->ondisk->vol_fsid;
	if (verify)
		__verify_volume(vol);

	__add_volume(vol);

	return(vol);
}
/*
 * Check basic volume characteristics.
 */
static void
check_volume(struct volume_info *vol)
{
	struct partinfo pinfo;
	struct stat st;

	/*
	 * Get basic information about the volume
	 */
	if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
		/*
		 * Allow the formatting of regular files as HAMMER volumes
		 */
		if (fstat(vol->fd, &st) < 0)
			err(1, "Unable to stat %s", vol->name);
		vol->size = st.st_size;
		vol->type = "REGFILE";
	} else {
		/*
		 * When formatting a block device as a HAMMER volume the
		 * sector size must be compatible.  HAMMER uses 16384 byte
		 * filesystem buffers.
		 */
		if (pinfo.reserved_blocks) {
			errx(1, "HAMMER cannot be placed in a partition "
				"which overlaps the disklabel or MBR");
		}
		if (pinfo.media_blksize > HAMMER_BUFSIZE ||
		    HAMMER_BUFSIZE % pinfo.media_blksize) {
			errx(1, "A media sector size of %d is not supported",
				pinfo.media_blksize);
		}

		vol->size = pinfo.media_size;
		vol->device_offset = pinfo.media_offset;
		vol->type = "DEVICE";
	}
}
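
/*
 * Illustrative note: the sector-size check above accepts the common
 * 512-byte and 4096-byte devices because both divide HAMMER_BUFSIZE
 * (16384) evenly, while a hypothetical device reporting a 32768-byte
 * sector would be rejected since it exceeds the 16384-byte filesystem
 * buffer.
 */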
void
assert_volume_offset(struct volume_info *vol)
{
	assert(hammer_is_zone_raw_buffer(vol->vol_free_off));
	assert(hammer_is_zone_raw_buffer(vol->vol_free_end));
}

struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}

	return(vol);
}

struct volume_info *
get_root_volume(void)
{
	return(get_volume(HAMMER_ROOT_VOLNO));
}
/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
static struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	int vol_no;
	int zone;
	int hi;
	int error = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX)
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, &error);
	if (error || buf_offset == HAMMER_OFF_BAD)
		return(NULL);
	assert(hammer_is_zone_raw_buffer(buf_offset));

	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	assert(volume != NULL);

	buf_offset &= ~HAMMER_BUFMASK64;
	buf = find_buffer(volume, buf_offset);

	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		buf->buf_offset = buf_offset;
		buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
							buf_offset);
		buf->volume = volume;
		buf->ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			if (readhammerbuf(buf) == -1) {
				err(1, "get_buffer: %s:%016jx "
					"Read failed at offset %016jx",
					volume->name,
					(intmax_t)buf->buf_offset,
					(intmax_t)buf->raw_offset);
			}
		}

		hi = buffer_hash(buf_offset);
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		hammer_cache_add(&buf->cache);
	}

	assert(buf->ondisk != NULL);
	hammer_cache_used(&buf->cache);

	hammer_cache_flush();

	if (isnew > 0) {
		assert(buf->cache.modified == 0);
		bzero(buf->ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (isnew >= 0)
		get_buffer_readahead(buf);
	return(buf);
}
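
/*
 * Summary of the isnew convention used above (and by get_buffer_data()):
 * isnew > 0 returns a zeroed buffer marked modified without reading it
 * from disk, isnew == 0 reads the buffer and may trigger readahead, and
 * isnew == -1 reads the buffer but suppresses further readahead.
 */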
static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
			raw_offset - vol->ondisk->vol_buf_beg);
		buf = find_buffer(vol, buf_offset);
		if (buf == NULL) {
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}
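
/*
 * Illustrative example: with the default UseReadBehind = -4 and a
 * UseReadAhead of 4 (the latter assumed above), a miss on buffer N
 * pre-populates the cache with raw buffers N-4 through N+3, skipping
 * N itself (ri == 0) and anything outside the volume's buffer area.
 */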
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	if (buffer == NULL)
		return;
	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
		}
	}
}
/*
 * Retrieve a pointer to a buffer data given a buffer offset.  The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	if (*bufferp != NULL) {
		if (isnew > 0 ||
		    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(*bufferp);
			*bufferp = NULL;
		}
	}

	if (*bufferp == NULL) {
		*bufferp = get_buffer(buf_offset, isnew);
		if (*bufferp == NULL)
			return(NULL);
	}

	return(((char *)(*bufferp)->ondisk) +
		((int32_t)buf_offset & HAMMER_BUFMASK));
}
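
/*
 * Typical usage pattern (a sketch, not lifted verbatim from any one
 * caller; the same bufferp is reused across calls and is only released
 * when the requested offset falls into a different 16KB buffer):
 *
 *	struct buffer_info *buffer = NULL;
 *	hammer_blockmap_layer1_t layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	layer1->blocks_free = 0;
 *	buffer->cache.modified = 1;
 *	rel_buffer(buffer);
 */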
/*
 * Allocate HAMMER elements - B-Tree nodes
 */
hammer_node_ondisk_t
alloc_btree_node(hammer_off_t *offp, struct buffer_info **data_bufferp)
{
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, data_bufferp);
	bzero(node, sizeof(*node));
	return(node);
}

/*
 * Allocate HAMMER elements - meta data (inode, direntry, PFS, etc)
 */
void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
			      offp, data_bufferp);
	bzero(data, data_len);
	return(data);
}
/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
{
	hammer_blockmap_t blockmap;
	hammer_off_t zone_base;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

	assert(hammer_is_zone2_mapped_index(zone));

	blockmap = &root_vol->ondisk->vol0_blockmap[zone];
	zone_base = HAMMER_ZONE_ENCODE(zone, offset);

	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = 0;
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
	hammer_crc_set_blockmap(blockmap);
}
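
/*
 * Layout reminder (general HAMMER addressing, noted here for context):
 * a hammer_off_t carries a 4-bit zone in its top bits and, for
 * block-mapped addresses, an 8-bit volume number below that, with the
 * remaining bits holding the byte offset.  HAMMER_ENCODE(zone, 255, -1)
 * above therefore yields the highest encodable address in the zone,
 * which is what alloc_offset is initialized to.
 */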
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialize_freemap() code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_blockmap_t blockmap;
	hammer_blockmap_layer1_t layer1;
	int i, isnew;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

	layer1_offset = bootstrap_bigblock(root_vol);
	for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
		isnew = ((i % HAMMER_BUFSIZE) == 0);
		layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		hammer_crc_set_layer1(layer1);
	}
	assert(i == HAMMER_BIGBLOCK_SIZE);
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = layer1_offset;
	blockmap->first_offset = 0;
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	hammer_crc_set_blockmap(blockmap);
}
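
/*
 * Rough coverage arithmetic (assuming the stock 8MB big-blocks, 32-byte
 * layer1 entries and 16-byte layer2 entries): one layer2 big-block holds
 * 8MB / 16 = 524288 entries, each describing an 8MB big-block, so a
 * single layer1 entry covers 4TB of address space; the single layer1
 * big-block bootstrapped above holds 8MB / 32 = 262144 entries, enough
 * for the entire zone.
 */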
/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t aligned_vol_free_end;
	hammer_blockmap_t freemap;
	int64_t count = 0;
	int64_t layer1_count = 0;

	root_vol = get_root_volume();

	assert_volume_offset(vol);
	aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol->vol_free_end);

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the big-blocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = bootstrap_bigblock(vol);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			hammer_crc_set_layer1(layer1);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_count = 0;
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			bzero(layer2, sizeof(*layer2));

			if (phys_offset + block_offset < vol->vol_free_off) {
				/*
				 * Big-blocks already allocated as part
				 * of the freemap bootstrap.
				 */
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			} else if (phys_offset + block_offset < vol->vol_free_end) {
				layer2->zone = 0;
				layer2->append_off = 0;
				layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
				++count;
				++layer1_count;
			} else {
				layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
			hammer_crc_set_layer2(layer2);
			buffer2->cache.modified = 1;
		}

		layer1->blocks_free += layer1_count;
		hammer_crc_set_layer1(layer1);
		buffer1->cache.modified = 1;
	}

	rel_buffer(buffer1);
	rel_buffer(buffer2);

	return(count);
}
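
/*
 * Summary of the two passes above: the first pass walks the volume in
 * HAMMER_BLOCKMAP_LAYER2-sized strides (4TB with the stock constants)
 * and bootstraps one big-block of layer2 entries per stride; the second
 * pass then marks each 8MB big-block as owned by the freemap itself
 * (the bootstrap blocks), free (counted and returned), or unavailable
 * (the alignment tail past vol_free_end).
 */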
/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
int64_t
count_freemap(struct volume_info *vol)
{
	hammer_off_t phys_offset;
	hammer_off_t vol_free_off;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;

	vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);

	assert_volume_offset(vol);
	aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol->vol_free_end);

	if (vol->vol_no == HAMMER_ROOT_VOLNO)
		vol_free_off += HAMMER_BIGBLOCK_SIZE;

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		vol_free_off += HAMMER_BIGBLOCK_SIZE;
	}

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BIGBLOCK_SIZE) {
		if (phys_offset < vol_free_off) {
			;
		} else if (phys_offset < vol->vol_free_end) {
			++count;
		}
	}

	return(count);
}
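
/*
 * Worked example (illustrative, stock 8MB big-blocks assumed): a 100GB
 * volume spans roughly 12800 big-blocks.  The loops above skip one
 * big-block per HAMMER_BLOCKMAP_LAYER2 stride (reserved for the layer2
 * bootstrap) plus one extra big-block on the root volume, so roughly
 * 12798 big-blocks would be reported as available for data and UNDO.
 */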
/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
{
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_volume_ondisk_t ondisk;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	uint32_t seqno;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
	ondisk = root_vol->ondisk;

	/*
	 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
	 * up to HAMMER_MAX_UNDO_BIGBLOCKS big-blocks.
	 * Size to approximately 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 512MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
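	/*
	 * Illustrative sizing from the 0.1% rule above: a 500GB volume
	 * computes ~500MB of UNDO, just under the 512MB floor, and is
	 * bumped up to it; a 1TB volume computes ~1GB, which (assuming
	 * the stock 8MB big-blocks and a HAMMER_MAX_UNDO_BIGBLOCKS of
	 * 128) is also the 1GB ceiling, so larger volumes simply get
	 * the maximum.
	 */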
	undo_limit = *undo_buffer_size;
	if (undo_limit == 0) {
		undo_limit = HAMMER_VOL_BUF_SIZE(ondisk) / 1000;
		if (undo_limit < HAMMER_BIGBLOCK_SIZE * HAMMER_MIN_UNDO_BIGBLOCKS)
			undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_MIN_UNDO_BIGBLOCKS;
	}
	undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
		     ~HAMMER_BIGBLOCK_MASK64;
	if (undo_limit < HAMMER_BIGBLOCK_SIZE)
		undo_limit = HAMMER_BIGBLOCK_SIZE;
	if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_MAX_UNDO_BIGBLOCKS)
		undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_MAX_UNDO_BIGBLOCKS;
	*undo_buffer_size = undo_limit;

	blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ENCODE_UNDO(0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_UNDO(undo_limit);
	hammer_crc_set_blockmap(blockmap);

	limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
	assert(limit_index <= HAMMER_MAX_UNDO_BIGBLOCKS);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_undo_bigblock(root_vol);
	}
	while (n < HAMMER_MAX_UNDO_BIGBLOCKS) {
		ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
	}

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)HAMMER_OFF_LONG_ENCODE(blockmap->alloc_offset) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(head, bytes);

		scan += bytes;
	}
	rel_buffer(buffer);
}
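
/*
 * Each pass of the loop above lays down one dummy FIFO record per
 * HAMMER_UNDO_ALIGN bytes: a hammer_fifo_head at the front, a
 * hammer_fifo_tail flush against the end of the record, a monotonically
 * increasing sequence number and a CRC over the head, so that recovery
 * scans of the UNDO area only ever see well-formed (if meaningless)
 * records.
 */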
const char *zone_labels[] = {
	"",		/* 0 */
	"raw_volume",	/* 1 */
	"raw_buffer",	/* 2 */
	"undo",		/* 3 */
	"freemap",	/* 4 */
	"",		/* 5 */
	"",		/* 6 */
	"",		/* 7 */
	"btree",	/* 8 */
	"meta",		/* 9 */
	"large_data",	/* 10 */
	"small_data",	/* 11 */
	"",		/* 12 */
	"",		/* 13 */
	"",		/* 14 */
	"unavail",	/* 15 */
};
void
print_blockmap(const struct volume_info *vol)
{
	hammer_blockmap_t blockmap;
	hammer_volume_ondisk_t ondisk;
	int64_t size;
	int64_t used;
	int i;

	ondisk = vol->ondisk;
	printf(INDENT"vol_label\t%s\n", ondisk->vol_label);
	printf(INDENT"vol_count\t%d\n", ondisk->vol_count);
	printf(INDENT"vol_bot_beg\t%s\n", sizetostr(ondisk->vol_bot_beg));
	printf(INDENT"vol_mem_beg\t%s\n", sizetostr(ondisk->vol_mem_beg));
	printf(INDENT"vol_buf_beg\t%s\n", sizetostr(ondisk->vol_buf_beg));
	printf(INDENT"vol_buf_end\t%s\n", sizetostr(ondisk->vol_buf_end));
	printf(INDENT"vol0_next_tid\t%016jx\n",
	       (uintmax_t)ondisk->vol0_next_tid);

	blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	size = HAMMER_OFF_LONG_ENCODE(blockmap->alloc_offset);
	if (blockmap->first_offset <= blockmap->next_offset)
		used = blockmap->next_offset - blockmap->first_offset;
	else
		used = blockmap->alloc_offset - blockmap->first_offset +
			HAMMER_OFF_LONG_ENCODE(blockmap->next_offset);
	printf(INDENT"undo_size\t%s\n", sizetostr(size));
	printf(INDENT"undo_used\t%s\n", sizetostr(used));

	printf(INDENT"zone #             "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &ondisk->vol0_blockmap[i];
		printf(INDENT"zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
			i, zone_labels[i],
			(uintmax_t)blockmap->phys_offset,
			(uintmax_t)blockmap->first_offset,
			(uintmax_t)blockmap->next_offset,
			(uintmax_t)blockmap->alloc_offset);
	}
}
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	if (writehammervol(volume) == -1)
		err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
}

void
flush_buffer(struct buffer_info *buffer)
{
	struct volume_info *vol;

	vol = buffer->volume;
	if (writehammerbuf(buffer) == -1)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
	buffer->cache.modified = 0;
}
/*
 * Core I/O operations
 */
static int
__read(struct volume_info *vol, void *data, int64_t offset, int size)
{
	ssize_t n;

	n = pread(vol->fd, data, size, offset);
	if (n != size)
		return(-1);
	return(0);
}

static __inline int
readhammervol(struct volume_info *vol)
{
	return(__read(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
readhammerbuf(struct buffer_info *buf)
{
	return(__read(buf->volume, buf->ondisk, buf->raw_offset, HAMMER_BUFSIZE));
}

static int
__write(struct volume_info *vol, const void *data, int64_t offset, int size)
{
	ssize_t n;

	if (vol->rdonly)
		return(0);

	n = pwrite(vol->fd, data, size, offset);
	if (n != size)
		return(-1);
	return(0);
}

static __inline int
writehammervol(struct volume_info *vol)
{
	return(__write(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
writehammerbuf(struct buffer_info *buf)
{
	return(__write(buf->volume, buf->ondisk, buf->raw_offset, HAMMER_BUFSIZE));
}
int64_t init_boot_area_size(int64_t value, off_t avg_vol_size)
{
	if (value == 0) {
		value = HAMMER_BOOT_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
			value >>= 1;
	}

	if (value < HAMMER_BOOT_MINBYTES) {
		value = HAMMER_BOOT_MINBYTES;
	} else if (value > HAMMER_BOOT_MAXBYTES) {
		value = HAMMER_BOOT_MAXBYTES;
	}

	return(value);
}

int64_t init_memory_log_size(int64_t value, off_t avg_vol_size)
{
	if (value == 0) {
		value = HAMMER_MEM_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
			value >>= 1;
	}

	if (value < HAMMER_MEM_MINBYTES) {
		value = HAMMER_MEM_MINBYTES;
	} else if (value > HAMMER_MEM_MAXBYTES) {
		value = HAMMER_MEM_MAXBYTES;