/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"

static void check_volume(struct volume_info *vol);
static void get_buffer_readahead(struct buffer_info *base);
static __inline int readhammervol(struct volume_info *vol);
static __inline int readhammerbuf(struct buffer_info *buf);
static __inline int writehammervol(struct volume_info *vol);
static __inline int writehammerbuf(struct buffer_info *buf);
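
/*
 * Readahead tuning: when a buffer is faulted in, up to -UseReadBehind
 * buffers behind it and UseReadAhead buffers ahead of it are brought in
 * as well (see get_buffer_readahead()).  Units are HAMMER_BUFSIZE
 * buffers.
 */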
uuid_t Hammer_FSType;	/* assumed defined here; referenced by __verify_volume() */
uuid_t Hammer_FSId;	/* assumed defined here; seeded by load_volume() */
int UseReadBehind = -4;
int UseReadAhead = 4;
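
/*
 * All open volumes are tracked on VolList.  valid_hammer_volumes counts
 * volume headers successfully loaded; the first one seeds Hammer_FSId,
 * against which every subsequent volume header is checked.
 */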
TAILQ_HEAD(volume_list, volume_info);
static struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static int valid_hammer_volumes;

static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
	struct buffer_info *buf;
	int hi;

	hi = buffer_hash(buf_offset);
	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
		if (buf->buf_offset == buf_offset)
			return(buf);
	return(NULL);
}

static struct volume_info*
__alloc_volume(const char *volname, int oflags)
{
	struct volume_info *vol;
	int i;

	vol = malloc(sizeof(*vol));
	if (vol == NULL)
		err(1, "alloc_volume");
	bzero(vol, sizeof(*vol));

	vol->vol_no = -1;
	vol->rdonly = (oflags == O_RDONLY);
	vol->name = strdup(volname);
	vol->fd = open(vol->name, oflags);
	if (vol->fd < 0)
		err(1, "alloc_volume: Failed to open %s", vol->name);

	vol->ondisk = malloc(HAMMER_BUFSIZE);
	if (vol->ondisk == NULL)
		err(1, "alloc_volume");
	bzero(vol->ondisk, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);

	return(vol);
}

static void
__add_volume(struct volume_info *vol)
{
	struct volume_info *scan;
	struct stat st1, st2;

	if (fstat(vol->fd, &st1) != 0)
		errx(1, "add_volume: %s: Failed to stat", vol->name);

	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol->vol_no) {
			errx(1, "add_volume: %s: Duplicate volume number %d "
				"against %s",
				vol->name, vol->vol_no, scan->name);
		}
		if (fstat(scan->fd, &st2) != 0) {
			errx(1, "add_volume: %s: Failed to stat %s",
				vol->name, scan->name);
		}
		if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
			errx(1, "add_volume: %s: Specified more than once",
				vol->name);
		}
	}

	TAILQ_INSERT_TAIL(&VolList, vol, entry);
}

static void
__verify_volume(struct volume_info *vol)
{
	hammer_volume_ondisk_t ondisk = vol->ondisk;

	if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
		errx(1, "verify_volume: Invalid root volume# %d",
			ondisk->vol_rootvol);
	}
	if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
		errx(1, "verify_volume: %s: Header does not indicate "
			"that this is a hammer volume", vol->name);
	}
	if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
		errx(1, "verify_volume: %s: FSId does not match other volumes!",
			vol->name);
	}
}

/*
 * Initialize a volume structure and ondisk vol_no field.
 */
struct volume_info*
init_volume(int32_t vol_no, const char *filename, int oflags)
{
	struct volume_info *vol;

	vol = __alloc_volume(filename, oflags);
	vol->vol_no = vol->ondisk->vol_no = vol_no;

	__add_volume(vol);

	return(vol);
}

/*
 * Initialize a volume structure and read ondisk volume header.
 */
struct volume_info*
load_volume(const char *filename, int oflags, int verify)
{
	struct volume_info *vol;
	int n;

	vol = __alloc_volume(filename, oflags);

	n = readhammervol(vol);
	if (n == -1)
		err(1, "load_volume: %s: Read failed at offset 0", vol->name);
	vol->vol_no = vol->ondisk->vol_no;

	if (valid_hammer_volumes++ == 0)
		Hammer_FSId = vol->ondisk->vol_fsid;
	if (verify)
		__verify_volume(vol);

	__add_volume(vol);

	return(vol);
}
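
/*
 * Hypothetical usage sketch (the caller is not part of this file): a
 * utility typically loads every volume named on its command line with
 * header verification enabled, then resolves the root volume:
 *
 *	for (i = 0; i < nvols; ++i)
 *		load_volume(av[i], O_RDONLY, 1);
 *	root_vol = get_root_volume();
 */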

/*
 * Check basic volume characteristics.
 */
static void
check_volume(struct volume_info *vol)
{
	struct partinfo pinfo;
	struct stat st;

	/*
	 * Get basic information about the volume
	 */
	if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
		/*
		 * Allow the formatting of regular files as HAMMER volumes
		 */
		if (fstat(vol->fd, &st) < 0)
			err(1, "Unable to stat %s", vol->name);
		vol->size = st.st_size;
		vol->type = "REGFILE";
	} else {
		/*
		 * When formatting a block device as a HAMMER volume the
		 * sector size must be compatible.  HAMMER uses 16384 byte
		 * filesystem buffers.
		 */
		if (pinfo.reserved_blocks) {
			errx(1, "HAMMER cannot be placed in a partition "
				"which overlaps the disklabel or MBR");
		}
		if (pinfo.media_blksize > HAMMER_BUFSIZE ||
		    HAMMER_BUFSIZE % pinfo.media_blksize) {
			errx(1, "A media sector size of %d is not supported",
				pinfo.media_blksize);
		}

		vol->size = pinfo.media_size;
		vol->device_offset = pinfo.media_offset;
		vol->type = "DEVICE";
	}
}
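
/*
 * vol_free_off and vol_free_end bracket the volume's allocatable
 * big-blocks and are encoded as zone-2 (raw buffer) offsets; the
 * asserts below catch callers that pass unencoded byte offsets.
 */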
void
assert_volume_offset(struct volume_info *vol)
{
	assert(hammer_is_zone_raw_buffer(vol->vol_free_off));
	assert(hammer_is_zone_raw_buffer(vol->vol_free_end));
}

struct volume_info*
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}

	return(vol);
}

struct volume_info*
get_root_volume(void)
{
	return(get_volume(HAMMER_ROOT_VOLNO));
}

/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 * isnew > 0 returns a zeroed buffer marked modified without reading
 * from disk; isnew == 0 reads the buffer if it is not already cached.
 */
static struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	int vol_no;
	int zone;
	int hi;
	int error = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX)
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, &error);
	if (error || buf_offset == HAMMER_OFF_BAD)
		return(NULL);
	assert(hammer_is_zone_raw_buffer(buf_offset));

	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	assert(volume != NULL);

	buf_offset &= ~HAMMER_BUFMASK64;
	buf = find_buffer(volume, buf_offset);

	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		buf->buf_offset = buf_offset;
		buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
							buf_offset);
		buf->volume = volume;
		buf->ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			if (readhammerbuf(buf) == -1) {
				err(1, "get_buffer: %s:%016jx "
					"Read failed at offset %016jx",
					volume->name,
					(intmax_t)buf->buf_offset,
					(intmax_t)buf->raw_offset);
			}
		}

		hi = buffer_hash(buf_offset);
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		hammer_cache_add(&buf->cache);
	} else {
		assert(buf->ondisk != NULL);
		assert(isnew <= 0);
		hammer_cache_used(&buf->cache);
	}

	++buf->cache.refs;
	hammer_cache_flush();

	if (isnew > 0) {
		assert(buf->cache.modified == 0);
		bzero(buf->ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (isnew >= 0)
		get_buffer_readahead(buf);

	return(buf);
}

static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
			raw_offset - vol->ondisk->vol_buf_beg);
		buf = find_buffer(vol, buf_offset);
		if (buf == NULL) {
			/* call with -1 to prevent another readahead */
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}

void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	if (buffer == NULL)
		return;
	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
		}
	}
}

/*
 * Retrieve a pointer to a buffer data given a buffer offset.  The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	if (*bufferp != NULL) {
		if (isnew > 0 ||
		    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(*bufferp);
			*bufferp = NULL;
		}
	}

	if (*bufferp == NULL) {
		*bufferp = get_buffer(buf_offset, isnew);
		if (*bufferp == NULL)
			return(NULL);
	}

	return(((char *)(*bufferp)->ondisk) +
		((int32_t)buf_offset & HAMMER_BUFMASK));
}
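
/*
 * Cursor idiom used throughout this file: callers keep a buffer_info
 * pointer alive across sequential get_buffer_data() calls so accesses
 * within the same 16KB buffer reuse the cached reference, e.g.
 * (hypothetical caller):
 *
 *	struct buffer_info *buffer = NULL;
 *	hammer_blockmap_layer1_t layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	...
 *	rel_buffer(buffer);
 */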

/*
 * Allocate HAMMER elements - B-Tree nodes
 */
hammer_node_ondisk_t
alloc_btree_node(hammer_off_t *offp, struct buffer_info **data_bufferp)
{
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, data_bufferp);
	bzero(node, sizeof(*node));
	return(node);
}

/*
 * Allocate HAMMER elements - meta data (inode, direntry, PFS, etc)
 */
void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
			      offp, data_bufferp);
	bzero(data, data_len);
	return(data);
}
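
/*
 * Hypothetical example (field use is illustrative only): formatting
 * code allocates a zeroed element, fills it in, marks the backing
 * buffer modified, and releases its reference:
 *
 *	struct buffer_info *buffer = NULL;
 *	hammer_off_t offset;
 *	hammer_node_ondisk_t node;
 *
 *	node = alloc_btree_node(&offset, &buffer);
 *	node->type = HAMMER_BTREE_TYPE_LEAF;
 *	buffer->cache.modified = 1;
 *	rel_buffer(buffer);
 */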

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
{
	hammer_blockmap_t blockmap;
	hammer_off_t zone_base;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
	assert(hammer_is_zone2_mapped_index(zone));

	blockmap = &root_vol->ondisk->vol0_blockmap[zone];
	zone_base = HAMMER_ZONE_ENCODE(zone, offset);

	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = 0;
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
	hammer_crc_set_blockmap(blockmap);
}
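
/*
 * HAMMER_ENCODE(zone, 255, -1) is the highest encodable address in the
 * zone (volume 255, maximum offset), so setting alloc_offset to it
 * marks the zone's entire address space as allocated; actual space
 * accounting is delegated to the freemap.
 */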

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_blockmap_t blockmap;
	hammer_blockmap_layer1_t layer1;
	int i, isnew;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
		isnew = ((i % HAMMER_BUFSIZE) == 0);
		layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		hammer_crc_set_layer1(layer1);
	}
	assert(i == HAMMER_BIGBLOCK_SIZE);
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = layer1_offset;
	blockmap->first_offset = 0;
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	hammer_crc_set_blockmap(blockmap);
}
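
/*
 * Freemap geometry, for reference: the freemap is a two-level radix
 * tree.  One big-block of layer1 entries points to big-blocks of
 * layer2 entries, and each layer2 entry describes one
 * HAMMER_BIGBLOCK_SIZE (8MB) big-block.  With 16-byte layer2 entries,
 * a single layer2 big-block holds 8MB / 16 = 512K entries and
 * therefore maps 512K * 8MB = 4TB (HAMMER_BLOCKMAP_LAYER2) of storage
 * per layer1 entry.
 */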

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t aligned_vol_free_end;
	hammer_blockmap_t freemap;
	int64_t count = 0;
	int64_t layer1_count = 0;

	root_vol = get_root_volume();

	assert_volume_offset(vol);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the big-blocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			hammer_crc_set_layer1(layer1);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_count = 0;
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			bzero(layer2, sizeof(*layer2));

			if (phys_offset + block_offset < vol->vol_free_off) {
				/*
				 * Big-blocks already allocated as part
				 * of the freemap bootstrap.
				 */
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			} else if (phys_offset + block_offset < vol->vol_free_end) {
				layer2->zone = 0;
				layer2->append_off = 0;
				layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
				++count;
				++layer1_count;
			} else {
				layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
			hammer_crc_set_layer2(layer2);
			buffer2->cache.modified = 1;
		}

		layer1->blocks_free += layer1_count;
		hammer_crc_set_layer1(layer1);
		buffer1->cache.modified = 1;
	}

	rel_buffer(buffer1);
	rel_buffer(buffer2);

	return(count);
}

/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
int64_t
count_freemap(struct volume_info *vol)
{
	hammer_off_t phys_offset;
	hammer_off_t vol_free_off;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;

	vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);

	assert_volume_offset(vol);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	/* the root volume reserves an extra big-block */
	if (vol->vol_no == HAMMER_ROOT_VOLNO)
		vol_free_off += HAMMER_BIGBLOCK_SIZE;

	/* account for the big-blocks consumed by layer2 metadata */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		vol_free_off += HAMMER_BIGBLOCK_SIZE;
	}

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BIGBLOCK_SIZE) {
		if (phys_offset < vol_free_off) {
			;	/* big-block is consumed by the bootstrap */
		} else if (phys_offset < vol->vol_free_end) {
			++count;
		}
	}

	return(count);
}

/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_volume_ondisk_t ondisk;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	uint32_t seqno;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
	ondisk = root_vol->ondisk;

	/*
	 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 big-blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 500MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
	undo_limit = *undo_buffer_size;
	if (undo_limit == 0) {
		undo_limit = HAMMER_VOL_BUF_SIZE(ondisk) / 1000;
		if (undo_limit < 500*1024*1024)
			undo_limit = 500*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
		     ~HAMMER_BIGBLOCK_MASK64;
	if (undo_limit < HAMMER_BIGBLOCK_SIZE)
		undo_limit = HAMMER_BIGBLOCK_SIZE;
	if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	*undo_buffer_size = undo_limit;
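
	/*
	 * Worked example of the sizing above (numbers illustrative): a
	 * 100GB volume gives 100GB / 1000 = ~100MB, which is below the
	 * 500MB floor, so the UNDO FIFO becomes 500MB.  A 1TB volume
	 * gives ~1GB, which is kept after rounding up to a big-block
	 * multiple, subject to the HAMMER_UNDO_LAYER2 cap.
	 */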

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	hammer_crc_set_blockmap(blockmap);

	limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(root_vol,
						HAMMER_ZONE_UNDO_INDEX);
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
	}
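
	/*
	 * Each pre-initialized record below is a HAMMER_UNDO_ALIGN-sized
	 * DUMMY FIFO entry: a head at the front and a matching tail at
	 * the end of the record, so code scanning the FIFO in either
	 * direction sees well-formed entries.
	 */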
	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(head, bytes);

		scan += bytes;
	}
	rel_buffer(buffer);
}

static const char *zone_labels[] = {
	"",		/* 0 */
	"raw_volume",	/* 1 */
	"raw_buffer",	/* 2 */
	"undo",		/* 3 */
	"freemap",	/* 4 */
	"",		/* 5 */
	"",		/* 6 */
	"",		/* 7 */
	"btree",	/* 8 */
	"meta",		/* 9 */
	"large_data",	/* 10 */
	"small_data",	/* 11 */
	"",		/* 12 */
	"",		/* 13 */
	"",		/* 14 */
	"unavail",	/* 15 */
};

void
print_blockmap(const struct volume_info *root_vol)
{
	hammer_blockmap_t blockmap;
	hammer_volume_ondisk_t ondisk;
	int64_t size;
	int64_t used;
	int i;
#define INDENT ""	/* field indent prefix; assumed empty here */

	ondisk = root_vol->ondisk;
	printf(INDENT"vol_label\t%s\n", ondisk->vol_label);
	printf(INDENT"vol_count\t%d\n", ondisk->vol_count);
	printf(INDENT"vol_bot_beg\t%s\n", sizetostr(ondisk->vol_bot_beg));
	printf(INDENT"vol_mem_beg\t%s\n", sizetostr(ondisk->vol_mem_beg));
	printf(INDENT"vol_buf_beg\t%s\n", sizetostr(ondisk->vol_buf_beg));
	printf(INDENT"vol_buf_end\t%s\n", sizetostr(ondisk->vol_buf_end));
	printf(INDENT"vol0_next_tid\t%016jx\n",
	       (uintmax_t)ondisk->vol0_next_tid);

	blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	size = blockmap->alloc_offset & HAMMER_OFF_LONG_MASK;
	if (blockmap->first_offset <= blockmap->next_offset)
		used = blockmap->next_offset - blockmap->first_offset;
	else
		used = blockmap->alloc_offset - blockmap->first_offset +
			(blockmap->next_offset & HAMMER_OFF_LONG_MASK);
	printf(INDENT"undo_size\t%s\n", sizetostr(size));
	printf(INDENT"undo_used\t%s\n", sizetostr(used));

	printf(INDENT"zone #             "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &ondisk->vol0_blockmap[i];
		printf(INDENT"zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
			i, zone_labels[i],
			(uintmax_t)blockmap->phys_offset,
			(uintmax_t)blockmap->first_offset,
			(uintmax_t)blockmap->next_offset,
			(uintmax_t)blockmap->alloc_offset);
	}
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	if (writehammervol(volume) == -1)
		err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
}

void
flush_buffer(struct buffer_info *buffer)
{
	struct volume_info *vol;

	vol = buffer->volume;
	if (writehammerbuf(buffer) == -1)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
	buffer->cache.modified = 0;
}

/*
 * Core I/O operations
 */
static int
__read(struct volume_info *vol, void *data, int64_t offset, int size)
{
	ssize_t n;

	n = pread(vol->fd, data, size, offset);
	if (n != size)
		return(-1);
	return(0);
}

static __inline int
readhammervol(struct volume_info *vol)
{
	return(__read(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
readhammerbuf(struct buffer_info *buf)
{
	return(__read(buf->volume, buf->ondisk, buf->raw_offset,
		HAMMER_BUFSIZE));
}

static int
__write(struct volume_info *vol, const void *data, int64_t offset, int size)
{
	ssize_t n;

	/* assumed guard: skip writes for volumes opened read-only */
	if (vol->rdonly)
		return(0);

	n = pwrite(vol->fd, data, size, offset);
	if (n != size)
		return(-1);
	return(0);
}

static __inline int
writehammervol(struct volume_info *vol)
{
	return(__write(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
}

static __inline int
writehammerbuf(struct buffer_info *buf)
{
	return(__write(buf->volume, buf->ondisk, buf->raw_offset,
		HAMMER_BUFSIZE));
}

int64_t
init_boot_area_size(int64_t value, off_t avg_vol_size)
{
	if (value == 0) {
		value = HAMMER_BOOT_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
			value >>= 1;
		if (value < HAMMER_BOOT_MINBYTES)
			value = HAMMER_BOOT_MINBYTES;
	} else if (value < HAMMER_BOOT_MINBYTES) {
		value = HAMMER_BOOT_MINBYTES;
	}

	return(value);
}

int64_t
init_mem_area_size(int64_t value, off_t avg_vol_size)
{
	if (value == 0) {
		value = HAMMER_MEM_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
			value >>= 1;
		if (value < HAMMER_MEM_MINBYTES)
			value = HAMMER_MEM_MINBYTES;
	} else if (value < HAMMER_MEM_MINBYTES) {
		value = HAMMER_MEM_MINBYTES;
	}

	return(value);
}