2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/diskslice.h>
36 #include <sys/diskmbr.h>
38 #include "hammer_util.h"
40 static void check_volume(struct volume_info *vol);
41 static void get_buffer_readahead(struct buffer_info *base);
42 static void *get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
44 static __inline int readhammervol(struct volume_info *vol);
45 static __inline int readhammerbuf(struct buffer_info *buf);
46 static __inline int writehammervol(struct volume_info *vol);
47 static __inline int writehammerbuf(struct buffer_info *buf);
51 int UseReadBehind = -4;
55 TAILQ_HEAD(volume_list, volume_info);
56 static struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
57 static int valid_hammer_volumes;
61 buffer_hash(hammer_off_t buf_offset)
65 hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
69 static struct buffer_info*
70 find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
73 struct buffer_info *buf;
75 hi = buffer_hash(buf_offset);
76 TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
77 if (buf->buf_offset == buf_offset)
84 __alloc_volume(const char *volname, int oflags)
86 struct volume_info *vol;
89 vol = malloc(sizeof(*vol));
91 err(1, "alloc_volume");
92 bzero(vol, sizeof(*vol));
95 vol->rdonly = (oflags == O_RDONLY);
96 vol->name = strdup(volname);
97 vol->fd = open(vol->name, oflags);
99 err(1, "alloc_volume: Failed to open %s", vol->name);
102 vol->ondisk = malloc(HAMMER_BUFSIZE);
103 if (vol->ondisk == NULL)
104 err(1, "alloc_volume");
105 bzero(vol->ondisk, HAMMER_BUFSIZE);
107 for (i = 0; i < HAMMER_BUFLISTS; ++i)
108 TAILQ_INIT(&vol->buffer_lists[i]);
114 __add_volume(struct volume_info *vol)
116 struct volume_info *scan;
117 struct stat st1, st2;
119 if (fstat(vol->fd, &st1) != 0)
120 errx(1, "add_volume: %s: Failed to stat", vol->name);
122 TAILQ_FOREACH(scan, &VolList, entry) {
123 if (scan->vol_no == vol->vol_no) {
124 errx(1, "add_volume: %s: Duplicate volume number %d "
126 vol->name, vol->vol_no, scan->name);
128 if (fstat(scan->fd, &st2) != 0) {
129 errx(1, "add_volume: %s: Failed to stat %s",
130 vol->name, scan->name);
132 if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
133 errx(1, "add_volume: %s: Specified more than once",
138 TAILQ_INSERT_TAIL(&VolList, vol, entry);
142 * Initialize a volume structure and ondisk vol_no field.
145 init_volume(int32_t vol_no, const char *filename, int oflags)
147 struct volume_info *vol;
149 vol = __alloc_volume(filename, oflags);
150 vol->vol_no = vol->ondisk->vol_no = vol_no;
158 * Initialize a volume structure and read ondisk volume header.
161 load_volume(const char *filename, int oflags)
163 struct volume_info *vol;
164 hammer_volume_ondisk_t ondisk;
167 vol = __alloc_volume(filename, oflags);
169 n = readhammervol(vol);
171 err(1, "load_volume: %s: Read failed at offset 0", vol->name);
173 ondisk = vol->ondisk;
174 vol->vol_no = ondisk->vol_no;
176 if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
177 errx(1, "load_volume: Invalid root volume# %d",
178 ondisk->vol_rootvol);
181 if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
182 errx(1, "load_volume: %s: Header does not indicate "
183 "that this is a hammer volume", vol->name);
186 if (valid_hammer_volumes++ == 0) {
187 Hammer_FSId = ondisk->vol_fsid;
188 } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
189 errx(1, "load_volume: %s: FSId does match other volumes!",
199 * Check basic volume characteristics.
202 check_volume(struct volume_info *vol)
204 struct partinfo pinfo;
208 * Get basic information about the volume
210 if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
212 * Allow the formatting of regular files as HAMMER volumes
214 if (fstat(vol->fd, &st) < 0)
215 err(1, "Unable to stat %s", vol->name);
216 vol->size = st.st_size;
217 vol->type = "REGFILE";
220 * When formatting a block device as a HAMMER volume the
221 * sector size must be compatible. HAMMER uses 16384 byte
222 * filesystem buffers.
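* As an illustration (not part of the original comment): a 512 or 4096
* byte media sector size divides the 16384 byte buffer evenly and
* passes the check below, while a sector size larger than 16384 bytes,
* or one that does not divide 16384 evenly, is rejected.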
224 if (pinfo.reserved_blocks) {
225 errx(1, "HAMMER cannot be placed in a partition "
226 "which overlaps the disklabel or MBR");
228 if (pinfo.media_blksize > HAMMER_BUFSIZE ||
229 HAMMER_BUFSIZE % pinfo.media_blksize) {
230 errx(1, "A media sector size of %d is not supported",
231 pinfo.media_blksize);
234 vol->size = pinfo.media_size;
235 vol->device_offset = pinfo.media_offset;
236 vol->type = "DEVICE";
241 get_volume(int32_t vol_no)
243 struct volume_info *vol;
245 TAILQ_FOREACH(vol, &VolList, entry) {
246 if (vol->vol_no == vol_no)
254 get_root_volume(void)
256 struct volume_info *root_vol;
258 root_vol = get_volume(HAMMER_ROOT_VOLNO);
259 assert(root_vol != NULL);
265 * Acquire the specified buffer. isnew is -1 only when called
266 * via get_buffer_readahead() to prevent another readahead.
268 static struct buffer_info *
269 get_buffer(hammer_off_t buf_offset, int isnew)
271 struct buffer_info *buf;
272 struct volume_info *volume;
279 zone = HAMMER_ZONE_DECODE(buf_offset);
280 if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX)
281 buf_offset = blockmap_lookup(buf_offset, NULL, NULL, &error);
282 if (error || buf_offset == HAMMER_OFF_BAD)
284 assert(hammer_is_zone_raw_buffer(buf_offset));
286 vol_no = HAMMER_VOL_DECODE(buf_offset);
287 volume = get_volume(vol_no);
288 assert(volume != NULL);
290 buf_offset &= ~HAMMER_BUFMASK64;
291 buf = find_buffer(volume, buf_offset);
294 buf = malloc(sizeof(*buf));
295 bzero(buf, sizeof(*buf));
296 buf->buf_offset = buf_offset;
297 buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
299 buf->volume = volume;
300 buf->ondisk = malloc(HAMMER_BUFSIZE);
302 if (readhammerbuf(buf) == -1) {
303 err(1, "get_buffer: %s:%016jx "
304 "Read failed at offset %016jx",
306 (intmax_t)buf->buf_offset,
307 (intmax_t)buf->raw_offset);
311 hi = buffer_hash(buf_offset);
312 TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
313 hammer_cache_add(&buf->cache);
316 assert(buf->ondisk != NULL);
318 hammer_cache_used(&buf->cache);
322 hammer_cache_flush();
325 assert(buf->cache.modified == 0);
326 bzero(buf->ondisk, HAMMER_BUFSIZE);
327 buf->cache.modified = 1;
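/*
 * A freshly created buffer (isnew > 0, presumably) is cleared and
 * marked modified above so that its zeroed contents reach the media
 * on the next flush even if the caller never writes to it.
 */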
330 get_buffer_readahead(buf);
335 get_buffer_readahead(struct buffer_info *base)
337 struct buffer_info *buf;
338 struct volume_info *vol;
339 hammer_off_t buf_offset;
341 int ri = UseReadBehind;
342 int re = UseReadAhead;
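/*
 * The readahead window runs from UseReadBehind buffers behind the base
 * buffer (ri, -4 by default) to UseReadAhead buffers ahead of it (re).
 * Raw buffers in that range which are not already cached are pulled in
 * with isnew == -1 so they do not trigger further readahead, and the
 * base buffer itself (ri == 0) is skipped.
 */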
344 raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
348 if (raw_offset >= vol->ondisk->vol_buf_end)
350 if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
352 raw_offset += HAMMER_BUFSIZE;
355 buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
356 raw_offset - vol->ondisk->vol_buf_beg);
357 buf = find_buffer(vol, buf_offset);
359 buf = get_buffer(buf_offset, -1);
363 raw_offset += HAMMER_BUFSIZE;
368 rel_buffer(struct buffer_info *buffer)
370 struct volume_info *volume;
375 assert(buffer->cache.refs > 0);
376 if (--buffer->cache.refs == 0) {
377 if (buffer->cache.delete) {
378 hi = buffer_hash(buffer->buf_offset);
379 volume = buffer->volume;
380 if (buffer->cache.modified)
381 flush_buffer(buffer);
382 TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
383 hammer_cache_del(&buffer->cache);
384 free(buffer->ondisk);
391 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
392 * bufferp is freed if isnew or the offset is out of range of the cached data.
393 * If bufferp is freed a referenced buffer is loaded into it.
396 get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
399 if (*bufferp != NULL) {
401 (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
402 rel_buffer(*bufferp);
406 return(get_ondisk(buf_offset, bufferp, isnew));
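/*
 * Illustrative usage sketch (an assumed typical caller, mirroring the
 * pattern used by format_freemap() and initialize_freemap() below):
 * keep one buffer_info pointer across a run of lookups and release it
 * when done.
 *
 *	struct buffer_info *buffer = NULL;
 *	hammer_blockmap_layer1_t layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	layer1->blocks_free = 0;
 *	buffer->cache.modified = 1;
 *	rel_buffer(buffer);
 *
 * layer1_offset here stands for any translatable buffer offset;
 * consecutive offsets falling in the same 16KB buffer reuse the cached
 * *bufferp instead of releasing and re-reading it.
 */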
410 * Retrieve a pointer to a B-Tree node given a zone offset. The underlying
411 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
414 get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
416 if (*bufferp != NULL) {
417 rel_buffer(*bufferp);
420 return(get_ondisk(node_offset, bufferp, 0));
424 * Return a pointer to buffer data given a buffer offset.
425 * If *bufferp is NULL, acquire the buffer; otherwise use that buffer.
428 get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
430 if (*bufferp == NULL) {
431 *bufferp = get_buffer(buf_offset, isnew);
432 if (*bufferp == NULL)
436 return(((char *)(*bufferp)->ondisk) +
437 ((int32_t)buf_offset & HAMMER_BUFMASK));
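/*
 * Worked example (illustrative offset, not from the source): with
 * HAMMER_BUFSIZE at 16384, HAMMER_BUFMASK keeps the low 14 bits of the
 * offset, so a buf_offset whose low bits are 0x0200 resolves to
 * ondisk + 0x0200 within the already-loaded 16KB buffer.
 */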
441 * Allocate HAMMER elements - B-Tree nodes
444 alloc_btree_element(hammer_off_t *offp, struct buffer_info **data_bufferp)
446 hammer_node_ondisk_t node;
448 node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
450 bzero(node, sizeof(*node));
455 * Allocate HAMMER elements - meta data (inode, direntry, PFS, etc)
458 alloc_meta_element(hammer_off_t *offp, int32_t data_len,
459 struct buffer_info **data_bufferp)
463 data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
465 bzero(data, data_len);
470 * Allocate HAMMER elements - data storage
472 * The only data_len supported by HAMMER userspace for the large-data zone
473 * (zone 10) is HAMMER_BUFSIZE, which is 16KB.  Data larger than 16KB does
474 * not fit in a buffer allocated by get_buffer(), and alloc_blockmap() does
475 * not handle allocations larger than 16KB either.
478 alloc_data_element(hammer_off_t *offp, int32_t data_len,
479 struct buffer_info **data_bufferp)
487 zone = hammer_data_zone_index(data_len);
488 assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
489 assert(zone == HAMMER_ZONE_LARGE_DATA_INDEX ||
490 zone == HAMMER_ZONE_SMALL_DATA_INDEX);
492 data = alloc_blockmap(zone, data_len, offp, data_bufferp);
493 bzero(data, data_len);
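/*
 * Illustrative note: the assertions above cap a single allocation at
 * one 16KB buffer; a hypothetical 32768 byte request, for example,
 * would trip the data_len <= HAMMER_BUFSIZE assertion rather than be
 * split across two buffers.
 */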
498 * Format a new blockmap. This is mostly a degenerate case because
499 * all allocations are now actually done from the freemap.
502 format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
504 hammer_blockmap_t blockmap;
505 hammer_off_t zone_base;
507 /* Only root volume needs formatting */
508 assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
510 assert(hammer_is_zone2_mapped_index(zone));
512 blockmap = &root_vol->ondisk->vol0_blockmap[zone];
513 zone_base = HAMMER_ZONE_ENCODE(zone, offset);
515 bzero(blockmap, sizeof(*blockmap));
516 blockmap->phys_offset = 0;
517 blockmap->first_offset = zone_base;
518 blockmap->next_offset = zone_base;
519 blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
520 blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
524 * Format a new freemap. Set all layer1 entries to UNAVAIL. The
525 * initialize_freemap() code will load each volume's freemap.
528 format_freemap(struct volume_info *root_vol)
530 struct buffer_info *buffer = NULL;
531 hammer_off_t layer1_offset;
532 hammer_blockmap_t blockmap;
533 hammer_blockmap_layer1_t layer1;
536 /* Only root volume needs formatting */
537 assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
539 layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
540 for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
541 isnew = ((i % HAMMER_BUFSIZE) == 0);
542 layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
543 bzero(layer1, sizeof(*layer1));
544 layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
545 layer1->blocks_free = 0;
546 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
548 assert(i == HAMMER_BIGBLOCK_SIZE);
551 blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
552 bzero(blockmap, sizeof(*blockmap));
553 blockmap->phys_offset = layer1_offset;
554 blockmap->first_offset = 0;
555 blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
556 blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
557 blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
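/*
 * Lookup sketch (mirroring the code in initialize_freemap() below):
 * for a raw buffer offset phys_offset, its layer1 entry lives at
 *
 *	freemap->phys_offset + HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset)
 *
 * and the layer2 entry for a big-block at block_offset within that
 * layer1 range lives at
 *
 *	layer1->phys_offset + HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset)
 */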
561 * Load the volume's remaining free space into the freemap.
563 * Returns the number of big-blocks available.
566 initialize_freemap(struct volume_info *vol)
568 struct volume_info *root_vol;
569 struct buffer_info *buffer1 = NULL;
570 struct buffer_info *buffer2 = NULL;
571 hammer_blockmap_layer1_t layer1;
572 hammer_blockmap_layer2_t layer2;
573 hammer_off_t layer1_offset;
574 hammer_off_t layer2_offset;
575 hammer_off_t phys_offset;
576 hammer_off_t block_offset;
577 hammer_off_t aligned_vol_free_end;
578 hammer_blockmap_t freemap;
580 int64_t layer1_count = 0;
582 root_vol = get_root_volume();
583 aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
584 & ~HAMMER_BLOCKMAP_LAYER2_MASK;
586 printf("initialize freemap volume %d\n", vol->vol_no);
589 * Initialize the freemap. First preallocate the big-blocks required
590 * to implement layer2. This preallocation is a bootstrap allocation
591 * using blocks from the target volume.
593 freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
595 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
596 phys_offset < aligned_vol_free_end;
597 phys_offset += HAMMER_BLOCKMAP_LAYER2) {
598 layer1_offset = freemap->phys_offset +
599 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
600 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
601 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
602 layer1->phys_offset = alloc_bigblock(vol,
603 HAMMER_ZONE_FREEMAP_INDEX);
604 layer1->blocks_free = 0;
605 buffer1->cache.modified = 1;
606 layer1->layer1_crc = crc32(layer1,
607 HAMMER_LAYER1_CRCSIZE);
612 * Now fill everything in.
614 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
615 phys_offset < aligned_vol_free_end;
616 phys_offset += HAMMER_BLOCKMAP_LAYER2) {
618 layer1_offset = freemap->phys_offset +
619 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
620 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
621 assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
623 for (block_offset = 0;
624 block_offset < HAMMER_BLOCKMAP_LAYER2;
625 block_offset += HAMMER_BIGBLOCK_SIZE) {
626 layer2_offset = layer1->phys_offset +
627 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
628 layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
629 bzero(layer2, sizeof(*layer2));
631 if (phys_offset + block_offset < vol->vol_free_off) {
633 * Fixups XXX - big-blocks already allocated as part
634 * of the freemap bootstrap.
636 layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
637 layer2->append_off = HAMMER_BIGBLOCK_SIZE;
638 layer2->bytes_free = 0;
639 } else if (phys_offset + block_offset < vol->vol_free_end) {
641 layer2->append_off = 0;
642 layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
646 layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
647 layer2->append_off = HAMMER_BIGBLOCK_SIZE;
648 layer2->bytes_free = 0;
650 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
651 buffer2->cache.modified = 1;
654 layer1->blocks_free += layer1_count;
655 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
656 buffer1->cache.modified = 1;
665 * Returns the number of big-blocks available for filesystem data and undos
666 * without formatting.
669 count_freemap(struct volume_info *vol)
671 hammer_off_t phys_offset;
672 hammer_off_t vol_free_off;
673 hammer_off_t aligned_vol_free_end;
676 vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
677 aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
678 & ~HAMMER_BLOCKMAP_LAYER2_MASK;
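/*
 * The root volume gives up one extra big-block below, presumably to
 * account for the freemap layer1 big-block that format_freemap()
 * allocates from it.
 */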
680 if (vol->vol_no == HAMMER_ROOT_VOLNO)
681 vol_free_off += HAMMER_BIGBLOCK_SIZE;
683 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
684 phys_offset < aligned_vol_free_end;
685 phys_offset += HAMMER_BLOCKMAP_LAYER2) {
686 vol_free_off += HAMMER_BIGBLOCK_SIZE;
689 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
690 phys_offset < aligned_vol_free_end;
691 phys_offset += HAMMER_BIGBLOCK_SIZE) {
692 if (phys_offset < vol_free_off) {
694 } else if (phys_offset < vol->vol_free_end) {
703 * Format the undomap for the root volume.
706 format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
708 const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
709 hammer_off_t undo_limit;
710 hammer_blockmap_t blockmap;
711 hammer_volume_ondisk_t ondisk;
712 struct buffer_info *buffer = NULL;
718 /* Only root volume needs formatting */
719 assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
720 ondisk = root_vol->ondisk;
723 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
724 * up to HAMMER_UNDO_LAYER2 big-blocks. Size to approximately
727 * The minimum UNDO fifo size is 500MB, or approximately 1% of
728 * the recommended 50G disk.
730 * Changing this minimum is rather dangerous as complex filesystem
731 * operations can cause the UNDO FIFO to fill up otherwise.
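* For example (illustrative sizes, not from the original comment):
* with no explicit undo-buffer-size a 1TB filesystem computes roughly
* 1GB of UNDO (0.1%), rounded up to a HAMMER_BIGBLOCK_SIZE multiple,
* while a 50GB filesystem computes 50MB and is raised to the 500MB
* minimum.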
733 undo_limit = *undo_buffer_size;
734 if (undo_limit == 0) {
735 undo_limit = HAMMER_VOL_BUF_SIZE(ondisk) / 1000;
736 if (undo_limit < 500*1024*1024)
737 undo_limit = 500*1024*1024;
739 undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
740 ~HAMMER_BIGBLOCK_MASK64;
741 if (undo_limit < HAMMER_BIGBLOCK_SIZE)
742 undo_limit = HAMMER_BIGBLOCK_SIZE;
743 if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
744 undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
745 *undo_buffer_size = undo_limit;
747 blockmap = &ondisk->vol0_blockmap[undo_zone];
748 bzero(blockmap, sizeof(*blockmap));
749 blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
750 blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
751 blockmap->next_offset = blockmap->first_offset;
752 blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
753 blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
755 limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
756 assert(limit_index <= HAMMER_UNDO_LAYER2);
758 for (n = 0; n < limit_index; ++n) {
759 ondisk->vol0_undo_array[n] = alloc_bigblock(root_vol,
760 HAMMER_ZONE_UNDO_INDEX);
762 while (n < HAMMER_UNDO_LAYER2) {
763 ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
767 * Pre-initialize the UNDO blocks (HAMMER version 4+)
769 printf("initializing the undo map (%jd MB)\n",
770 (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
773 scan = blockmap->first_offset;
776 while (scan < blockmap->alloc_offset) {
777 hammer_fifo_head_t head;
778 hammer_fifo_tail_t tail;
780 int bytes = HAMMER_UNDO_ALIGN;
782 isnew = ((scan & HAMMER_BUFMASK64) == 0);
783 head = get_buffer_data(scan, &buffer, isnew);
784 buffer->cache.modified = 1;
785 tail = (void *)((char *)head + bytes - sizeof(*tail));
788 head->hdr_signature = HAMMER_HEAD_SIGNATURE;
789 head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
790 head->hdr_size = bytes;
791 head->hdr_seq = seqno++;
793 tail->tail_signature = HAMMER_TAIL_SIGNATURE;
794 tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
795 tail->tail_size = bytes;
797 head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
798 crc32(head + 1, bytes - sizeof(*head));
805 const char *zone_labels[] = {
807 "raw_volume", /* 1 */
808 "raw_buffer", /* 2 */
816 "large_data", /* 10 */
817 "small_data", /* 11 */
825 print_blockmap(const struct volume_info *root_vol)
827 hammer_blockmap_t blockmap;
828 hammer_volume_ondisk_t ondisk;
833 ondisk = root_vol->ondisk;
834 printf(INDENT"vol_label\t%s\n", ondisk->vol_label);
835 printf(INDENT"vol_count\t%d\n", ondisk->vol_count);
836 printf(INDENT"vol_bot_beg\t%s\n", sizetostr(ondisk->vol_bot_beg));
837 printf(INDENT"vol_mem_beg\t%s\n", sizetostr(ondisk->vol_mem_beg));
838 printf(INDENT"vol_buf_beg\t%s\n", sizetostr(ondisk->vol_buf_beg));
839 printf(INDENT"vol_buf_end\t%s\n", sizetostr(ondisk->vol_buf_end));
840 printf(INDENT"vol0_next_tid\t%016jx\n",
841 (uintmax_t)ondisk->vol0_next_tid);
843 blockmap = &ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
844 size = blockmap->alloc_offset & HAMMER_OFF_LONG_MASK;
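/*
 * The UNDO FIFO is circular: when next_offset has wrapped below
 * first_offset, the in-use span covers first_offset through the end of
 * the allocated area plus the wrapped portion, which is what the else
 * branch below computes.
 */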
845 if (blockmap->first_offset <= blockmap->next_offset)
846 used = blockmap->next_offset - blockmap->first_offset;
848 used = blockmap->alloc_offset - blockmap->first_offset +
849 (blockmap->next_offset & HAMMER_OFF_LONG_MASK);
850 printf(INDENT"undo_size\t%s\n", sizetostr(size));
851 printf(INDENT"undo_used\t%s\n", sizetostr(used));
853 printf(INDENT"zone # "
854 "phys first next alloc\n");
855 for (i = 0; i < HAMMER_MAX_ZONES; i++) {
856 blockmap = &ondisk->vol0_blockmap[i];
857 printf(INDENT"zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
859 (uintmax_t)blockmap->phys_offset,
860 (uintmax_t)blockmap->first_offset,
861 (uintmax_t)blockmap->next_offset,
862 (uintmax_t)blockmap->alloc_offset);
867 * Flush various tracking structures to disk
870 flush_all_volumes(void)
872 struct volume_info *vol;
874 TAILQ_FOREACH(vol, &VolList, entry)
879 flush_volume(struct volume_info *volume)
881 struct buffer_info *buffer;
884 for (i = 0; i < HAMMER_BUFLISTS; ++i) {
885 TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
886 flush_buffer(buffer);
888 if (writehammervol(volume) == -1)
889 err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
893 flush_buffer(struct buffer_info *buffer)
895 struct volume_info *vol;
897 vol = buffer->volume;
898 if (writehammerbuf(buffer) == -1)
899 err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
900 buffer->cache.modified = 0;
904 * Core I/O operations
907 __read(struct volume_info *vol, void *data, int64_t offset, int size)
911 n = pread(vol->fd, data, size, offset);
918 readhammervol(struct volume_info *vol)
920 return(__read(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
924 readhammerbuf(struct buffer_info *buf)
926 return(__read(buf->volume, buf->ondisk, buf->raw_offset, HAMMER_BUFSIZE));
930 __write(struct volume_info *vol, const void *data, int64_t offset, int size)
937 n = pwrite(vol->fd, data, size, offset);
944 writehammervol(struct volume_info *vol)
946 return(__write(vol, vol->ondisk, 0, HAMMER_BUFSIZE));
950 writehammerbuf(struct buffer_info *buf)
952 return(__write(buf->volume, buf->ondisk, buf->raw_offset, HAMMER_BUFSIZE));
955 int64_t init_boot_area_size(int64_t value, off_t avg_vol_size)
958 value = HAMMER_BOOT_NOMBYTES;
959 while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
961 if (value < HAMMER_BOOT_MINBYTES)
963 } else if (value < HAMMER_BOOT_MINBYTES) {
964 value = HAMMER_BOOT_MINBYTES;
970 int64_t init_mem_area_size(int64_t value, off_t avg_vol_size)
973 value = HAMMER_MEM_NOMBYTES;
974 while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
976 if (value < HAMMER_MEM_MINBYTES)
978 } else if (value < HAMMER_MEM_MINBYTES) {
979 value = HAMMER_MEM_MINBYTES;