 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"

static void get_buffer_readahead(struct buffer_info *base);
static void *get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
static int readhammerbuf(struct volume_info *vol, void *data, int64_t offset);
static int writehammerbuf(struct volume_info *vol, const void *data,

int UseReadBehind = -4;
int AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static int valid_hammer_volumes;
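/*
 * Hash a buffer offset into one of the HAMMER_BUFLISTS per-volume buffer
 * lists.
 */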
buffer_hash(hammer_off_t buf_offset)
        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
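/*
 * Lookup a cached buffer_info structure by its buffer offset within the
 * given volume, or NULL if it is not resident.
 */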
static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
        struct buffer_info *buf;

        hi = buffer_hash(buf_offset);
        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
                if (buf->buf_offset == buf_offset)
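/*
 * Allocate a volume structure, open the backing file or device, and set up
 * an empty in-memory copy of the volume header.
 */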
__alloc_volume(const char *volname, int oflags)
        struct volume_info *vol;

        vol = malloc(sizeof(*vol));
                err(1, "alloc_volume");
        bzero(vol, sizeof(*vol));
        vol->rdonly = (oflags == O_RDONLY);
        vol->name = strdup(volname);
        vol->fd = open(vol->name, oflags);
                err(1, "alloc_volume: Failed to open %s", vol->name);
        vol->device_offset = 0;

        vol->ondisk = malloc(HAMMER_BUFSIZE);
        if (vol->ondisk == NULL)
                err(1, "alloc_volume");
        bzero(vol->ondisk, HAMMER_BUFSIZE);

        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
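/*
 * Add a volume to the global volume list after checking that its volume
 * number is unique and that the same file or device was not specified twice.
 */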
__add_volume(struct volume_info *vol)
        struct volume_info *scan;
        struct stat st1, st2;

        if (fstat(vol->fd, &st1) != 0)
                errx(1, "add_volume: %s: Failed to stat", vol->name);

        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol->vol_no) {
                        errx(1, "add_volume: %s: Duplicate volume number %d "
                                vol->name, vol->vol_no, scan->name);
                if (fstat(scan->fd, &st2) != 0) {
                        errx(1, "add_volume: %s: Failed to stat %s",
                                vol->name, scan->name);
                if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
                        errx(1, "add_volume: %s: Specified more than once",

        TAILQ_INSERT_TAIL(&VolList, vol, entry);
 * Initialize a volume structure and ondisk vol_no field.
init_volume(int32_t vol_no, const char *filename, int oflags)
        struct volume_info *vol;

        vol = __alloc_volume(filename, oflags);
        vol->vol_no = vol->ondisk->vol_no = vol_no;
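/*
 * (init_volume() sets up a blank header for a volume that is about to be
 * formatted; load_volume() below reads and validates the header of an
 * existing volume.)
 */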
 * Initialize a volume structure and read ondisk volume header.
load_volume(const char *filename, int oflags)
        struct volume_info *vol;
        struct hammer_volume_ondisk *ondisk;

        vol = __alloc_volume(filename, oflags);
        ondisk = vol->ondisk;

        n = readhammerbuf(vol, ondisk, 0);
                err(1, "load_volume: %s: Read failed at offset 0", vol->name);
        vol->vol_no = ondisk->vol_no;

        if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
                errx(1, "load_volume: Invalid root volume# %d",
                        ondisk->vol_rootvol);
        if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
                errx(1, "load_volume: %s: Header does not indicate "
                        "that this is a hammer volume", vol->name);
        if (valid_hammer_volumes++ == 0) {
                Hammer_FSId = ondisk->vol_fsid;
        } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
                errx(1, "load_volume: %s: FSId does not match other volumes!",
 * Check basic volume characteristics.
check_volume(struct volume_info *vol)
        struct partinfo pinfo;

         * Get basic information about the volume
        if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
                 * Allow the formatting of regular files as HAMMER volumes
                if (fstat(vol->fd, &st) < 0)
                        err(1, "Unable to stat %s", vol->name);
                vol->size = st.st_size;
                vol->type = "REGFILE";
                 * When formatting a block device as a HAMMER volume the
                 * sector size must be compatible. HAMMER uses 16384 byte
                 * filesystem buffers.
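                 *
                 * (For example, sector sizes of 512, 1024, 2048, 4096, 8192
                 * or 16384 bytes all divide 16384 evenly and pass the check
                 * below.)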
                if (pinfo.reserved_blocks) {
                        errx(1, "HAMMER cannot be placed in a partition "
                                "which overlaps the disklabel or MBR");
                if (pinfo.media_blksize > HAMMER_BUFSIZE ||
                    HAMMER_BUFSIZE % pinfo.media_blksize) {
                        errx(1, "A media sector size of %d is not supported",
                                pinfo.media_blksize);
                vol->size = pinfo.media_size;
                vol->device_offset = pinfo.media_offset;
                vol->type = "DEVICE";
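/*
 * Lookup an already-loaded volume by volume number; it is an error if the
 * volume does not exist.
 */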
get_volume(int32_t vol_no)
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)

        errx(1, "get_volume: Volume %d does not exist!", vol_no);
        /* not added to or removed from hammer cache */

get_root_volume(void)
        return(get_volume(HAMMER_ROOT_VOLNO));

rel_volume(struct volume_info *volume __unused)

 * Acquire the specified buffer. isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
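 *
 * A positive isnew requests a fresh, zeroed buffer that is immediately
 * marked modified; isnew 0 reads the existing contents from the media if
 * the buffer is not already cached.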
get_buffer(hammer_off_t buf_offset, int isnew)
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
                if (buf_offset == HAMMER_OFF_BAD)
        assert(hammer_is_zone_raw_buffer(buf_offset));

        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);

        buf_offset &= ~HAMMER_BUFMASK64;
        buf = find_buffer(volume, buf_offset);
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                fprintf(stderr, "get_buffer: %016jx %016jx at %p\n",
                        (intmax_t)orig_offset, (intmax_t)buf_offset,
                buf->buf_offset = buf_offset;
                buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
                buf->volume = volume;
                hi = buffer_hash(buf_offset);
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                buf->cache.buffer = buf;
                hammer_cache_add(&buf->cache);
                fprintf(stderr, "get_buffer: %016jx %016jx at %p *\n",
                        (intmax_t)orig_offset, (intmax_t)buf_offset,
                hammer_cache_used(&buf->cache);
        hammer_cache_flush();

        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                n = readhammerbuf(volume, ondisk, buf->raw_offset);
                        err(1, "get_buffer: %s:%016jx "
                            "Read failed at offset %016jx",
                            (intmax_t)buf->buf_offset,
                            (intmax_t)buf->raw_offset);
                bzero(ondisk, HAMMER_BUFSIZE);
        bzero(ondisk, HAMMER_BUFSIZE);
        buf->cache.modified = 1;
        get_buffer_readahead(buf);
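/*
 * Pre-reference the buffers surrounding 'base', from UseReadBehind
 * (negative, i.e. behind the base buffer) up to UseReadAhead buffers ahead
 * of it, skipping offsets that fall outside the volume's buffer area.
 */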
get_buffer_readahead(struct buffer_info *base)
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
                if (raw_offset >= vol->ondisk->vol_buf_end)
                if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
                        raw_offset += HAMMER_BUFSIZE;
                buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
                        raw_offset - vol->ondisk->vol_buf_beg);
                buf = find_buffer(vol, buf_offset);
                        buf = get_buffer(buf_offset, -1);
                raw_offset += HAMMER_BUFSIZE;
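/*
 * Drop a reference on a buffer. When the last reference goes away and the
 * buffer has been marked for deletion it is flushed if modified, then its
 * cache entry is torn down.
 */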
rel_buffer(struct buffer_info *buffer)
        struct volume_info *volume;

        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
 * Retrieve a pointer to buffer data given a buffer offset. The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
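 *
 * Callers typically keep a buffer_info pointer initialized to NULL across
 * successive calls and release it with rel_buffer() when they are done.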
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
        if (*bufferp != NULL) {
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
        return(get_ondisk(buf_offset, bufferp, isnew));
 * Retrieve a pointer to a B-Tree node given a zone offset. The underlying
 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
        return(get_ondisk(node_offset, bufferp, 0));
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL, acquire the buffer; otherwise use that buffer.
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
        struct buffer_info *buffer;

        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
        return((char *)buffer->ondisk +
               ((int32_t)buf_offset & HAMMER_BUFMASK));
 * Allocate HAMMER elements - B-Tree nodes
alloc_btree_element(hammer_off_t *offp, struct buffer_info **data_bufferp)
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
        bzero(node, sizeof(*node));

 * Allocate HAMMER elements - meta data (inode, direntry, PFS, etc)
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
        bzero(data, data_len);
 * Allocate HAMMER elements - data storage
 *
 * The only data_len supported by HAMMER userspace for the large data zone
 * (zone 10) is HAMMER_BUFSIZE, which is 16KB. Data larger than 16KB does not
 * fit in a buffer allocated by get_buffer(), and alloc_blockmap() does not
 * handle buffer sizes larger than 16KB either.
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
        zone = hammer_data_zone_index(data_len);
        assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
        assert(zone == HAMMER_ZONE_LARGE_DATA_INDEX ||
               zone == HAMMER_ZONE_SMALL_DATA_INDEX);

        data = alloc_blockmap(zone, data_len, offp, data_bufferp);
        bzero(data, data_len);
 * Format a new blockmap. This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
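 *
 * Only the zone-encoded base and allocation limit are recorded here;
 * phys_offset is left 0 since zone-2 translation is handled through the
 * freemap.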
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
        hammer_blockmap_t blockmap;
        hammer_off_t zone_base;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
        assert(hammer_is_zone2_mapped_index(zone));

        blockmap = &root_vol->ondisk->vol0_blockmap[zone];
        zone_base = HAMMER_ZONE_ENCODE(zone, offset);

        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = 0;
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
 * Format a new freemap. Set all layer1 entries to UNAVAIL. The
 * initialize_freemap() code will load each volume's freemap.
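 *
 * A single big-block is allocated to hold the layer1 array; every entry is
 * left UNAVAIL here so that initialize_freemap() later fills in only the
 * entries that cover actual volume space.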
format_freemap(struct volume_info *root_vol)
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_blockmap_t blockmap;
        struct hammer_blockmap_layer1 *layer1;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
                isnew = ((i % HAMMER_BUFSIZE) == 0);
                layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        assert(i == HAMMER_BIGBLOCK_SIZE);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = layer1_offset;
        blockmap->first_offset = 0;
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
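 *
 * This runs in two passes: the first preallocates the layer2 big-blocks
 * needed to describe the volume (a bootstrap allocation taken from the
 * volume itself), the second fills in every layer2 entry.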
initialize_freemap(struct volume_info *vol)
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t aligned_vol_free_end;
        hammer_blockmap_t freemap;
        int64_t layer1_count = 0;

        root_vol = get_root_volume();
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

         * Initialize the freemap. First preallocate the big-blocks required
         * to implement layer2. This preallocation is a bootstrap allocation
         * using blocks from the target volume.
        freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        layer1_base = freemap->phys_offset;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                HAMMER_LAYER1_CRCSIZE);

         * Now fill everything in.
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                        bzero(layer2, sizeof(*layer2));

                        if (phys_offset + block_offset < vol->vol_free_off) {
                                 * Fixups XXX - big-blocks already allocated as part
                                 * of the freemap bootstrap.
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        } else if (phys_offset + block_offset < vol->vol_free_end) {
                                layer2->append_off = 0;
                                layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                                layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;

                        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                        buffer2->cache.modified = 1;

                layer1->blocks_free += layer1_count;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;

        rel_volume(root_vol);
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
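 *
 * Space the freemap itself will consume is excluded up front: one extra
 * big-block on the root volume (for layer1) plus one big-block per
 * HAMMER_BLOCKMAP_LAYER2 region covered (for the layer2 bootstrap blocks).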
count_freemap(struct volume_info *vol)
        hammer_off_t phys_offset;
        hammer_off_t vol_free_off;
        hammer_off_t aligned_vol_free_end;

        vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        if (vol->vol_no == HAMMER_ROOT_VOLNO)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                if (phys_offset < vol_free_off) {
                } else if (phys_offset < vol->vol_free_end) {
 * Format the undomap for the root volume.
format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct hammer_volume_ondisk *ondisk;
        struct buffer_info *buffer = NULL;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
        ondisk = root_vol->ondisk;
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big-blocks. Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
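         *
         * For example, a 1TB filesystem gets roughly 1GB of UNDO by default,
         * while anything smaller than about 500GB falls back to the 500MB
         * minimum; the result is then rounded up to a big-block multiple.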
        undo_limit = *undo_buffer_size;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        *undo_buffer_size = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(root_vol,
                        HAMMER_ZONE_UNDO_INDEX);
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
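         *
         * Each HAMMER_UNDO_ALIGN-sized chunk is written as a DUMMY FIFO
         * record with matching head and tail and a valid CRC, so the UNDO
         * scan code sees a fully formed, empty FIFO.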
        printf("initializing the undo map (%jd MB)\n",
               (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /

        scan = blockmap->first_offset;
        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;
                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));
 * Flush various tracking structures to disk
flush_all_volumes(void)
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)

flush_volume(struct volume_info *volume)
        struct buffer_info *buffer;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        if (writehammerbuf(volume, volume->ondisk, 0) == -1)
                err(1, "Write volume %d (%s)", volume->vol_no, volume->name);

flush_buffer(struct buffer_info *buffer)
        struct volume_info *vol;

        vol = buffer->volume;
        if (writehammerbuf(vol, buffer->ondisk, buffer->raw_offset) == -1)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
        buffer->cache.modified = 0;
 * Core I/O operations
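 *
 * Both helpers transfer exactly HAMMER_BUFSIZE bytes with pread(2) and
 * pwrite(2) and treat a short transfer as a failure.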
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)

writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
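/*
 * Pick the boot and memory log area sizes. When no size was requested the
 * nominal value is scaled down until it fits an equal share of the average
 * volume size; an explicitly requested size is clamped to the minimum.
 */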
int64_t init_boot_area_size(int64_t value, off_t avg_vol_size)
        value = HAMMER_BOOT_NOMBYTES;
        while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
        if (value < HAMMER_BOOT_MINBYTES)
        } else if (value < HAMMER_BOOT_MINBYTES) {
                value = HAMMER_BOOT_MINBYTES;

int64_t init_mem_area_size(int64_t value, off_t avg_vol_size)
        value = HAMMER_MEM_NOMBYTES;
        while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
        if (value < HAMMER_MEM_MINBYTES)
        } else if (value < HAMMER_MEM_MINBYTES) {
                value = HAMMER_MEM_MINBYTES;