/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"

static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
                        struct buffer_info **bufferp, int isnew);
static int readhammerbuf(struct volume_info *vol, void *data, int64_t offset);
static int writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t UndoBufferSize;
int UseReadBehind = -4;
int UseReadAhead = 4;
int AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

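/*
 * Hash a zone-2 buffer offset into one of the per-volume buffer lists.
 */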
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}

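/*
 * Return the cached buffer for the given buffer offset, or NULL if it
 * is not resident.
 */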
static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
        int hi;
        struct buffer_info *buf;

        hi = buffer_hash(buf_offset);
        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
                if (buf->buf_offset == buf_offset)
                        return(buf);
        return(NULL);
}

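/*
 * Allocate a volume structure, open the backing file or device, and
 * allocate an empty in-memory copy of the ondisk volume header.
 */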
static struct volume_info *
__alloc_volume(const char *volname, int oflags)
{
        struct volume_info *vol;
        int i;

        vol = malloc(sizeof(*vol));
        if (vol == NULL)
                err(1, "alloc_volume");
        bzero(vol, sizeof(*vol));

        vol->name = strdup(volname);
        vol->fd = open(vol->name, oflags);
        if (vol->fd < 0)
                err(1, "alloc_volume: Failed to open %s", vol->name);

        vol->device_offset = 0;

        vol->ondisk = malloc(HAMMER_BUFSIZE);
        if (vol->ondisk == NULL)
                err(1, "alloc_volume");
        bzero(vol->ondisk, HAMMER_BUFSIZE);

        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);

        return(vol);
}

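/*
 * Link a volume onto the global volume list, guarding against duplicate
 * volume numbers and against the same file or device being given twice.
 */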
static void
__add_volume(struct volume_info *vol)
{
        struct volume_info *scan;
        struct stat st1, st2;

        if (fstat(vol->fd, &st1) != 0)
                errx(1, "add_volume: %s: Failed to stat", vol->name);

        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol->vol_no) {
                        errx(1, "add_volume: %s: Duplicate volume number %d "
                                "against %s",
                                vol->name, vol->vol_no, scan->name);
                }
                if (fstat(scan->fd, &st2) != 0) {
                        errx(1, "add_volume: %s: Failed to stat %s",
                                vol->name, scan->name);
                }
                if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
                        errx(1, "add_volume: %s: Specified more than once",
                                vol->name);
                }
        }

        TAILQ_INSERT_TAIL(&VolList, vol, entry);
}

/*
 * Initialize a volume structure and ondisk vol_no field.
 */
struct volume_info *
init_volume(int32_t vol_no, const char *filename, int oflags)
{
        struct volume_info *vol;

        vol = __alloc_volume(filename, oflags);
        vol->vol_no = vol->ondisk->vol_no = vol_no;
        vol->cache.modified = 1;

        __add_volume(vol);

        return(vol);
}

/*
 * Initialize a volume structure and read ondisk volume header.
 */
struct volume_info *
load_volume(const char *filename, int oflags)
{
        struct volume_info *vol;
        struct hammer_volume_ondisk *ondisk;
        int n;

        vol = __alloc_volume(filename, oflags);
        ondisk = vol->ondisk;

        n = readhammerbuf(vol, ondisk, 0);
        if (n == -1)
                err(1, "load_volume: %s: Read failed at offset 0", vol->name);
        vol->vol_no = ondisk->vol_no;

        if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
                errx(1, "load_volume: Invalid root volume# %d",
                        ondisk->vol_rootvol);
        }

        if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
                errx(1, "load_volume: %s: Header does not indicate "
                        "that this is a hammer volume", vol->name);
        }

        if (TAILQ_EMPTY(&VolList)) {
                Hammer_FSId = ondisk->vol_fsid;
        } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
                errx(1, "load_volume: %s: FSId does not match other volumes!",
                        vol->name);
        }

        __add_volume(vol);

        return(vol);
}

/*
 * Check basic volume characteristics.
 */
void
check_volume(struct volume_info *vol)
{
        struct partinfo pinfo;
        struct stat st;

        /*
         * Get basic information about the volume.
         */
        if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
                /*
                 * Allow the formatting of regular files as HAMMER volumes.
                 */
                if (fstat(vol->fd, &st) < 0)
                        err(1, "Unable to stat %s", vol->name);
                vol->size = st.st_size;
                vol->type = "REGFILE";
        } else {
                /*
                 * When formatting a block device as a HAMMER volume the
                 * sector size must be compatible.  HAMMER uses 16384 byte
                 * filesystem buffers.
                 */
                if (pinfo.reserved_blocks) {
                        errx(1, "HAMMER cannot be placed in a partition "
                                "which overlaps the disklabel or MBR");
                }
                if (pinfo.media_blksize > HAMMER_BUFSIZE ||
                    HAMMER_BUFSIZE % pinfo.media_blksize) {
                        errx(1, "A media sector size of %d is not supported",
                                pinfo.media_blksize);
                }

                vol->size = pinfo.media_size;
                vol->device_offset = pinfo.media_offset;
                vol->type = "DEVICE";
        }
}

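/*
 * Look up a volume by number and add a reference to it.  The volume
 * must already be on the volume list.
 */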
struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL) {
                errx(1, "get_volume: Volume %d does not exist!",
                        vol_no);
        }

        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

struct volume_info *
get_root_volume(void)
{
        return(get_volume(HAMMER_ROOT_VOLNO));
}

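/*
 * Release a volume reference obtained via get_volume().
 */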
void
rel_volume(struct volume_info *volume)
{
        assert(volume->cache.refs > 0);

        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);

        buf_offset &= ~HAMMER_BUFMASK64;
        buf = find_buffer(volume, buf_offset);

        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
                                                        buf_offset);
                buf->volume = volume;
                hi = buffer_hash(buf_offset);
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
        } else {
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                hammer_cache_used(&buf->cache);
        }

        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = readhammerbuf(volume, ondisk, buf->raw_offset);
                        if (n == -1) {
                                if (AssertOnFailure)
                                        err(1, "get_buffer: %s:%016llx "
                                            "Read failed at offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}

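/*
 * Pull in the buffers surrounding the base buffer, from UseReadBehind
 * buffers behind it to UseReadAhead buffers ahead of it, skipping any
 * that are already cached.
 */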
static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
                        raw_offset - vol->ondisk->vol_buf_beg);
                buf = find_buffer(vol, buf_offset);
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

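/*
 * Release a buffer reference.  On the final release a buffer marked for
 * deletion is flushed if modified, then unhashed and freed.
 */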
void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

/*
 * Retrieve a pointer to a buffer's data given a buffer offset.  The
 * underlying bufferp is freed if isnew or the offset is out of range of
 * the cached data.  If bufferp is freed a referenced buffer is loaded
 * into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        if (*bufferp != NULL) {
                if (isnew > 0 ||
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }
        return(get_ondisk(buf_offset, bufferp, isnew));
}

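/*
 * Typical caller pattern (as used by initialize_freemap() and
 * format_undomap() below): keep a single buffer_info pointer,
 * initialized to NULL, across a scan loop and let get_buffer_data()
 * recycle it, then release it once at the end:
 *
 *	struct buffer_info *buffer = NULL;
 *
 *	for (...) {
 *		layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *		...
 *	}
 *	rel_buffer(buffer);
 *
 * The buffer is only re-read when the requested offset falls outside
 * the currently cached one.
 */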
/*
 * Retrieve a pointer to a B-Tree node given a zone offset.  The underlying
 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
                *bufferp = NULL;
        }
        return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to a buffer's data given a buffer offset.
 * If *bufferp is NULL acquire the buffer, otherwise use that buffer.
 */
static __inline void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
{
        struct buffer_info *buffer;

        buffer = *bufferp;
        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
                if (buffer == NULL)
                        return(NULL);
        }

        return((char *)buffer->ondisk +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - B-Tree nodes, meta data, data storage.
 */
void *
alloc_btree_element(hammer_off_t *offp, struct buffer_info **data_bufferp)
{
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, data_bufferp);
        bzero(node, sizeof(*node));
        return(node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return(data);
}

/*
 * The only data_len supported by HAMMER userspace for the large data zone
 * (zone 10) is HAMMER_BUFSIZE, which is 16KB.  Data larger than 16KB does
 * not fit in a buffer allocated by get_buffer(), and alloc_blockmap()
 * does not consider buffer sizes larger than 16KB either.
 */
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;
        int zone;

        if (data_len == 0)
                return(NULL);

        zone = hammer_data_zone_index(data_len);
        assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
        assert(zone == HAMMER_ZONE_LARGE_DATA_INDEX ||
               zone == HAMMER_ZONE_SMALL_DATA_INDEX);

        data = alloc_blockmap(zone, data_len, offp, data_bufferp);
        bzero(data, data_len);
        return(data);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
{
        hammer_blockmap_t blockmap;
        hammer_off_t zone_base;

        /* Only the root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
        assert(hammer_is_zone2_mapped_index(zone));

        blockmap = &root_vol->ondisk->vol0_blockmap[zone];
        zone_base = HAMMER_ZONE_ENCODE(zone, offset);

        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = 0;
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code (initialize_freemap()) will then load each
 * volume's free space into the freemap.
 */
void
format_freemap(struct volume_info *root_vol)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_blockmap_t blockmap;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        /* Only the root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
                isnew = ((i % HAMMER_BUFSIZE) == 0);
                layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        assert(i == HAMMER_BIGBLOCK_SIZE);
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = layer1_offset;
        blockmap->first_offset = 0;
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t aligned_vol_free_end;
        hammer_blockmap_t freemap;
        int64_t count = 0;
        int64_t layer1_count = 0;

        root_vol = get_root_volume();
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the big-blocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        layer1_base = freemap->phys_offset;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                        bzero(layer2, sizeof(*layer2));

                        if (phys_offset + block_offset < vol->vol_free_off) {
                                /*
                                 * Fixups XXX - big-blocks already allocated
                                 * as part of the freemap bootstrap.
                                 */
                                if (layer2->zone == 0) {
                                        layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                        layer2->bytes_free = 0;
                                }
                        } else if (phys_offset + block_offset < vol->vol_free_end) {
                                layer2->zone = 0;
                                layer2->append_off = 0;
                                layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                                ++count;
                                ++layer1_count;
                        } else {
                                layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                        buffer2->cache.modified = 1;
                }

                layer1->blocks_free += layer1_count;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
        }

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
int64_t
count_freemap(struct volume_info *vol)
{
        hammer_off_t phys_offset;
        hammer_off_t vol_free_off;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;

        vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        if (vol->vol_no == HAMMER_ROOT_VOLNO)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                vol_free_off += HAMMER_BIGBLOCK_SIZE;
        }

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                if (phys_offset < vol_free_off) {
                        ;
                } else if (phys_offset < vol->vol_free_end) {
                        ++count;
                }
        }

        return(count);
}

/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct hammer_volume_ondisk *ondisk;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        uint32_t seqno;

        /* Only the root volume needs formatting */
        assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
        ondisk = root_vol->ondisk;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big-blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                HAMMER_ZONE_UNDO_INDEX);
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        rel_buffer(buffer);
}

/*
 * Flush various tracking structures to disk.
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

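/*
 * Flush all modified buffers on a volume, then write out the volume's
 * own ondisk header.
 */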
void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        if (writehammerbuf(volume, volume->ondisk, 0) == -1)
                err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
        volume->cache.modified = 0;
}

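/*
 * Write a single modified buffer back to its volume.
 */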
void
flush_buffer(struct buffer_info *buffer)
{
        struct volume_info *vol;

        vol = buffer->volume;
        if (writehammerbuf(vol, buffer->ondisk, buffer->raw_offset) == -1)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
        buffer->cache.modified = 0;
}

/*
 * Core I/O operations.
 */
static int
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                return(-1);
        return(0);
}

static int
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                return(-1);
        return(0);
}

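/*
 * Size the boot area.  If no size was requested pick the nominal size
 * and halve it until it fits the average volume, dropping to zero if it
 * falls below the minimum.  An explicit request is clamped to the
 * minimum.
 */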
int64_t init_boot_area_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_BOOT_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
                if (value < HAMMER_BOOT_MINBYTES)
                        value = 0;
        } else if (value < HAMMER_BOOT_MINBYTES) {
                value = HAMMER_BOOT_MINBYTES;
        }

        return(value);
}

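/*
 * Size the memory log area using the same strategy as the boot area.
 */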
int64_t init_mem_area_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_MEM_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
                if (value < HAMMER_MEM_MINBYTES)
                        value = 0;
        } else if (value < HAMMER_MEM_MINBYTES) {
                value = HAMMER_MEM_MINBYTES;
        }

        return(value);
}