/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/diskslice.h>
#include <sys/diskmbr.h>

#include "hammer_util.h"
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
			struct buffer_info **bufferp, int isnew);
static int readhammerbuf(struct volume_info *vol, void *data, int64_t offset);
static int writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int UseReadBehind = -4;
int AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
buffer_hash(hammer_off_t buf_offset)
	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
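/*
 * Illustrative note (added for clarity, not in the original source):
 * buffer offsets handed to buffer_hash() are aligned to HAMMER_BUFSIZE
 * (16384), so dividing by HAMMER_BUFSIZE yields the buffer index and
 * masking with HAMMER_BUFLISTMASK folds that index onto one of the
 * per-volume hash chains.  For example, the 5th buffer of a volume hashes
 * to chain (5 & HAMMER_BUFLISTMASK).
 */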
static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
	struct buffer_info *buf;

	hi = buffer_hash(buf_offset);
	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
		if (buf->buf_offset == buf_offset)
__alloc_volume(const char *volname, int oflags)
	struct volume_info *vol;

	vol = malloc(sizeof(*vol));
	if (vol == NULL)
		err(1, "alloc_volume");
	bzero(vol, sizeof(*vol));

	vol->name = strdup(volname);
	vol->fd = open(vol->name, oflags);
	if (vol->fd < 0)
		err(1, "alloc_volume: Failed to open %s", vol->name);

	vol->device_offset = 0;

	vol->ondisk = malloc(HAMMER_BUFSIZE);
	if (vol->ondisk == NULL)
		err(1, "alloc_volume");
	bzero(vol->ondisk, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
__add_volume(struct volume_info *vol)
	struct volume_info *scan;
	struct stat st1, st2;

	if (fstat(vol->fd, &st1) != 0)
		errx(1, "add_volume: %s: Failed to stat", vol->name);

	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol->vol_no) {
			errx(1, "add_volume: %s: Duplicate volume number %d "
				vol->name, vol->vol_no, scan->name);
		if (fstat(scan->fd, &st2) != 0) {
			errx(1, "add_volume: %s: Failed to stat %s",
				vol->name, scan->name);
		if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
			errx(1, "add_volume: %s: Specified more than once",

	TAILQ_INSERT_TAIL(&VolList, vol, entry);
/*
 * Initialize a volume structure and ondisk vol_no field.
 */
init_volume(int32_t vol_no, const char *filename, int oflags)
	struct volume_info *vol;

	vol = __alloc_volume(filename, oflags);
	vol->vol_no = vol->ondisk->vol_no = vol_no;
	vol->cache.modified = 1;
/*
 * Initialize a volume structure and read the ondisk volume header.
 */
load_volume(const char *filename, int oflags)
	struct volume_info *vol;
	struct hammer_volume_ondisk *ondisk;

	vol = __alloc_volume(filename, oflags);
	ondisk = vol->ondisk;

	n = readhammerbuf(vol, ondisk, 0);
	if (n == -1)
		err(1, "load_volume: %s: Read failed at offset 0", vol->name);

	vol->vol_no = ondisk->vol_no;

	if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
		errx(1, "load_volume: Invalid root volume# %d",
			ondisk->vol_rootvol);

	if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType))) {
		errx(1, "load_volume: %s: Header does not indicate "
			"that this is a hammer volume", vol->name);

	if (TAILQ_EMPTY(&VolList)) {
		Hammer_FSId = ondisk->vol_fsid;
	} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId))) {
		errx(1, "load_volume: %s: FSId does not match other volumes!",
/*
 * Check basic volume characteristics.
 */
check_volume(struct volume_info *vol)
	struct partinfo pinfo;

	/*
	 * Get basic information about the volume.
	 */
	if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
		/*
		 * Allow the formatting of regular files as HAMMER volumes.
		 */
		if (fstat(vol->fd, &st) < 0)
			err(1, "Unable to stat %s", vol->name);
		vol->size = st.st_size;
		vol->type = "REGFILE";
	} else {
		/*
		 * When formatting a block device as a HAMMER volume the
		 * sector size must be compatible.  HAMMER uses 16384-byte
		 * filesystem buffers.
		 */
		if (pinfo.reserved_blocks) {
			errx(1, "HAMMER cannot be placed in a partition "
				"which overlaps the disklabel or MBR");
		if (pinfo.media_blksize > HAMMER_BUFSIZE ||
		    HAMMER_BUFSIZE % pinfo.media_blksize) {
			errx(1, "A media sector size of %d is not supported",
				pinfo.media_blksize);

		vol->size = pinfo.media_size;
		vol->device_offset = pinfo.media_offset;
		vol->type = "DEVICE";
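/*
 * Illustrative note (added for clarity, not in the original source): the
 * sector-size check in check_volume() above requires the device sector
 * size to divide HAMMER_BUFSIZE evenly.  With 16384-byte filesystem
 * buffers, 512-byte and 4096-byte sectors pass (16384 % 512 == 0,
 * 16384 % 4096 == 0), while an odd size such as 520 bytes fails
 * (16384 % 520 != 0) and is rejected.
 */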
get_volume(int32_t vol_no)
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
	errx(1, "get_volume: Volume %d does not exist!", vol_no);

	/* not added to or removed from hammer cache */

get_root_volume(void)
	return(get_volume(HAMMER_ROOT_VOLNO));
rel_volume(struct volume_info *volume)
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
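/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * source, hence the #if 0): references obtained with get_volume() or
 * get_root_volume() are paired with rel_volume() when the caller is done.
 */
#if 0
static void
example_volume_ref(void)
{
	struct volume_info *root_vol;

	root_vol = get_root_volume();	/* errx()s if it does not exist */
	/* ... examine root_vol->ondisk ... */
	rel_volume(root_vol);
}
#endif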
/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
get_buffer(hammer_off_t buf_offset, int isnew)
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
		if (buf_offset == HAMMER_OFF_BAD)
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);

	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);

	buf_offset &= ~HAMMER_BUFMASK64;
	buf = find_buffer(volume, buf_offset);
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
			fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
				(long long)orig_offset, (long long)buf_offset,
		buf->buf_offset = buf_offset;
		buf->raw_offset = hammer_xlate_to_phys(volume->ondisk,
		buf->volume = volume;
		hi = buffer_hash(buf_offset);
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
			fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
				(long long)orig_offset, (long long)buf_offset,
		hammer_cache_used(&buf->cache);

	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		n = readhammerbuf(volume, ondisk, buf->raw_offset);
			err(1, "get_buffer: %s:%016llx "
			    "Read failed at offset %016llx",
			    (long long)buf->buf_offset,
			    (long long)buf->raw_offset);
			bzero(ondisk, HAMMER_BUFSIZE);
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;

	get_buffer_readahead(buf);
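/*
 * Background note (added for clarity, not in the original source): a
 * HAMMER offset encodes its zone in the top bits of the 64-bit value.
 * Offsets in zones above the raw-buffer zone (zone 2) are translated
 * through the blockmap by blockmap_lookup() above, yielding a zone-2
 * offset whose volume number is extracted with HAMMER_VOL_DECODE() and
 * whose low bits, masked with ~HAMMER_BUFMASK64, select the containing
 * 16KB buffer.
 */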
get_buffer_readahead(struct buffer_info *base)
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;

		if (raw_offset >= vol->ondisk->vol_buf_end)
		if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
			raw_offset += HAMMER_BUFSIZE;
		buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
				raw_offset - vol->ondisk->vol_buf_beg);
		buf = find_buffer(vol, buf_offset);
			buf = get_buffer(buf_offset, -1);
		raw_offset += HAMMER_BUFSIZE;
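/*
 * Illustrative note (added for clarity, not in the original source): with
 * UseReadBehind at -4, the readahead window starts four 16KB buffers
 * behind the base buffer's raw offset and walks forward one buffer at a
 * time toward UseReadAhead buffers past it.  Offsets that fall outside
 * vol_buf_beg/vol_buf_end, the base buffer itself (ri == 0), and buffers
 * already present in the cache are skipped.  The nested get_buffer()
 * calls use isnew == -1 so they do not trigger further readahead.
 */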
rel_buffer(struct buffer_info *buffer)
	struct volume_info *volume;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
/*
 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
 * *bufferp is released if isnew or the offset is out of range of the cached
 * data; if it is released, a referenced buffer is loaded into it.
 */
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
	if (*bufferp != NULL) {
		    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(*bufferp);

	return(get_ondisk(buf_offset, bufferp, isnew));
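/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * source, hence the #if 0).  Callers keep one cached buffer pointer across
 * a series of lookups so accesses within the same 16KB buffer reuse it,
 * and release it when done.  The offset here is only a placeholder.
 */
#if 0
static void
example_peek(hammer_off_t some_offset)
{
	struct buffer_info *buffer = NULL;
	void *data;

	data = get_buffer_data(some_offset, &buffer, 0);
	/* ... inspect the buffer contents through data ... */
	if (buffer)
		rel_buffer(buffer);
}
#endif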
/*
 * Retrieve a pointer to a B-Tree node given a zone offset.  The underlying
 * *bufferp is released if non-NULL, and a referenced buffer is loaded into it.
 */
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
	if (*bufferp != NULL) {
		rel_buffer(*bufferp);

	return(get_ondisk(node_offset, bufferp, 0));
/*
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL, acquire the buffer; otherwise use that buffer.
 */
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
	struct buffer_info *buffer;

	if (buffer == NULL) {
		buffer = *bufferp = get_buffer(buf_offset, isnew);

	return((char *)buffer->ondisk +
		((int32_t)buf_offset & HAMMER_BUFMASK));
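/*
 * Worked example (added for clarity, not in the original source): with
 * 16KB buffers the low 14 bits of the offset give the byte position
 * within the buffer.  If HAMMER_BUFMASK is 0x3fff, an offset whose low
 * bits are 0x6123 resolves to the buffer's ondisk base plus 0x2123
 * (0x6123 & 0x3fff == 0x2123).
 */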
/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage.
 */
alloc_btree_element(hammer_off_t *offp, struct buffer_info **data_bufferp)
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
	bzero(node, sizeof(*node));

alloc_meta_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
	data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
	bzero(data, data_len);
/*
 * The only data_len supported by HAMMER userspace for the large-data zone
 * (zone 10) is HAMMER_BUFSIZE, which is 16KB.  Data larger than 16KB does
 * not fit in a buffer allocated by get_buffer(), and alloc_blockmap() does
 * not handle buffer sizes larger than 16KB either.
 */
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
	zone = hammer_data_zone_index(data_len);
	assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
	assert(zone == HAMMER_ZONE_LARGE_DATA_INDEX ||
	       zone == HAMMER_ZONE_SMALL_DATA_INDEX);

	data = alloc_blockmap(zone, data_len, offp, data_bufferp);
	bzero(data, data_len);
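/*
 * Illustrative usage sketch (not part of the original source, hence the
 * #if 0): allocating record data returns a pointer to zeroed storage and
 * places the zone-encoded offset of that storage in *offp; the caller
 * fills in the data and releases the backing buffer.  The exact return
 * type and the way *offp is produced are assumptions here; only the
 * data_len <= HAMMER_BUFSIZE constraint is taken from the code above.
 */
#if 0
static void
example_alloc_data(int32_t len)
{
	struct buffer_info *data_buffer = NULL;
	hammer_off_t data_off;
	void *data;

	assert(len > 0 && len <= HAMMER_BUFSIZE);
	data = alloc_data_element(&data_off, len, &data_buffer);
	/* ... copy the record data into 'data'; data_off refers to it ... */
	if (data_buffer)
		rel_buffer(data_buffer);
}
#endif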
/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
format_blockmap(struct volume_info *root_vol, int zone, hammer_off_t offset)
	hammer_blockmap_t blockmap;
	hammer_off_t zone_base;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

	assert(hammer_is_zone2_mapped_index(zone));

	blockmap = &root_vol->ondisk->vol0_blockmap[zone];
	zone_base = HAMMER_ZONE_ENCODE(zone, offset);

	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = 0;
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL;
 * initialize_freemap() will then load each volume's freemap.
 */
format_freemap(struct volume_info *root_vol)
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < HAMMER_BIGBLOCK_SIZE; i += sizeof(*layer1)) {
		isnew = ((i % HAMMER_BUFSIZE) == 0);
		layer1 = get_buffer_data(layer1_offset + i, &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	assert(i == HAMMER_BIGBLOCK_SIZE);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = layer1_offset;
	blockmap->first_offset = 0;
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
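/*
 * Background note (added for clarity, not in the original source): the
 * freemap is a two-level radix structure.  A single big-block of layer1
 * entries (allocated above) points to layer2 big-blocks, and each layer2
 * entry describes one HAMMER_BIGBLOCK_SIZE big-block (8MB in HAMMER).
 * The loop above crosses into a new 16KB buffer every
 * HAMMER_BUFSIZE / sizeof(*layer1) entries, which is why isnew is
 * recomputed on each iteration.
 */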
/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
initialize_freemap(struct volume_info *vol)
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t aligned_vol_free_end;
	hammer_blockmap_t freemap;
	int64_t layer1_count = 0;

	root_vol = get_root_volume();
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the big-blocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	layer1_base = freemap->phys_offset;

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			bzero(layer2, sizeof(*layer2));

			if (phys_offset + block_offset < vol->vol_free_off) {
				/*
				 * Fixups XXX - big-blocks already allocated as part
				 * of the freemap bootstrap.
				 */
				if (layer2->zone == 0) {
					layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
					layer2->append_off = HAMMER_BIGBLOCK_SIZE;
					layer2->bytes_free = 0;
			} else if (phys_offset + block_offset < vol->vol_free_end) {
				layer2->append_off = 0;
				layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			} else {
				layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
			buffer2->cache.modified = 1;

		layer1->blocks_free += layer1_count;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer1->cache.modified = 1;

	rel_volume(root_vol);
/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
count_freemap(struct volume_info *vol)
	hammer_off_t phys_offset;
	hammer_off_t vol_free_off;
	hammer_off_t aligned_vol_free_end;

	vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	if (vol->vol_no == HAMMER_ROOT_VOLNO)
		vol_free_off += HAMMER_BIGBLOCK_SIZE;

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		vol_free_off += HAMMER_BIGBLOCK_SIZE;

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BIGBLOCK_SIZE) {
		if (phys_offset < vol_free_off) {
		} else if (phys_offset < vol->vol_free_end) {
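/*
 * Illustrative note (added for clarity, not in the original source): the
 * first loop reserves one big-block per HAMMER_BLOCKMAP_LAYER2 range for
 * the layer2 bootstrap, plus one extra big-block on the root volume, by
 * advancing vol_free_off.  The second loop then treats every big-block at
 * or above vol_free_off and below vol_free_end as available (the counter
 * increment itself is not shown in this excerpt).
 */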
/*
 * Format the undomap for the root volume.
 */
format_undomap(struct volume_info *root_vol, int64_t *undo_buffer_size)
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	struct hammer_volume_ondisk *ondisk;
	struct buffer_info *buffer = NULL;

	/* Only root volume needs formatting */
	assert(root_vol->vol_no == HAMMER_ROOT_VOLNO);
	ondisk = root_vol->ondisk;

	/*
	 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 big-blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 500MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
	undo_limit = *undo_buffer_size;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 500*1024*1024)
			undo_limit = 500*1024*1024;
	undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
		     ~HAMMER_BIGBLOCK_MASK64;
	if (undo_limit < HAMMER_BIGBLOCK_SIZE)
		undo_limit = HAMMER_BIGBLOCK_SIZE;
	if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	*undo_buffer_size = undo_limit;
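/*
 * Worked example (added for clarity, not part of the original source):
 * with no explicit undo-buffer size, a 200GB volume gives
 * 200GB / 1000 = ~200MB, which is below the 500MB floor and is raised to
 * 500MB; a 2TB volume gives ~2GB, which is then rounded to a multiple of
 * HAMMER_BIGBLOCK_SIZE and clamped to at most HAMMER_BIGBLOCK_SIZE *
 * HAMMER_UNDO_LAYER2 big-blocks; that cap works out to 1GB if big-blocks
 * are 8MB and HAMMER_UNDO_LAYER2 is 128, constants which are not visible
 * in this excerpt and are stated as an assumption.
 */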
	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
						HAMMER_ZONE_UNDO_INDEX);
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /

	scan = blockmap->first_offset;

	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
				crc32(head + 1, bytes - sizeof(*head));
/*
 * Flush various tracking structures to disk.
 */
flush_all_volumes(void)
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);

flush_volume(struct volume_info *volume)
	struct buffer_info *buffer;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);

	if (writehammerbuf(volume, volume->ondisk, 0) == -1)
		err(1, "Write volume %d (%s)", volume->vol_no, volume->name);
	volume->cache.modified = 0;

flush_buffer(struct buffer_info *buffer)
	struct volume_info *vol;

	vol = buffer->volume;
	if (writehammerbuf(vol, buffer->ondisk, buffer->raw_offset) == -1)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
	buffer->cache.modified = 0;
/*
 * Core I/O operations.
 */
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		return(-1);
	return(0);

writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		return(-1);
	return(0);
int64_t
init_boot_area_size(int64_t value, off_t avg_vol_size)
		value = HAMMER_BOOT_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
		if (value < HAMMER_BOOT_MINBYTES)
	} else if (value < HAMMER_BOOT_MINBYTES) {
		value = HAMMER_BOOT_MINBYTES;

int64_t
init_mem_area_size(int64_t value, off_t avg_vol_size)
		value = HAMMER_MEM_NOMBYTES;
		while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
		if (value < HAMMER_MEM_MINBYTES)
	} else if (value < HAMMER_MEM_MINBYTES) {
		value = HAMMER_MEM_MINBYTES;
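/*
 * Illustrative note (added for clarity, not in the original source): both
 * sizing helpers follow the same pattern.  When no size was specified they
 * start from the nominal constant and shrink it (the loop body is not
 * shown in this excerpt) until it no longer exceeds
 * avg_vol_size / HAMMER_MAX_VOLUMES, while an explicitly specified size is
 * only raised to the corresponding minimum constant.
 */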