/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "hammer_util.h"
static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
			struct buffer_info **bufferp, int isnew);
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);
int64_t UndoBufferSize;
int	UsingSuperClusters;
int	NumVolumes;
int	RootVolNo = -1;
int	UseReadBehind = -4;
int	UseReadAhead = 4;
int	AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
static int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}
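/*
 * A worked example (assuming the usual 16KB HAMMER_BUFSIZE): a buf_offset
 * of 0x8000 yields 0x8000 / 16384 = 2, so the buffer hangs off bucket
 * (2 & HAMMER_BUFLISTMASK) of its volume's buffer_lists[].  Every byte
 * offset within the same big-buffer hashes to the same bucket.
 */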
/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0)
		err(1, "setup_volume: %s: Open failed", filename);

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew > 0) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
struct volume_info *
test_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		return(NULL);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}
void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;
	int dora = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
	}
	if (buf_offset == HAMMER_OFF_BAD)
		return(NULL);

	if (AssertOnFailure) {
		assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		       HAMMER_ZONE_RAW_BUFFER);
	}
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = test_volume(vol_no);
	if (volume == NULL) {
		if (AssertOnFailure)
			errx(1, "get_buffer: Volume %d not found!", vol_no);
		return(NULL);
	}

	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
				(long long)orig_offset, (long long)buf_offset,
				buf);
		}
		buf->buf_offset = buf_offset;
		buf->raw_offset = volume->ondisk->vol_buf_beg +
				  (buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
		dora = (isnew == 0);
		if (isnew < 0)
			buf->flags |= HAMMER_BUFINFO_READAHEAD;
	} else {
		if (DebugOpt) {
			fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
				(long long)orig_offset, (long long)buf_offset,
				buf);
		}
		if (isnew >= 0)
			buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
		hammer_cache_used(&buf->cache);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->raw_offset);
			if (n != HAMMER_BUFSIZE) {
				if (AssertOnFailure)
					err(1, "get_buffer: %s:%016llx "
					    "Read failed at offset %016llx",
					    volume->name,
					    (long long)buf->buf_offset,
					    (long long)buf->raw_offset);
				bzero(ondisk, HAMMER_BUFSIZE);
			}
		}
	}
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (dora)
		get_buffer_readahead(buf);
	return(buf);
}
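/*
 * Note on the isnew convention used by get_buffer() above: isnew > 0
 * skips the pread() and returns a zeroed, modified buffer; isnew == 0
 * reads the buffer from disk and queues read-ahead when the buffer_info
 * had to be created; isnew < 0 is used by the read-ahead path itself so
 * a speculative read does not recursively trigger more read-ahead.
 */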
static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;
	int hi;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
			     HAMMER_ZONE_RAW_BUFFER |
			     (raw_offset - vol->ondisk->vol_buf_beg);
		hi = buffer_hash(raw_offset);
		TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
			if (buf->raw_offset == raw_offset)
				break;
		}
		if (buf == NULL) {
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}
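/*
 * With the default UseReadBehind = -4 and UseReadAhead = 4, the loop
 * above touches raw offsets from four big-buffers behind base through
 * three ahead of it (clipped to the volume's buffer area), so sequential
 * scans pre-populate the cache in both directions around the buffer
 * actually requested.
 */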
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}
/*
 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	if (*bufferp != NULL) {
		if (isnew > 0 ||
		    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(*bufferp);
			*bufferp = NULL;
		}
	}
	return(get_ondisk(buf_offset, bufferp, isnew));
}
/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
	if (*bufferp != NULL) {
		rel_buffer(*bufferp);
		*bufferp = NULL;
	}
	return(get_ondisk(node_offset, bufferp, 0));
}
/*
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL acquire the buffer otherwise use that buffer.
 */
static __inline void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
{
	struct buffer_info *buffer;

	buffer = *bufferp;
	if (buffer == NULL) {
		buffer = *bufferp = get_buffer(buf_offset, isnew);
		if (buffer == NULL)
			return(NULL);
	}

	return((char *)buffer->ondisk +
		((int32_t)buf_offset & HAMMER_BUFMASK));
}
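/*
 * The pointer math above relies on HAMMER_BUFMASK covering the low bits
 * of a buffer offset: (buf_offset & HAMMER_BUFMASK) is the byte offset
 * within the big-buffer, added to the in-memory image at buffer->ondisk.
 */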
/*
 * Allocate HAMMER elements - btree nodes, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE);	/* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return(data);
}
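/*
 * Given the assert above, the large-data branch is only reached when
 * data_len is exactly HAMMER_BUFSIZE; everything smaller is carved out
 * of the small-data zone, and a zero data_len allocates nothing.
 */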
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}
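/*
 * The freemap's alloc_offset is encoded above as the maximum conceivable
 * raw-buffer address (volume 255, short offset -1), so every raw offset
 * the filesystem can generate falls inside the formatted range; layer1
 * entries all start out UNAVAIL until initialize_freemap() fills them in.
 */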
/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BIGBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
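/*
 * Summary of the three cases handled per big-block above: blocks below
 * vol_free_off were consumed bootstrapping the freemap itself and are
 * charged to the freemap zone; blocks up to vol_free_end are counted
 * free; the alignment padding past vol_free_end is marked UNAVAIL.
 */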
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
static hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}

	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer1->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer2, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_BIGBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer1);
		rel_buffer(buffer2);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
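/*
 * alloc_bigblock() is deliberately append-only: vol_free_off is a simple
 * linear iterator and nothing is ever freed during filesystem creation,
 * which is why running out of room is immediately fatal.
 */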
/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	u_int32_t seqno;

	/*
	 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 big blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 500MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 500*1024*1024)
			undo_limit = 500*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
		     ~HAMMER_BIGBLOCK_MASK64;
	if (undo_limit < HAMMER_BIGBLOCK_SIZE)
		undo_limit = HAMMER_BIGBLOCK_SIZE;
	if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
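	/*
	 * Worked example of the sizing rules above (assuming the usual 8MB
	 * HAMMER_BIGBLOCK_SIZE): a 50GB volume computes 50GB/1000 = 50MB,
	 * which is below the 500MB floor, so 500MB is used; a 2TB volume
	 * computes 2GB, which is then clamped to the
	 * HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2 ceiling.  The result
	 * is rounded up to a big-block multiple before the clamps apply.
	 */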
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_BIGBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
				crc32(head + 1, bytes - sizeof(*head));

		scan += bytes;
	}
	if (buffer)
		rel_buffer(buffer);
}
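/*
 * Each pre-initialized UNDO record above is a dummy fifo entry spanning
 * one HAMMER_UNDO_ALIGN-sized chunk: the head sits at the front, the
 * tail is flush with the end of the chunk, and hdr_crc covers the head
 * (up to HAMMER_FIFO_HEAD_CRCOFF) xor'd with a CRC of the remainder,
 * the same layout the recovery code expects when scanning the fifo.
 */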
/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;
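	/*
	 * For example, a 100-byte request becomes (100 + 15) & ~15 = 112
	 * bytes, keeping every allocation 16-byte aligned; the check below
	 * then bumps next_offset to a fresh buffer if those bytes would
	 * straddle a buffer boundary.
	 */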
again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset =
			(blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
			~HAMMER_BIGBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			     HAMMER_BIGBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(volume);
	return(ptr);
}
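/*
 * Note that the pointer handed back above comes from re-translating the
 * zone-encoded result offset through get_buffer_data(), so *bufferp ends
 * up referencing the underlying raw buffer and stays valid (and marked
 * modified) for the caller to fill in.
 */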
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
	buffer->cache.modified = 0;
}
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}