/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.18 2008/05/12 05:13:48 dillon Exp $
 */
#include <sys/types.h>

#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
			hammer_off_t owner);
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);
int64_t	UndoBufferSize;
int	UsingSuperClusters;
int	RootVolNo = -1;

struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
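
/*
 * Hash a buffer offset into one of the volume's buffer lists.  The hash is
 * taken from the HAMMER_BUFSIZE-aligned buffer number masked by
 * HAMMER_BUFLISTMASK.
 */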
static __inline int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i;
	int n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0)
		err(1, "setup_volume: %s: Open failed", filename);
	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;
	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
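
/*
 * Look up a volume that has already been set up.  The program exits via
 * errx() if the requested volume number does not exist.
 */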
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);
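
	/*
	 * Locate an existing buffer_info structure in this volume's hash
	 * bucket, or create and cache a new one if the buffer has not been
	 * seen before.
	 */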
	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		fprintf(stderr, "get_buffer %016llx %016llx\n",
			orig_offset, buf_offset);
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
					(buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				    "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}
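
/*
 * Release a buffer reference.  When the last reference goes away and the
 * buffer is marked for deletion it is flushed if modified, unhashed from
 * its volume and dropped from the hammer cache.
 */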
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}
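
/*
 * Return a pointer to the requested data within the buffer backing
 * buf_offset, recycling *bufferp if it currently references a different
 * buffer.
 */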
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew > 0 ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
		(int32_t)(node_offset & HAMMER_BUFMASK)));
}

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
hammer_node_ondisk_t
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}
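
/*
 * Allocate a record element and, when data_len is non-zero, its data.
 * Data smaller than a buffer comes from the small-data zone; a full
 * buffer's worth comes from the large-data zone (larger allocations are
 * not supported here).
 */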
hammer_record_ondisk_t
alloc_record_element(hammer_off_t *offp, int32_t data_len, void **datap)
{
	struct buffer_info *record_buffer = NULL;
	struct buffer_info *data_buffer = NULL;
	hammer_record_ondisk_t rec;

	rec = alloc_blockmap(HAMMER_ZONE_RECORD_INDEX, sizeof(*rec),
			     offp, &record_buffer);
	bzero(rec, sizeof(*rec));

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		*datap = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else if (data_len) {
		*datap = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else {
		*datap = NULL;
	}
	/* XXX buf not released, ptr remains valid */
	return(rec);
}
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The initialize
 * code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i;
	int isnew;

	layer1_offset = alloc_bigblock(root_vol, 0);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}
/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;
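
	/*
	 * The volume's free-space end is rounded up to a layer2 boundary so
	 * the bootstrap and fill loops below can walk the volume in whole
	 * layer2 and large-block steps.
	 */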
	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol, 0);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}
	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);
		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->u.owner = HAMMER_BLOCKMAP_FREE;
			++count;
		} else {
			layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Update the layer1 CRC once the layer2 entries it covers
		 * have been filled in.
		 */
		if (((phys_offset + HAMMER_LARGEBLOCK_SIZE) &
		     HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap
	 */
	if (owner) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
		layer2->u.owner = owner;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	struct hammer_blockmap_layer2 *layer2;
	int n;
	int limit_index;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0)
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	layer2 = &ondisk->vol0_undo_array[0];
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);
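
	/*
	 * The first limit_index undo-array entries each receive a bigblock
	 * owned by the undo zone; the remaining entries are marked
	 * unavailable.
	 */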
	for (n = 0; n < limit_index; ++n) {
		layer2->u.phys_offset = alloc_bigblock(NULL, scan);
		layer2->bytes_free = -1;	/* not used */
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		++layer2;
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer2->bytes_free = -1;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		++layer2;
		++n;
	}
}
/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * the zone's data bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off)
{
	blockmap->phys_offset = alloc_bigblock(NULL, zone_off);
	blockmap->alloc_offset = zone_off;
	blockmap->first_offset = zone_off;
	blockmap->next_offset = zone_off;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}
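
/*
 * Allocate bytes from the specified blockmap zone.  New layer1, layer2 and
 * big-block backing store is created as the zone's alloc_offset crosses the
 * corresponding boundaries.  The zone offset is returned via *result_offp
 * and a pointer into the backing buffer is returned.
 */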
static void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t rootmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t bigblock_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	rootmap = &volume->ondisk->vol0_blockmap[zone];

	/*
	 * Alignment and buffer-boundary issues
	 */
	bytes = (bytes + 7) & ~7;
	if ((rootmap->phys_offset ^ (rootmap->phys_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		rootmap->phys_offset = (rootmap->phys_offset + bytes) &
				       ~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1
	 */
	layer1_offset = rootmap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
		buffer1->cache.modified = 1;
		bzero(layer1, sizeof(*layer1));
		layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
		layer1->phys_offset = alloc_bigblock(NULL,
						     rootmap->alloc_offset);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
		buffer2->cache.modified = 1;
		bzero(layer2, sizeof(*layer2));
		layer2->u.phys_offset = alloc_bigblock(NULL,
						       rootmap->alloc_offset);
		layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
		--layer1->blocks_free;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	layer2->bytes_free -= bytes;
	*result_offp = rootmap->alloc_offset;
	rootmap->alloc_offset += bytes;
	rootmap->next_offset = rootmap->alloc_offset;

	bigblock_offset = layer2->u.phys_offset +
			  (*result_offp & HAMMER_LARGEBLOCK_MASK);

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	ptr = get_buffer_data(bigblock_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(volume);
	return(ptr);
}
/*
 * Reserve space from the FIFO.  Make sure that bytes does not cross a
 * buffer boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
		  struct buffer_info **bufp, u_int16_t hdr_type)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t off;
	int32_t aligned_bytes;

	aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
			 HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;
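
	/*
	 * The reservation covers base_bytes (which begins with the fifo
	 * head), the caller's extension bytes and the fifo tail, rounded
	 * up to HAMMER_HEAD_ALIGN.
	 */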
	volume = get_volume(RootVolNo);
	off = volume->ondisk->vol0_fifo_end;

	/*
	 * For now don't deal with transitions across buffer boundaries,
	 * only newfs_hammer uses this function.
	 */
	assert((off & ~HAMMER_BUFMASK64) ==
	       ((off + aligned_bytes) & ~HAMMER_BUFMASK));

	*bufp = buf = get_buffer(off, 0);

	buf->cache.modified = 1;
	volume->cache.modified = 1;

	head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
	bzero(head, base_bytes);

	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = aligned_bytes;
	head->hdr_seq = volume->ondisk->vol0_next_seq++;

	tail = (void *)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = hdr_type;
	tail->tail_size = aligned_bytes;

	volume->ondisk->vol0_fifo_end += aligned_bytes;
	volume->cache.modified = 1;

	rel_volume(volume);
	return(off);
}
/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}
void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
}
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}