2 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 struct recover_dict *next;	/* hash-chain link in RDHash[] (see get_dict) */
41 struct recover_dict *parent;	/* parent directory entry, if known */
50 #define DICTF_MADEDIR 0x01	/* directory has been mkdir()ed on target */
51 #define DICTF_MADEFILE 0x02	/* file has been created on target */
52 #define DICTF_PARENT 0x04 /* parent attached for real */
53 #define DICTF_TRAVERSED 0x80	/* cycle guard used by recover_path_helper() */
55 typedef struct bigblock *bigblock_t;
57 static void recover_top(char *ptr, hammer_off_t offset);
58 static void recover_elm(hammer_btree_leaf_elm_t leaf);
59 static struct recover_dict *get_dict(int64_t obj_id, uint16_t pfs_id);
60 static char *recover_path(struct recover_dict *dict);
61 static void sanitize_string(char *str);
62 static hammer_off_t scan_raw_limit(void);
63 static void scan_bigblocks(int target_zone);
64 static void free_bigblocks(void);
65 static void add_bigblock_entry(hammer_off_t offset,
66 hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2);
67 static bigblock_t get_bigblock_entry(hammer_off_t offset);
69 static const char *TargetDir;
70 static int CachedFd = -1;
71 static char *CachedPath;
/*
 * Per big-block record kept in ZoneTree (an RB tree keyed by the
 * zone-2 physical offset).  Caches the layer1/layer2 freemap entries
 * so the scan loop can consult them without re-reading the blockmap.
 */
73 typedef struct bigblock {
74 RB_ENTRY(bigblock) entry;
75 hammer_off_t phys_offset; /* zone-2 */
76 struct hammer_blockmap_layer1 layer1;
77 struct hammer_blockmap_layer2 layer2;
81 bigblock_cmp(bigblock_t b1, bigblock_t b2)
83 if (b1->phys_offset < b2->phys_offset)
85 if (b1->phys_offset > b2->phys_offset)
90 RB_HEAD(bigblock_rb_tree, bigblock) ZoneTree = RB_INITIALIZER(&ZoneTree);
91 RB_PROTOTYPE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t);
92 RB_GENERATE2(bigblock_rb_tree, bigblock, entry, bigblock_cmp, hammer_off_t,
96 * There was a hidden bug here while iterating zone-2 offset as
97 * shown in an example below.
99 * If a volume was once used as HAMMER filesystem which consists of
100 * multiple volumes whose usage has reached beyond the first volume,
101 * and then later re-formatted only using 1 volume, hammer recover is
102 * likely to hit assertion in get_buffer() due to having access to
103 * invalid volume (vol1,2,...) from old filesystem data.
105 * To avoid this, now the command only scans up to the last big-block
106 * that's actually used for filesystem data or meta-data at the moment,
107 * if all layer1/2 entries have correct CRC values. This also avoids
108 * recovery of irrelevant files from old filesystem.
110 * It also doesn't scan beyond append offset of big-blocks in B-Tree
111 * zone to avoid recovery of irrelevant files from old filesystem,
112 * if layer1/2 entries for those big-blocks have correct CRC values.
114 * |-----vol0-----|-----vol1-----|-----vol2-----| old filesystem
115 * <-----------------------> used by old filesystem
117 * |-----vol0-----| new filesystem
118 * <-----> used by new filesystem
119 * <-------> unused, invalid data from old filesystem
120 * <-> B-Tree nodes likely to point to vol1
/*
 * hammer_cmd_recover - implement "hammer recover <target_dir> [full|quick]".
 *
 * Raw-scans every volume buffer-by-buffer looking for B-Tree nodes and
 * recovers files into TargetDir.  Two scan limits bound the walk so that
 * stale data from a previous (larger) filesystem is not resurrected: see
 * the long comment block above for the rationale.
 */
124 hammer_cmd_recover(char **av, int ac)
126 buffer_info_t data_buffer;
127 volume_info_t volume;
130 hammer_off_t off_end;
131 hammer_off_t off_blk;
132 hammer_off_t raw_limit = 0;
133 hammer_off_t zone_limit = 0;
136 int target_zone = HAMMER_ZONE_BTREE_INDEX;
141 errx(1, "hammer recover <target_dir> [full|quick]");
147 if (!strcmp(av[1], "full"))
149 if (!strcmp(av[1], "quick"))
152 assert(!full || !quick);
/* Create the recovery target directory; an existing one is fine */
154 if (mkdir(TargetDir, 0777) == -1) {
155 if (errno != EEXIST) {
161 printf("Running %sraw scan of HAMMER image, recovering to %s\n",
162 full ? "full " : quick ? "quick " : "",
/*
 * Compute the scan limits: zone_limit from the big-blocks actually
 * assigned to the target (B-Tree) zone, raw_limit from the freemap
 * walk (0 if any layer1/layer2 CRC failed).
 */
166 scan_bigblocks(target_zone);
167 raw_limit = scan_raw_limit();
169 raw_limit += HAMMER_BIGBLOCK_SIZE;
170 assert(hammer_is_zone_raw_buffer(raw_limit));
176 if (!RB_EMPTY(&ZoneTree)) {
177 printf("Found zone-%d big-blocks at\n", target_zone);
178 RB_FOREACH(b, bigblock_rb_tree, &ZoneTree)
179 printf("%016jx\n", b->phys_offset);
/* zone_limit = end of the highest big-block seen in the target zone */
181 b = RB_MAX(bigblock_rb_tree, &ZoneTree);
182 zone_limit = b->phys_offset + HAMMER_BIGBLOCK_SIZE;
183 assert(hammer_is_zone_raw_buffer(zone_limit));
187 if (raw_limit || zone_limit) {
188 #define _fmt "Scanning zone-%d big-blocks till %016jx"
189 if (!raw_limit) /* unlikely */
190 printf(_fmt" ???", target_zone, zone_limit);
191 else if (!zone_limit)
192 printf(_fmt, HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit);
193 else if (raw_limit >= zone_limit)
194 printf(_fmt, target_zone, zone_limit);
196 printf(_fmt" ???", HAMMER_ZONE_RAW_BUFFER_INDEX, raw_limit);
/*
 * Main scan: iterate all possible volumes, walking each one a
 * HAMMER_BUFSIZE buffer at a time and feeding candidate buffers
 * to recover_top().
 */
201 for (i = 0; i < HAMMER_MAX_VOLUMES; i++) {
202 volume = get_volume(i);
206 printf("Scanning volume %d size %s\n",
207 volume->vol_no, sizetostr(volume->size));
208 off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
209 off_end = off + HAMMER_VOL_BUF_SIZE(volume->ondisk);
211 while (off < off_end) {
212 off_blk = off & HAMMER_BIGBLOCK_MASK64;
214 b = get_bigblock_entry(off);
217 if (off >= raw_limit) {
218 printf("Done %016jx\n", (uintmax_t)off);
223 if (off >= zone_limit) {
224 printf("Done %016jx\n", (uintmax_t)off);
/* Skip the remainder of a big-block we won't scan */
228 off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off);
/*
 * If the cached layer1/layer2 CRCs are good, do not scan past
 * the big-block's append offset (avoids stale old-fs data).
 */
234 if (hammer_crc_test_layer1(HammerVersion,
236 hammer_crc_test_layer2(HammerVersion,
238 off_blk >= b->layer2.append_off) {
239 off = HAMMER_ZONE_LAYER2_NEXT_OFFSET(off);
244 ptr = get_buffer_data(off, &data_buffer, 0);
246 recover_top(ptr, off);
247 off += HAMMER_BUFSIZE;
251 rel_buffer(data_buffer);
/*
 * Debug helper: print one B-Tree node as
 * "<offset> <node type> <element count> <btype char per element>".
 */
264 print_node(hammer_node_ondisk_t node, hammer_off_t offset)
266 char buf[HAMMER_BTREE_LEAF_ELMS + 1];
267 int maxcount = hammer_node_max_elements(node->type);
/* One character per in-range element; btype summarizes the element kind */
270 for (i = 0; i < node->count && i < maxcount; ++i)
271 buf[i] = hammer_elm_btype(&node->elms[i]);
274 printf("%016jx %c %d %s\n", offset, node->type, node->count, buf);
278 * Top level recovery processor. Assume the data is a B-Tree node.
279 * If the CRC is good we attempt to process the node, building the
280 * object space and creating the dictionary as we go.
284 recover_top(char *ptr, hammer_off_t offset)
286 hammer_node_ondisk_t node;
287 hammer_btree_elm_t elm;
/*
 * Walk the buffer one ondisk-node-sized step at a time; any slot
 * whose B-Tree CRC validates is treated as a real node.
 */
292 for (node = (void *)ptr; (char *)node < ptr + HAMMER_BUFSIZE; ++node) {
293 isnode = hammer_crc_test_btree(HammerVersion, node);
294 maxcount = hammer_node_max_elements(node->type);
298 print_node(node, offset);
299 else if (DebugOpt > 1)
300 printf("%016jx -\n", offset);
302 offset += sizeof(*node);
/* Only leaf nodes carry records we can recover */
304 if (isnode && node->type == HAMMER_BTREE_TYPE_LEAF) {
305 for (i = 0; i < node->count && i < maxcount; ++i) {
306 elm = &node->elms[i];
307 if (elm->base.btype == HAMMER_BTREE_TYPE_RECORD)
308 recover_elm(&elm->leaf);
/*
 * Process one B-Tree leaf record.  Depending on rec_type this either
 * creates/renames an inode's file or directory (INODE), writes file
 * data at the record's key offset (DATA), or names/attaches an object
 * in the directory hierarchy (DIRENTRY).  The recover_dict hash keeps
 * track of what has been materialized so far.
 */
316 recover_elm(hammer_btree_leaf_elm_t leaf)
318 buffer_info_t data_buffer = NULL;
319 struct recover_dict *dict;
320 struct recover_dict *dict2;
321 hammer_data_ondisk_t ondisk;
322 hammer_off_t data_offset;
336 * Ignore deleted records
342 * If we're running full scan, it's possible that data_offset
343 * refers to old filesystem data that we can't physically access.
345 data_offset = leaf->data_offset;
346 if (get_volume(HAMMER_VOL_DECODE(data_offset)) == NULL)
349 if (data_offset != 0)
350 ondisk = get_buffer_data(data_offset, &data_buffer, 0);
/* chunk = bytes remaining in the current HAMMER buffer */
356 len = leaf->data_len;
357 chunk = HAMMER_BUFSIZE - ((int)data_offset & HAMMER_BUFMASK);
/* Sanity-check the record's data length before trusting it */
361 if (len < 0 || len > HAMMER_XBUFSIZE || len > chunk)
364 pfs_id = lo_to_pfs(leaf->base.localization);
367 * Note that meaning of leaf->base.obj_id differs depending
368 * on record type. For a direntry, leaf->base.obj_id points
369 * to its parent inode that this entry is a part of, but not
370 * its corresponding inode.
372 dict = get_dict(leaf->base.obj_id, pfs_id);
374 switch(leaf->base.rec_type) {
375 case HAMMER_RECTYPE_INODE:
377 * We found an inode which also tells us where the file
378 * or directory is in the directory hierarchy.
381 printf("inode %016jx:%05d found\n",
382 (uintmax_t)leaf->base.obj_id, pfs_id);
384 path1 = recover_path(dict);
387 * Attach the inode to its parent. This isn't strictly
388 * necessary because the information is also in the
389 * directory entries, but if we do not find the directory
390 * entry this ensures that the files will still be
391 * reasonably well organized in their proper directories.
393 if ((dict->flags & DICTF_PARENT) == 0 &&
394 dict->obj_id != HAMMER_OBJID_ROOT &&
395 ondisk->inode.parent_obj_id != 0) {
396 dict->flags |= DICTF_PARENT;
397 dict->parent = get_dict(ondisk->inode.parent_obj_id,
400 (dict->parent->flags & DICTF_MADEDIR) == 0) {
401 dict->parent->flags |= DICTF_MADEDIR;
402 path2 = recover_path(dict->parent);
403 printf("mkdir %s\n", path2);
/* Record the inode's type and authoritative size in the dictionary */
409 if (dict->obj_type == 0)
410 dict->obj_type = ondisk->inode.obj_type;
411 dict->size = ondisk->inode.size;
412 path2 = recover_path(dict);
414 if (lstat(path1, &st) == 0) {
415 if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) {
416 truncate(path1, dict->size);
417 /* chmod(path1, 0666); */
419 if (strcmp(path1, path2)) {
420 printf("Rename (inode) %s -> %s\n", path1, path2);
421 rename(path1, path2);
423 } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_REGFILE) {
424 printf("mkinode (file) %s\n", path2);
425 fd = open(path2, O_RDWR|O_CREAT, 0666);
428 } else if (ondisk->inode.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
429 printf("mkinode (dir) %s\n", path2);
431 dict->flags |= DICTF_MADEDIR;
436 case HAMMER_RECTYPE_DATA:
/* obj_id 0 cannot name a real file */
440 if (leaf->base.obj_id == 0)
443 printf("inode %016jx:%05d data %016jx,%d\n",
444 (uintmax_t)leaf->base.obj_id,
446 (uintmax_t)leaf->base.key - len,
451 * Update the dictionary entry
453 if (dict->obj_type == 0)
454 dict->obj_type = HAMMER_OBJTYPE_REGFILE;
457 * If the parent directory has not been created we
458 * have to create it (typically a PFS%05d)
461 (dict->parent->flags & DICTF_MADEDIR) == 0) {
462 dict->parent->flags |= DICTF_MADEDIR;
463 path2 = recover_path(dict->parent);
464 printf("mkdir %s\n", path2);
471 * Create the file if necessary, report file creations
473 path1 = recover_path(dict);
474 if (CachedPath && strcmp(CachedPath, path1) == 0)
477 fd = open(path1, O_CREAT|O_RDWR, 0666);
479 printf("Unable to create %s: %s\n",
480 path1, strerror(errno));
484 if ((dict->flags & DICTF_MADEFILE) == 0) {
485 dict->flags |= DICTF_MADEFILE;
486 printf("mkfile %s\n", path1);
490 * And write the record. A HAMMER data block is aligned
491 * and may contain trailing zeros after the file EOF. The
492 * inode record is required to get the actual file size.
494 * However, when the inode record is not available
495 * we can do a sparse write and that will get it right
496 * most of the time even if the inode record is never
/* key is the end offset of the data; back up by len for the start */
499 file_offset = (int64_t)leaf->base.key - len;
500 lseek(fd, (off_t)file_offset, SEEK_SET);
/* size unknown (-1): trim trailing zeros and seek over them (sparse) */
502 if (dict->size == -1) {
503 for (zfill = chunk - 1; zfill >= 0; --zfill) {
504 if (((char *)ondisk)[zfill])
513 write(fd, ondisk, zfill);
515 lseek(fd, chunk - zfill, SEEK_CUR);
/* Advance to the next HAMMER buffer covering this record */
518 data_offset += chunk;
519 file_offset += chunk;
520 ondisk = get_buffer_data(data_offset, &data_buffer, 0);
523 chunk = HAMMER_BUFSIZE -
524 ((int)data_offset & HAMMER_BUFMASK);
/* Known size: never let the file grow past the inode's size */
528 if (dict->size >= 0 && file_offset > dict->size) {
529 ftruncate(fd, dict->size);
530 /* fchmod(fd, 0666); */
/* Keep at most one cached open fd/path between records */
533 if (fd == CachedFd) {
535 } else if (CachedPath) {
545 case HAMMER_RECTYPE_DIRENTRY:
546 nlen = len - HAMMER_ENTRY_NAME_OFF;
547 if ((int)nlen < 0) /* illegal length */
549 if (ondisk->entry.obj_id == 0 ||
550 ondisk->entry.obj_id == HAMMER_OBJID_ROOT) {
/* Copy out the (not NUL-terminated on disk) entry name */
553 name = malloc(nlen + 1);
554 bcopy(ondisk->entry.name, name, nlen);
556 sanitize_string(name);
559 printf("dir %016jx:%05d entry %016jx \"%s\"\n",
560 (uintmax_t)leaf->base.obj_id,
562 (uintmax_t)ondisk->entry.obj_id,
567 * We can't deal with hardlinks so if the object already
568 * has a name assigned to it we just keep using that name.
570 dict2 = get_dict(ondisk->entry.obj_id, pfs_id);
571 path1 = recover_path(dict2);
573 if (dict2->name == NULL)
579 * Attach dict2 to its directory (dict), create the
580 * directory (dict) if necessary. We must ensure
581 * that the directory entry exists in order to be
582 * able to properly rename() the file without creating
583 * a namespace conflict.
585 if ((dict2->flags & DICTF_PARENT) == 0) {
586 dict2->flags |= DICTF_PARENT;
587 dict2->parent = dict;
588 if ((dict->flags & DICTF_MADEDIR) == 0) {
589 dict->flags |= DICTF_MADEDIR;
590 path2 = recover_path(dict);
591 printf("mkdir %s\n", path2);
/* Move the object to its proper directory-entry name if it exists */
597 path2 = recover_path(dict2);
598 if (strcmp(path1, path2) != 0 && lstat(path1, &st) == 0) {
599 printf("Rename (entry) %s -> %s\n", path1, path2);
600 rename(path1, path2);
607 * Ignore any other record types
612 rel_buffer(data_buffer);
/* Hash table of recover_dict entries, chained via dict->next */
615 #define RD_HSIZE 32768
616 #define RD_HMASK (RD_HSIZE - 1)
618 struct recover_dict *RDHash[RD_HSIZE];
/*
 * Look up (or create) the dictionary entry for (obj_id, pfs_id).
 * The hash bucket is computed from obj_id only; pfs_id disambiguates
 * within the chain.  New dangling entries are parented to the PFS
 * root object until the real parent is discovered.
 */
621 struct recover_dict *
622 get_dict(int64_t obj_id, uint16_t pfs_id)
624 struct recover_dict *dict;
630 i = crc32(&obj_id, sizeof(obj_id)) & RD_HMASK;
631 for (dict = RDHash[i]; dict; dict = dict->next) {
632 if (dict->obj_id == obj_id && dict->pfs_id == pfs_id)
/* Not found: allocate a zeroed entry and push it on the chain */
637 dict = malloc(sizeof(*dict));
638 bzero(dict, sizeof(*dict));
639 dict->obj_id = obj_id;
640 dict->pfs_id = pfs_id;
641 dict->next = RDHash[i];
646 * Always connect dangling dictionary entries to object 1
647 * (the root of the PFS).
649 * DICTF_PARENT will not be set until we know what the
650 * real parent directory object is.
652 if (dict->obj_id != HAMMER_OBJID_ROOT)
653 dict->parent = get_dict(HAMMER_OBJID_ROOT, pfs_id);
659 enum { PI_FIGURE, PI_LOAD } state;
666 static void recover_path_helper(struct recover_dict *, struct path_info *);
/*
 * Build the full on-target path for a dictionary entry using a
 * two-pass helper: PI_FIGURE measures the required length, then
 * PI_LOAD fills the freshly malloc()ed buffer.  Caller owns the
 * returned string.
 */
670 recover_path(struct recover_dict *dict)
672 struct path_info info;
674 /* Find info.len first */
675 bzero(&info, sizeof(info));
676 info.state = PI_FIGURE;
677 recover_path_helper(dict, &info);
679 /* Fill in the path */
680 info.pfs_id = dict->pfs_id;
681 info.base = malloc(info.len);
682 info.next = info.base;
683 info.state = PI_LOAD;
684 recover_path_helper(dict, &info);
686 /* Return the path */
690 #define STRLEN_OBJID 22 /* "obj_0x%016jx" */
691 #define STRLEN_PFSID 8 /* "PFS%05d" */
/*
 * Recursive worker for recover_path().  In PI_FIGURE state it sums the
 * length of each path element from the root down; in PI_LOAD state it
 * writes the elements into info->next.  DICTF_TRAVERSED is set while a
 * node is on the recursion stack to break parent-pointer cycles, and is
 * cleared on the way out.
 */
695 recover_path_helper(struct recover_dict *dict, struct path_info *info)
698 * Calculate path element length
700 dict->flags |= DICTF_TRAVERSED;
702 switch(info->state) {
/* Element name priority: PFS root tag, assigned name, else obj_id tag */
704 if (dict->obj_id == HAMMER_OBJID_ROOT)
705 info->len += STRLEN_PFSID;
707 info->len += strlen(dict->name);
709 info->len += STRLEN_OBJID;
713 (dict->parent->flags & DICTF_TRAVERSED) == 0) {
714 recover_path_helper(dict->parent, info);
716 info->len += strlen(TargetDir) + 1;
721 (dict->parent->flags & DICTF_TRAVERSED) == 0) {
722 recover_path_helper(dict->parent, info);
724 strcpy(info->next, TargetDir);
725 info->next += strlen(info->next);
729 if (dict->obj_id == HAMMER_OBJID_ROOT) {
730 snprintf(info->next, STRLEN_PFSID + 1,
731 "PFS%05d", info->pfs_id);
732 } else if (dict->name) {
733 strcpy(info->next, dict->name);
735 snprintf(info->next, STRLEN_OBJID + 1,
736 "obj_0x%016jx", (uintmax_t)dict->obj_id);
738 info->next += strlen(info->next);
741 dict->flags &= ~DICTF_TRAVERSED;
746 sanitize_string(char *str)
/*
 * Body of scan_raw_limit() (signature above, see prototype): walk the
 * freemap layer1/layer2 entries and remember the last big-block offset
 * that a non-freemap zone actually uses.  Returns that offset translated
 * to zone-2, or 0 if any layer1/layer2 CRC check fails (meaning the
 * freemap cannot be trusted and no raw limit applies).
 */
759 volume_info_t volume;
760 hammer_blockmap_t rootmap;
761 hammer_blockmap_layer1_t layer1;
762 hammer_blockmap_layer2_t layer2;
763 buffer_info_t buffer1 = NULL;
764 buffer_info_t buffer2 = NULL;
765 hammer_off_t layer1_offset;
766 hammer_off_t layer2_offset;
767 hammer_off_t phys_offset;
768 hammer_off_t block_offset;
769 hammer_off_t offset = 0;
770 int zone = HAMMER_ZONE_FREEMAP_INDEX;
772 volume = get_root_volume();
773 rootmap = &volume->ondisk->vol0_blockmap[zone];
774 assert(rootmap->phys_offset != 0);
776 for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
777 phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
778 phys_offset += HAMMER_BLOCKMAP_LAYER2) {
782 layer1_offset = rootmap->phys_offset +
783 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
784 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
786 if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
787 offset = 0; /* failed */
790 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
793 for (block_offset = 0;
794 block_offset < HAMMER_BLOCKMAP_LAYER2;
795 block_offset += HAMMER_BIGBLOCK_SIZE) {
797 * Dive layer 2, each entry represents a big-block.
799 layer2_offset = layer1->phys_offset +
800 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
801 layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
803 if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
804 offset = 0; /* failed */
807 if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
/* Remember the highest big-block owned by a real data zone */
809 } else if (layer2->zone && layer2->zone != zone) {
810 offset = phys_offset + block_offset;
818 return(hammer_xlate_to_zone2(offset));
/*
 * Walk the freemap layer1/layer2 entries and record every big-block
 * belonging to target_zone into ZoneTree (via add_bigblock_entry).
 * Entries whose layer1/layer2 CRC fails are skipped.
 */
823 scan_bigblocks(int target_zone)
825 volume_info_t volume;
826 hammer_blockmap_t rootmap;
827 hammer_blockmap_layer1_t layer1;
828 hammer_blockmap_layer2_t layer2;
829 buffer_info_t buffer1 = NULL;
830 buffer_info_t buffer2 = NULL;
831 hammer_off_t layer1_offset;
832 hammer_off_t layer2_offset;
833 hammer_off_t phys_offset;
834 hammer_off_t block_offset;
835 hammer_off_t offset = 0;
836 int zone = HAMMER_ZONE_FREEMAP_INDEX;
838 volume = get_root_volume();
839 rootmap = &volume->ondisk->vol0_blockmap[zone];
840 assert(rootmap->phys_offset != 0);
842 for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
843 phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
844 phys_offset += HAMMER_BLOCKMAP_LAYER2) {
848 layer1_offset = rootmap->phys_offset +
849 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
850 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
853 if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
856 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
859 for (block_offset = 0;
860 block_offset < HAMMER_BLOCKMAP_LAYER2;
861 block_offset += HAMMER_BIGBLOCK_SIZE) {
862 offset = phys_offset + block_offset;
864 * Dive layer 2, each entry represents a big-block.
866 layer2_offset = layer1->phys_offset +
867 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
868 layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
871 if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
874 if (layer2->zone == target_zone) {
875 add_bigblock_entry(offset, layer1, layer2);
876 } else if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
/* Body of free_bigblocks(): drain every entry out of ZoneTree */
891 while ((b = RB_ROOT(&ZoneTree)) != NULL) {
892 RB_REMOVE(bigblock_rb_tree, &ZoneTree, b);
895 assert(RB_EMPTY(&ZoneTree));
/*
 * Allocate a bigblock record for the given freemap offset, keyed by its
 * zone-2 translation (must be big-block aligned), cache copies of the
 * layer1/layer2 entries, and insert it into ZoneTree.
 */
900 add_bigblock_entry(hammer_off_t offset,
901 hammer_blockmap_layer1_t layer1, hammer_blockmap_layer2_t layer2)
905 b = calloc(1, sizeof(*b));
906 b->phys_offset = hammer_xlate_to_zone2(offset);
907 assert((b->phys_offset & HAMMER_BIGBLOCK_MASK64) == 0);
908 bcopy(layer1, &b->layer1, sizeof(*layer1));
909 bcopy(layer2, &b->layer2, sizeof(*layer2));
911 RB_INSERT(bigblock_rb_tree, &ZoneTree, b);
/*
 * Find the ZoneTree entry covering 'offset': translate to zone-2 and
 * round down to the big-block boundary before the RB lookup.
 */
916 get_bigblock_entry(hammer_off_t offset)
920 offset = hammer_xlate_to_zone2(offset);
921 offset &= ~HAMMER_BIGBLOCK_MASK64;
923 b = RB_LOOKUP(bigblock_rb_tree, &ZoneTree, offset);