From 6ed4c8865ecf2f58625468bb0845d3884828ea1f Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Thu, 15 Apr 2010 20:41:18 -0700 Subject: [PATCH] HAMMER Util - Add 'checkmap', adjust hammer show * Add a checkmap function which scans the B-Tree, collects byte usage, and checks it against the blockmap. This is still experimental. * Adjust hammer show to flag zone mismatches instead of asserting. --- sbin/hammer/blockmap.c | 12 ++- sbin/hammer/cmd_blockmap.c | 207 +++++++++++++++++++++++++++++++++++-- sbin/hammer/cmd_show.c | 42 ++++++-- sbin/hammer/hammer.8 | 7 ++ sbin/hammer/hammer.c | 5 + sbin/hammer/hammer.h | 1 + sbin/hammer/hammer_util.h | 3 +- sbin/hammer/ondisk.c | 2 +- 8 files changed, 255 insertions(+), 24 deletions(-) diff --git a/sbin/hammer/blockmap.c b/sbin/hammer/blockmap.c index eb71241b54..8f3ffa24ae 100644 --- a/sbin/hammer/blockmap.c +++ b/sbin/hammer/blockmap.c @@ -39,7 +39,8 @@ hammer_off_t blockmap_lookup(hammer_off_t zone_offset, struct hammer_blockmap_layer1 *save_layer1, - struct hammer_blockmap_layer2 *save_layer2) + struct hammer_blockmap_layer2 *save_layer2, + int *errorp) { struct volume_info *root_volume; hammer_blockmap_t blockmap; @@ -54,6 +55,8 @@ blockmap_lookup(hammer_off_t zone_offset, int i; zone = HAMMER_ZONE_DECODE(zone_offset); + if (errorp) + *errorp = 0; assert(zone > HAMMER_ZONE_RAW_VOLUME_INDEX); assert(zone < HAMMER_MAX_ZONES); @@ -101,7 +104,12 @@ blockmap_lookup(hammer_off_t zone_offset, if (save_layer2) *save_layer2 = *layer2; - assert(layer2->zone == zone); + if (errorp) { + if (layer2->zone != zone) + *errorp = EDOM; + } else { + assert(layer2->zone == zone); + } if (buffer) rel_buffer(buffer); diff --git a/sbin/hammer/cmd_blockmap.c b/sbin/hammer/cmd_blockmap.c index e483369fdf..9eee17c879 100644 --- a/sbin/hammer/cmd_blockmap.c +++ b/sbin/hammer/cmd_blockmap.c @@ -36,22 +36,34 @@ #include "hammer.h" +typedef struct collect { + struct collect *hnext; + hammer_off_t phys_offset; + struct hammer_blockmap_layer2 *track2; + struct hammer_blockmap_layer2 *layer2; +} *collect_t; + +#define COLLECT_HSIZE 1024 +#define COLLECT_HMASK (COLLECT_HSIZE - 1) + +collect_t CollectHash[COLLECT_HSIZE]; + static void dump_blockmap(const char *label, int zone); +static void check_btree_node(hammer_off_t node_offset, int depth); +static void collect_btree_elm(hammer_btree_elm_t elm); +static struct hammer_blockmap_layer2 *collect_get_track( + collect_t collect, hammer_off_t offset, + struct hammer_blockmap_layer2 *layer2); +static collect_t collect_get(hammer_off_t phys_offset); +static void dump_collect_table(void); +static void dump_collect(collect_t collect); void hammer_cmd_blockmap(void) { dump_blockmap("btree", HAMMER_ZONE_FREEMAP_INDEX); -#if 0 - dump_blockmap("btree", HAMMER_ZONE_BTREE_INDEX); - dump_blockmap("meta", HAMMER_ZONE_META_INDEX); - dump_blockmap("large-data", HAMMER_ZONE_LARGE_DATA_INDEX); - dump_blockmap("small-data", HAMMER_ZONE_SMALL_DATA_INDEX); -#endif } -#if 1 - static void dump_blockmap(const char *label, int zone) @@ -129,4 +141,181 @@ dump_blockmap(const char *label, int zone) rel_volume(root_volume); } -#endif +void +hammer_cmd_checkmap(void) +{ + struct volume_info *volume; + hammer_off_t node_offset; + + volume = get_volume(RootVolNo); + node_offset = volume->ondisk->vol0_btree_root; + if (QuietOpt < 3) { + printf("Volume header\trecords=%jd next_tid=%016jx\n", + (intmax_t)volume->ondisk->vol0_stat_records, + (uintmax_t)volume->ondisk->vol0_next_tid); + printf("\t\tbufoffset=%016jx\n", + (uintmax_t)volume->ondisk->vol_buf_beg); + 
} + rel_volume(volume); + + printf("Collecting allocation info from B-Tree: "); + fflush(stdout); + check_btree_node(node_offset, 0); + printf("done\n"); + dump_collect_table(); +} + +static void +check_btree_node(hammer_off_t node_offset, int depth) +{ + struct buffer_info *buffer = NULL; + hammer_node_ondisk_t node; + hammer_btree_elm_t elm; + int i; + char badc; + + node = get_node(node_offset, &buffer); + + if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc) + badc = ' '; + else + badc = 'B'; + + if (badc != ' ') { + printf("B NODE %016jx cnt=%02d p=%016jx " + "type=%c depth=%d", + (uintmax_t)node_offset, node->count, + (uintmax_t)node->parent, + (node->type ? node->type : '?'), depth); + printf(" mirror %016jx", (uintmax_t)node->mirror_tid); + printf(" {\n"); + } + + for (i = 0; i < node->count; ++i) { + elm = &node->elms[i]; + + switch(node->type) { + case HAMMER_BTREE_TYPE_INTERNAL: + if (elm->internal.subtree_offset) { + check_btree_node(elm->internal.subtree_offset, + depth + 1); + } + break; + case HAMMER_BTREE_TYPE_LEAF: + if (elm->leaf.data_offset) + collect_btree_elm(elm); + break; + default: + assert(0); + } + } + rel_buffer(buffer); +} + +static +void +collect_btree_elm(hammer_btree_elm_t elm) +{ + struct hammer_blockmap_layer1 layer1; + struct hammer_blockmap_layer2 layer2; + struct hammer_blockmap_layer2 *track2; + hammer_off_t offset = elm->leaf.data_offset; + collect_t collect; + int error; + + blockmap_lookup(offset, &layer1, &layer2, &error); + collect = collect_get(layer1.phys_offset); + track2 = collect_get_track(collect, offset, &layer2); + track2->bytes_free -= (elm->leaf.data_len + 15) & ~15; +} + +static +collect_t +collect_get(hammer_off_t phys_offset) +{ + int hv = crc32(&phys_offset, sizeof(phys_offset)) & COLLECT_HMASK; + collect_t collect; + + for (collect = CollectHash[hv]; collect; collect = collect->hnext) { + if (collect->phys_offset == phys_offset) + return(collect); + } + collect = calloc(sizeof(*collect), 1); + collect->track2 = malloc(HAMMER_LARGEBLOCK_SIZE); + collect->layer2 = malloc(HAMMER_LARGEBLOCK_SIZE); + collect->phys_offset = phys_offset; + collect->hnext = CollectHash[hv]; + CollectHash[hv] = collect; + bzero(collect->track2, HAMMER_LARGEBLOCK_SIZE); + bzero(collect->layer2, HAMMER_LARGEBLOCK_SIZE); + + return (collect); +} + +static +struct hammer_blockmap_layer2 * +collect_get_track(collect_t collect, hammer_off_t offset, + struct hammer_blockmap_layer2 *layer2) +{ + struct hammer_blockmap_layer2 *track2; + size_t i; + + i = HAMMER_BLOCKMAP_LAYER2_OFFSET(offset) / sizeof(*track2); + track2 = &collect->track2[i]; + if (track2->entry_crc == 0) { + collect->layer2[i] = *layer2; + track2->bytes_free = HAMMER_LARGEBLOCK_SIZE; + track2->entry_crc = 1; /* steal field to tag track load */ + } + return (track2); +} + +static +void +dump_collect_table(void) +{ + collect_t collect; + int i; + + for (i = 0; i < COLLECT_HSIZE; ++i) { + for (collect = CollectHash[i]; + collect; + collect = collect->hnext) { + dump_collect(collect); + } + } +} + +static +void +dump_collect(collect_t collect) +{ + struct hammer_blockmap_layer2 *track2; + struct hammer_blockmap_layer2 *layer2; + size_t i; + + for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) { + track2 = &collect->track2[i]; + layer2 = &collect->layer2[i]; + + /* + * Currently just check bigblocks referenced by data + * or B-Tree nodes. 
+ */ + if (track2->entry_crc == 0) + continue; + + if (track2->bytes_free != layer2->bytes_free) { + printf("BM\tblock=%016jx calc %d free, got %d\n", + (intmax_t)(collect->phys_offset + + i * HAMMER_LARGEBLOCK_SIZE), + track2->bytes_free, + layer2->bytes_free); + } else if (VerboseOpt) { + printf("\tblock=%016jx %d free (correct)\n", + (intmax_t)(collect->phys_offset + + i * HAMMER_LARGEBLOCK_SIZE), + track2->bytes_free); + } + } +} diff --git a/sbin/hammer/cmd_show.c b/sbin/hammer/cmd_show.c index 7019a2b41e..037cf29e07 100644 --- a/sbin/hammer/cmd_show.c +++ b/sbin/hammer/cmd_show.c @@ -385,21 +385,33 @@ print_bigblock_fill(hammer_off_t offset) struct hammer_blockmap_layer1 layer1; struct hammer_blockmap_layer2 layer2; int fill; - - blockmap_lookup(offset, &layer1, &layer2); - fill = layer2.bytes_free * 100 / HAMMER_LARGEBLOCK_SIZE; - fill = 100 - fill; - - printf("z%d:%lld=%d%%", - HAMMER_ZONE_DECODE(offset), - (offset & ~HAMMER_OFF_ZONE_MASK) / HAMMER_LARGEBLOCK_SIZE, - fill - ); + int error; + + blockmap_lookup(offset, &layer1, &layer2, &error); + if (error) { + printf("z%d:%lld=BADZ", + HAMMER_ZONE_DECODE(offset), + (offset & ~HAMMER_OFF_ZONE_MASK) / + HAMMER_LARGEBLOCK_SIZE + ); + } else { + fill = layer2.bytes_free * 100 / HAMMER_LARGEBLOCK_SIZE; + fill = 100 - fill; + + printf("z%d:%lld=%d%%", + HAMMER_ZONE_DECODE(offset), + (offset & ~HAMMER_OFF_ZONE_MASK) / + HAMMER_LARGEBLOCK_SIZE, + fill + ); + } } /* * Check the generic crc on a data element. Inodes record types are * special in that some of their fields are not CRCed. + * + * Also check that the zone is valid. */ static const char * @@ -410,6 +422,7 @@ check_data_crc(hammer_btree_elm_t elm) int32_t data_len; int32_t len; u_int32_t crc; + int error; char *ptr; data_offset = elm->leaf.data_offset; @@ -419,7 +432,12 @@ check_data_crc(hammer_btree_elm_t elm) return("Z"); crc = 0; + error = 0; while (data_len) { + blockmap_lookup(data_offset, NULL, NULL, &error); + if (error) + break; + ptr = get_buffer_data(data_offset, &data_buffer, 0); len = HAMMER_BUFSIZE - ((int)data_offset & HAMMER_BUFMASK); if (len > data_len) @@ -435,9 +453,11 @@ check_data_crc(hammer_btree_elm_t elm) } if (data_buffer) rel_buffer(data_buffer); + if (error) + return("BO"); /* bad offset */ if (crc == elm->leaf.data_crc) return(""); - return("B"); + return("BX"); /* bad crc */ } static diff --git a/sbin/hammer/hammer.8 b/sbin/hammer/hammer.8 index 946a9b5013..69320d4286 100644 --- a/sbin/hammer/hammer.8 +++ b/sbin/hammer/hammer.8 @@ -298,6 +298,13 @@ This means the free byte count can legally go negative. This command needs the .Fl f flag. +.\" ==== checkmap ==== +.It Cm checkmap +Check the blockmap allocation count. +.Nm +will scan the B-Tree, collect allocation information, and +construct a blockmap in-memory. It will then check that blockmap +against the on-disk blockmap. .\" ==== show ==== .It Cm show Op Ar lo Ns Cm \&: Ns Ar objid Dump the B-Tree. 
diff --git a/sbin/hammer/hammer.c b/sbin/hammer/hammer.c index c3e02edeb1..db5117bbb8 100644 --- a/sbin/hammer/hammer.c +++ b/sbin/hammer/hammer.c @@ -459,6 +459,11 @@ main(int ac, char **av) hammer_cmd_blockmap(); exit(0); } + if (strcmp(av[0], "checkmap") == 0) { + hammer_parsedevs(blkdevs); + hammer_cmd_checkmap(); + exit(0); + } usage(1); /* not reached */ return(0); diff --git a/sbin/hammer/hammer.h b/sbin/hammer/hammer.h index eed667cc14..50936494a4 100644 --- a/sbin/hammer/hammer.h +++ b/sbin/hammer/hammer.h @@ -82,6 +82,7 @@ void hammer_cmd_show(hammer_tid_t node_offset, u_int32_t lo, int64_t obj_id, int depth, hammer_base_elm_t left_bound, hammer_base_elm_t right_bound); void hammer_cmd_show_undo(void); +void hammer_cmd_checkmap(void); void hammer_cmd_prune(char **av, int ac); void hammer_cmd_softprune(char **av, int ac, int everything_opt); void hammer_cmd_bstats(char **av, int ac); diff --git a/sbin/hammer/hammer_util.h b/sbin/hammer/hammer_util.h index dd86a40bd8..53a741bbe5 100644 --- a/sbin/hammer/hammer_util.h +++ b/sbin/hammer/hammer_util.h @@ -129,7 +129,8 @@ void rel_buffer(struct buffer_info *buffer); hammer_off_t blockmap_lookup(hammer_off_t bmap_off, struct hammer_blockmap_layer1 *layer1, - struct hammer_blockmap_layer2 *layer2); + struct hammer_blockmap_layer2 *layer2, + int *errorp); void format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base); void format_undomap(hammer_volume_ondisk_t ondisk); diff --git a/sbin/hammer/ondisk.c b/sbin/hammer/ondisk.c index 9afad6ab88..2c744a32cc 100644 --- a/sbin/hammer/ondisk.c +++ b/sbin/hammer/ondisk.c @@ -203,7 +203,7 @@ get_buffer(hammer_off_t buf_offset, int isnew) zone = HAMMER_ZONE_DECODE(buf_offset); if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) { - buf_offset = blockmap_lookup(buf_offset, NULL, NULL); + buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL); } assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER); vol_no = HAMMER_VOL_DECODE(buf_offset); -- 2.41.0
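The core of the new cmd_blockmap.c code is per-big-block accounting built up while walking the B-Tree: each leaf element's data_offset is resolved through the blockmap, its data_len is rounded up to 16 bytes, and the result is subtracted from a tracked free-byte count that dump_collect() later compares against the on-disk layer-2 bytes_free. Below is a minimal, self-contained sketch of that accounting idea only. It does not use the HAMMER headers; the SKETCH_* names, the shift-based hash, and main() are illustrative stand-ins, and where the real collect_get()/collect_get_track() keep one hash entry per layer-1 entry with a full layer-2 tracking array (hashed with crc32()), the sketch collapses that to one entry per 8MB big-block.

/*
 * Simplified sketch of the checkmap accounting idea: bucket big-blocks
 * by physical offset and deduct 16-byte-aligned allocation sizes from a
 * per-big-block free-byte counter.  Constants and the hash function are
 * illustrative, not HAMMER's own.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

#define SKETCH_BIGBLOCK_SIZE	(8 * 1024 * 1024)	/* 8MB big-blocks */
#define SKETCH_HSIZE		1024
#define SKETCH_HMASK		(SKETCH_HSIZE - 1)

struct sketch_collect {
	struct sketch_collect *hnext;
	uint64_t phys_offset;		/* big-block base offset */
	int64_t	 bytes_free;		/* free bytes computed from the B-Tree */
};

static struct sketch_collect *SketchHash[SKETCH_HSIZE];

/* toy hash on the big-block index; the real code runs crc32() over the offset */
static struct sketch_collect *
sketch_get(uint64_t phys_offset)
{
	int hv = (int)((phys_offset >> 23) & SKETCH_HMASK);
	struct sketch_collect *c;

	for (c = SketchHash[hv]; c; c = c->hnext) {
		if (c->phys_offset == phys_offset)
			return (c);
	}
	c = calloc(1, sizeof(*c));
	c->phys_offset = phys_offset;
	c->bytes_free = SKETCH_BIGBLOCK_SIZE;	/* start fully free */
	c->hnext = SketchHash[hv];
	SketchHash[hv] = c;
	return (c);
}

/* account one leaf element: round data_len up to 16 bytes, as checkmap does */
static void
sketch_account(uint64_t data_offset, int32_t data_len)
{
	uint64_t base = data_offset & ~(uint64_t)(SKETCH_BIGBLOCK_SIZE - 1);
	struct sketch_collect *c = sketch_get(base);

	c->bytes_free -= (data_len + 15) & ~15;
}

int
main(void)
{
	sketch_account(0x0000000000800000ULL, 100);	/* rounds up to 112 */
	sketch_account(0x0000000000800070ULL, 16);	/* stays 16 */

	struct sketch_collect *c = sketch_get(0x0000000000800000ULL);
	printf("big-block %016" PRIx64 ": %" PRId64 " bytes free (computed)\n",
	       c->phys_offset, c->bytes_free);
	return (0);
}

As with the existing blockmap directive, checkmap operates on the devices given with -f; the hammer.c hunk calls hammer_parsedevs(blkdevs) before hammer_cmd_checkmap().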
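The blockmap.c change is what lets hammer show keep going on a bad zone: blockmap_lookup() gains an optional errorp out-parameter, so callers that pass one get EDOM back on a layer-2 zone mismatch (reported as BADZ or BO by cmd_show.c), while callers that pass NULL keep the original assert. Here is a small sketch of that optional error-out pattern; check_zone() and main() are hypothetical stand-ins, not code from the patch.

/*
 * Sketch of the optional error-out pattern adopted by blockmap_lookup():
 * an errorp turns a mismatch into a soft EDOM error the caller can
 * report, while a NULL errorp preserves the hard assert behavior.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int
check_zone(int layer2_zone, int expected_zone, int *errorp)
{
	if (errorp)
		*errorp = 0;
	if (layer2_zone == expected_zone)
		return (0);
	if (errorp) {
		*errorp = EDOM;		/* soft failure, caller decides */
		return (-1);
	}
	assert(layer2_zone == expected_zone);	/* old hard failure */
	return (-1);			/* not reached with assertions enabled */
}

int
main(void)
{
	int error;

	check_zone(3, 8, &error);	/* mismatch: error becomes EDOM */
	printf("zone mismatch -> error=%d (EDOM=%d)\n", error, EDOM);
	check_zone(8, 8, NULL);		/* NULL errorp keeps the assert path */
	return (0);
}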