/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */
/*
 * NOTE(review): this file appears to be a damaged extraction of
 * sbin/hammer/cmd_blockmap.c -- every line carries a stray leading
 * line-number token and the numbers jump (lines are missing).
 * Restore the file from pristine upstream source before building;
 * the comments below only annotate what is still visible.
 */
/*
 * Per-big-block accounting record, hash-chained by the layer1
 * phys_offset (the layer2 array's physical address).  track2 holds the
 * usage this tool computes itself from B-Tree/UNDO scans; layer2 caches
 * the on-media layer2 entries so the two can be compared later.
 */
39 typedef struct collect {
40 TAILQ_ENTRY(collect) entry;
41 hammer_off_t phys_offset;
42 struct hammer_blockmap_layer2 *track2;
43 struct hammer_blockmap_layer2 *layer2;
/* Hash table of collect records; HSIZE is a power of two so HMASK works. */
47 #define COLLECT_HSIZE 1024
48 #define COLLECT_HMASK (COLLECT_HSIZE - 1)
49 TAILQ_HEAD(collect_head, collect) CollectHash[COLLECT_HSIZE];
/* Forward declarations for the dump/check helpers below. */
51 static void dump_blockmap(const char *label, int zone);
52 static void check_btree_node(hammer_off_t node_offset, int depth);
53 static void check_undo(hammer_blockmap_t rootmap);
54 static __inline void collect_btree_root(hammer_off_t node_offset);
55 static __inline void collect_btree_internal(hammer_btree_elm_t elm);
56 static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
57 static __inline void collect_undo(hammer_off_t scan_offset,
58 hammer_fifo_head_t head);
59 static void collect_blockmap(hammer_off_t offset, int32_t length);
60 static struct hammer_blockmap_layer2 *collect_get_track(
61 collect_t collect, hammer_off_t offset,
62 struct hammer_blockmap_layer2 *layer2);
63 static collect_t collect_get(hammer_off_t phys_offset);
64 static void dump_collect_table(void);
65 static void dump_collect(collect_t collect, int *stats);
/*
 * "hammer blockmap" command entry point: dump the freemap zone.
 * NOTE(review): fragment -- return type, braces and possibly other
 * statements are missing from this extraction.
 */
68 hammer_cmd_blockmap(void)
70 dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
/*
 * Walk one blockmap zone of the root volume and print its layer1/layer2
 * translation entries, flagging bad CRCs (the '%c' status columns).
 * NOTE(review): fragment -- loop headers, CRC-failure branches and the
 * closing braces are partially missing from this extraction.
 */
75 dump_blockmap(const char *label, int zone)
77 struct volume_info *root_volume;
78 hammer_blockmap_t rootmap;
79 struct hammer_blockmap_layer1 *layer1;
80 struct hammer_blockmap_layer2 *layer2;
81 struct buffer_info *buffer1 = NULL;
82 struct buffer_info *buffer2 = NULL;
83 hammer_off_t layer1_offset;
84 hammer_off_t layer2_offset;
/* The zone's blockmap lives in the root volume header. */
89 assert(RootVolNo >= 0);
90 root_volume = get_volume(RootVolNo);
91 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
92 assert(rootmap->phys_offset != 0);
94 printf("zone %-16s next %016jx alloc %016jx\n",
96 (uintmax_t)rootmap->next_offset,
97 (uintmax_t)rootmap->alloc_offset);
/* Layer 1: each entry covers HAMMER_BLOCKMAP_LAYER2 bytes of the zone. */
99 for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
100 scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
101 scan1 += HAMMER_BLOCKMAP_LAYER2) {
105 layer1_offset = rootmap->phys_offset +
106 HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
107 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
/* CRC mismatch is reported, not fatal (handling lines missing here). */
109 if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
112 layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
115 printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
118 (uintmax_t)layer1->phys_offset,
119 (intmax_t)layer1->blocks_free);
/* Fully-free layer1 entries have no layer2 array to dive into. */
120 if (layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
123 scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
124 scan2 += HAMMER_BIGBLOCK_SIZE
127 * Dive layer 2, each entry represents a big-block.
129 layer2_offset = layer1->phys_offset +
130 HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
131 layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
133 if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
135 printf("%c %016jx zone=%d app=%-7d free=%-7d\n",
/* presumably rel_buffer(buffer1/buffer2) calls were here -- lines missing */
147 rel_volume(root_volume);
/*
 * "hammer checkmap" command entry point: print volume-header summary
 * info, then cross-check blockmap free-space accounting by collecting
 * every allocation reachable from the B-Tree and the UNDO FIFO and
 * comparing against the on-media layer2 entries (dump_collect_table).
 * NOTE(review): fragment -- braces and several statements (including,
 * presumably, the check_undo() call) are missing from this extraction.
 */
151 hammer_cmd_checkmap(void)
153 struct volume_info *volume;
154 hammer_blockmap_t rootmap;
155 hammer_off_t node_offset;
158 volume = get_volume(RootVolNo);
159 node_offset = volume->ondisk->vol0_btree_root;
160 rootmap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
163 printf("Volume header\trecords=%jd next_tid=%016jx\n",
164 (intmax_t)volume->ondisk->vol0_stat_records,
165 (uintmax_t)volume->ondisk->vol0_next_tid);
166 printf("\t\tbufoffset=%016jx\n",
167 (uintmax_t)volume->ondisk->vol_buf_beg);
/* UNDO zone size in MB, derived from its alloc_offset. */
168 printf("\t\tundosize=%jdMB\n",
169 (intmax_t)((rootmap->alloc_offset & HAMMER_OFF_LONG_MASK)
/* Initialize the collect hash before any collect_blockmap() calls. */
174 for (i = 0; i < COLLECT_HSIZE; i++)
175 TAILQ_INIT(&CollectHash[i]);
179 printf("Collecting allocation info from B-Tree: ");
181 collect_btree_root(node_offset);
182 check_btree_node(node_offset, 0);
185 printf("Collecting allocation info from UNDO: ");
190 dump_collect_table();
/*
 * Recursively walk a B-Tree node: verify its CRC, print a one-line
 * summary, then visit each element -- recursing through internal
 * elements and collecting data allocations from leaf elements.
 * NOTE(review): fragment -- the switch header, recursion depth+1
 * argument, braces and buffer release are missing from this extraction.
 */
195 check_btree_node(hammer_off_t node_offset, int depth)
197 struct buffer_info *buffer = NULL;
198 hammer_node_ondisk_t node;
199 hammer_btree_elm_t elm;
203 node = get_node(node_offset, &buffer);
/* CRC covers everything after the crc field itself. */
205 if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc)
211 printf("%c NODE %016jx cnt=%02d p=%016jx "
214 (uintmax_t)node_offset, node->count,
215 (uintmax_t)node->parent,
216 (node->type ? node->type : '?'), depth);
217 printf(" mirror %016jx\n", (uintmax_t)node->mirror_tid);
220 for (i = 0; i < node->count; ++i) {
221 elm = &node->elms[i];
/* Internal elements with a subtree: account for the child and recurse. */
224 case HAMMER_BTREE_TYPE_INTERNAL:
225 if (elm->internal.subtree_offset) {
226 collect_btree_internal(elm);
227 check_btree_node(elm->internal.subtree_offset,
/* Leaf elements: account for the record's data allocation, if any. */
231 case HAMMER_BTREE_TYPE_LEAF:
232 if (elm->leaf.data_offset)
233 collect_btree_leaf(elm);
/*
 * Scan the UNDO FIFO zone from offset 0 up to its alloc_offset,
 * collecting the space consumed by each FIFO record.  Records with an
 * illegal hdr_size (misaligned, zero, or crossing an UNDO alignment
 * boundary) cause the scan to skip to the next boundary.
 * NOTE(review): fragment -- braces, the default case, the boundary
 * rounding mask and the buffer release are missing from this extraction.
 */
241 check_undo(hammer_blockmap_t rootmap)
243 struct buffer_info *buffer = NULL;
244 hammer_off_t scan_offset;
245 hammer_fifo_head_t head;
247 scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
248 while (scan_offset < rootmap->alloc_offset) {
249 head = get_buffer_data(scan_offset, &buffer, 0);
/* All recognized FIFO record types are accounted identically. */
250 switch (head->hdr_type) {
251 case HAMMER_HEAD_TYPE_PAD:
252 case HAMMER_HEAD_TYPE_DUMMY:
253 case HAMMER_HEAD_TYPE_UNDO:
254 case HAMMER_HEAD_TYPE_REDO:
255 collect_undo(scan_offset, head);
/* Sanity-check hdr_size before advancing by it. */
258 if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
259 head->hdr_size == 0 ||
260 head->hdr_size > HAMMER_UNDO_ALIGN -
261 ((u_int)scan_offset & HAMMER_UNDO_MASK)) {
262 printf("Illegal size, skipping to next boundary\n");
263 scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
266 scan_offset += head->hdr_size;
/*
 * Account for the B-Tree root node's allocation (one ondisk node).
 * NOTE(review): fragment -- the static __inline void header line and
 * braces are missing from this extraction.
 */
274 collect_btree_root(hammer_off_t node_offset)
276 collect_blockmap(node_offset,
277 sizeof(struct hammer_node_ondisk)); /* 4KB */
/*
 * Account for the child node referenced by an internal B-Tree element.
 * NOTE(review): fragment -- header line and braces missing.
 */
282 collect_btree_internal(hammer_btree_elm_t elm)
284 collect_blockmap(elm->internal.subtree_offset,
285 sizeof(struct hammer_node_ondisk)); /* 4KB */
/*
 * Account for a leaf element's data allocation; data lengths are
 * rounded up to a 16-byte allocation granularity.
 * NOTE(review): fragment -- header line and braces missing.
 */
290 collect_btree_leaf(hammer_btree_elm_t elm)
292 collect_blockmap(elm->leaf.data_offset,
293 (elm->leaf.data_len + 15) & ~15);
/*
 * Account for one UNDO FIFO record's space (hdr_size bytes at
 * scan_offset).  NOTE(review): fragment -- header line and braces missing.
 */
298 collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
300 collect_blockmap(scan_offset, head->hdr_size);
/*
 * Record an allocation of `length` bytes at zone-encoded `offset`:
 * translate through the blockmap, look up (or create) the collect
 * record for the owning layer2 array, and subtract from the tracked
 * big-block free count so it can later be compared with the media.
 * NOTE(review): fragment -- the error/zone assertions between the
 * lookup and the accounting, plus braces, are missing here.
 */
305 collect_blockmap(hammer_off_t offset, int32_t length)
307 struct hammer_blockmap_layer1 layer1;
308 struct hammer_blockmap_layer2 layer2;
309 struct hammer_blockmap_layer2 *track2;
310 hammer_off_t result_offset;
314 result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
/* A valid translation must land in the raw-buffer zone. */
315 if (AssertOnFailure) {
316 assert(HAMMER_ZONE_DECODE(result_offset) ==
317 HAMMER_ZONE_RAW_BUFFER_INDEX);
320 collect = collect_get(layer1.phys_offset); /* layer2 address */
321 track2 = collect_get_track(collect, offset, &layer2);
322 track2->bytes_free -= length;
/*
 * Find the collect record hashed by layer2-array phys_offset, creating
 * and zeroing a new one (with big-block-sized track2/layer2 shadow
 * arrays) if none exists yet.
 * NOTE(review): fragment -- the return statements, malloc/calloc result
 * checks (if any) and braces are missing from this extraction.
 */
327 collect_get(hammer_off_t phys_offset)
/* Hash the offset itself; HMASK folds the crc into the table range. */
329 int hv = crc32(&phys_offset, sizeof(phys_offset)) & COLLECT_HMASK;
332 TAILQ_FOREACH(collect, &CollectHash[hv], entry) {
333 if (collect->phys_offset == phys_offset)
336 collect = calloc(sizeof(*collect), 1);
337 collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);
338 collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);
339 collect->phys_offset = phys_offset;
340 TAILQ_INSERT_HEAD(&CollectHash[hv], collect, entry);
/* track2 entry_crc==0 doubles as the "not yet loaded" tag, so zeroing
 * both arrays up front is required -- see collect_get_track(). */
341 bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
342 bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);
/*
 * Free a collect record's shadow arrays (and, presumably, the record
 * itself on a line missing from this extraction).
 */
349 collect_rel(collect_t collect)
351 free(collect->layer2);
352 free(collect->track2);
/*
 * Return the tracked layer2 entry for the big-block containing
 * `offset`.  On first touch, snapshot the on-media layer2 entry into
 * the shadow array and initialize the tracked free count to a full
 * big-block; entry_crc is repurposed as a "loaded" flag (1).
 * NOTE(review): fragment -- the return statement and braces are missing.
 */
357 struct hammer_blockmap_layer2 *
358 collect_get_track(collect_t collect, hammer_off_t offset,
359 struct hammer_blockmap_layer2 *layer2)
361 struct hammer_blockmap_layer2 *track2;
/* Index of this big-block's entry within the layer2 array. */
364 i = HAMMER_BLOCKMAP_LAYER2_OFFSET(offset) / sizeof(*track2);
365 track2 = &collect->track2[i];
366 if (track2->entry_crc == 0) {
367 collect->layer2[i] = *layer2;
368 track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
369 track2->entry_crc = 1; /* steal field to tag track load */
/*
 * Drain the collect hash: dump/verify each record against the media,
 * accumulate per-zone big-block statistics and an error count, free the
 * records, then print the summary table.
 * NOTE(review): fragment -- declarations of collect/error/total/zone,
 * the `p = &CollectHash[i]` assignment, the total accumulation and
 * braces are missing from this extraction.  The `collect->error` field
 * is also not visible in the (truncated) struct collect above -- verify
 * against upstream.
 */
376 dump_collect_table(void)
379 struct collect_head *p;
383 int stats[HAMMER_MAX_ZONES];
384 bzero(stats, sizeof(stats));
/* Consume every hash chain, releasing records as we go. */
386 for (i = 0; i < COLLECT_HSIZE; ++i) {
388 while (!TAILQ_EMPTY(p)) {
389 collect = TAILQ_FIRST(p);
390 TAILQ_REMOVE(p, collect, entry);
391 dump_collect(collect, stats);
392 error += collect->error;
393 collect_rel(collect);
/* Per-zone summary of big-blocks seen. */
398 printf("zone-bigblock statistics\n");
399 printf("\tzone #\tbigblocks\n");
400 for (i = 0; i < HAMMER_MAX_ZONES; i++) {
401 printf("\tzone %d\t%d\n", i, stats[i]);
404 printf("\t---------------\n");
405 printf("\ttotal\t%d\n", total);
408 if (error || VerboseOpt)
409 printf("%d errors\n", error);
/*
 * Compare one collect record's tracked free-byte counts against the
 * on-media layer2 entries for every big-block in the layer2 array,
 * printing a "BM" line on mismatch (or a confirmation when verbose).
 * NOTE(review): fragment, and truncated at the end of this extraction --
 * the zone extraction, error counting, printf argument tails and the
 * function close are not visible.  Do not trust this copy; diff against
 * upstream cmd_blockmap.c.
 */
414 dump_collect(collect_t collect, int *stats)
416 struct hammer_blockmap_layer2 *track2;
417 struct hammer_blockmap_layer2 *layer2;
421 for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
422 track2 = &collect->track2[i];
423 layer2 = &collect->layer2[i];
426 * Currently just check bigblocks referenced by data
/* entry_crc==0 means this big-block was never touched by the scan. */
429 if (track2->entry_crc == 0)
433 if (AssertOnFailure) {
434 assert(zone >= HAMMER_ZONE_BTREE_INDEX);
435 assert(zone < HAMMER_MAX_ZONES);
/* Mismatch between computed and on-media free bytes is the check. */
439 if (track2->bytes_free != layer2->bytes_free) {
440 printf("BM\tblock=%016jx zone=%2d calc %d free, got %d\n",
441 (intmax_t)(collect->phys_offset +
442 i * HAMMER_BIGBLOCK_SIZE),
447 } else if (VerboseOpt) {
448 printf("\tblock=%016jx zone=%2d %d free (correct)\n",
449 (intmax_t)(collect->phys_offset +
450 i * HAMMER_BIGBLOCK_SIZE),