hammer - Migration to libhammer (step 1/many)
[dragonfly.git] / sbin / hammer / cmd_blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

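/*
 * "checkmap" support: allocation information gathered from the B-Tree is
 * accumulated per layer-2 big-block so it can later be compared against
 * the free-space bookkeeping stored in the blockmap itself.
 */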
typedef struct collect {
	struct collect *hnext;
	hammer_off_t phys_offset;
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
} *collect_t;

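/*
 * Collision-chained hash table keyed on the layer-1 physical offset.
 * COLLECT_HSIZE and COLLECT_HMASK are assumed to be provided elsewhere
 * (e.g. hammer.h); they are not defined in this file.
 */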
collect_t CollectHash[COLLECT_HSIZE];

static void dump_blockmap(const char *label, int zone);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void collect_btree_elm(hammer_btree_elm_t elm);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect);

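/*
 * Entry point for the blockmap directive: dump one blockmap zone,
 * printing each layer-1 and layer-2 entry along with a CRC status flag.
 */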
void
hammer_cmd_blockmap(void)
{
	dump_blockmap("btree", HAMMER_ZONE_FREEMAP_INDEX);
}

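/*
 * Walk the two-level blockmap for the given zone.  Layer-1 entries are
 * read from the root volume's blockmap; each valid layer-1 entry is then
 * expanded into its layer-2 entries, one per big-block.  A 'B' in the
 * left-hand column flags a CRC mismatch.
 */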
static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	int xerr;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("zone %-16s next %016jx alloc %016jx\n",
		label,
		(uintmax_t)rootmap->next_offset,
		(uintmax_t)rootmap->alloc_offset);

	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		xerr = ' ';
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			xerr = 'B';
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
			continue;
		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_LARGEBLOCK_SIZE
		) {
			/*
			 * Dive layer 2, each entry represents a large-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			xerr = ' ';
			if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
				xerr = 'B';
			printf("%c %016jx zone=%d app=%-7d free=%-7d\n",
				xerr,
				(uintmax_t)scan2,
				layer2->zone,
				layer2->append_off,
				layer2->bytes_free);
		}
	}
	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);
	rel_volume(root_volume);
}

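/*
 * Entry point for the checkmap directive: print a summary of the volume
 * header, collect allocation info from the B-Tree, then cross-check it
 * against the layer-2 free-byte counts recorded in the blockmap.
 */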
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
	}
	rel_volume(volume);

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");
	dump_collect_table();
}

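/*
 * Recursively descend the B-Tree starting at node_offset.  Nodes whose
 * CRC does not verify are reported; leaf elements carrying a data
 * reference are handed to collect_btree_elm() for space accounting.
 */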
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc;

	node = get_node(node_offset, &buffer);

	if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc)
		badc = ' ';
	else
		badc = 'B';

	if (badc != ' ') {
		printf("B NODE %016jx cnt=%02d p=%016jx "
		       "type=%c depth=%d",
		       (uintmax_t)node_offset, node->count,
		       (uintmax_t)node->parent,
		       (node->type ? node->type : '?'), depth);
		printf(" mirror %016jx", (uintmax_t)node->mirror_tid);
		printf(" {\n");
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_elm(elm);
			break;
		default:
			assert(0);
		}
	}
	rel_buffer(buffer);
}

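/*
 * Account for one leaf element's data reference: look up its layer-1 and
 * layer-2 blockmap entries and subtract the 16-byte-aligned data length
 * from our private free-space tally for that big-block.
 */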
static
void
collect_btree_elm(hammer_btree_elm_t elm)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t offset = elm->leaf.data_offset;
	collect_t collect;
	int error;

	blockmap_lookup(offset, &layer1, &layer2, &error);
	collect = collect_get(layer1.phys_offset);
	track2 = collect_get_track(collect, offset, &layer2);
	track2->bytes_free -= (elm->leaf.data_len + 15) & ~15;
}

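/*
 * Find or create the collect record for a layer-1 physical offset.  A new
 * record allocates two zeroed layer-2 arrays: one to track free bytes as
 * the B-Tree is scanned and one to hold copies of the on-disk layer-2
 * entries for later comparison.
 */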
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	int hv = crc32(&phys_offset, sizeof(phys_offset)) & COLLECT_HMASK;
	collect_t collect;

	for (collect = CollectHash[hv]; collect; collect = collect->hnext) {
		if (collect->phys_offset == phys_offset)
			return(collect);
	}
	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_LARGEBLOCK_SIZE);
	collect->layer2 = malloc(HAMMER_LARGEBLOCK_SIZE);
	collect->phys_offset = phys_offset;
	collect->hnext = CollectHash[hv];
	CollectHash[hv] = collect;
	bzero(collect->track2, HAMMER_LARGEBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_LARGEBLOCK_SIZE);

	return (collect);
}

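/*
 * Return the tracking layer-2 entry for the big-block containing 'offset',
 * snapshotting the on-disk layer-2 entry the first time that big-block is
 * seen.  The entry_crc field is repurposed as a load marker (set to 1 once
 * the entry has been initialized).
 */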
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_OFFSET(offset) / sizeof(*track2);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		track2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

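/*
 * Walk every hash chain and report each collected big-block.
 */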
static
void
dump_collect_table(void)
{
	collect_t collect;
	int i;

	for (i = 0; i < COLLECT_HSIZE; ++i) {
		for (collect = CollectHash[i];
		     collect;
		     collect = collect->hnext) {
			dump_collect(collect);
		}
	}
}

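/*
 * Compare the free-byte count computed from B-Tree references against the
 * count stored in the on-disk layer-2 entry; mismatches are prefixed with
 * "BM".  When VerboseOpt is set, matching entries are reported as well.
 */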
static
void
dump_collect(collect_t collect)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	size_t i;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];

		/*
		 * Currently we only check big-blocks referenced by data
		 * or B-Tree nodes.
		 */
		if (track2->entry_crc == 0)
			continue;

		if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx calc %d free, got %d\n",
			       (uintmax_t)(collect->phys_offset +
					   i * HAMMER_LARGEBLOCK_SIZE),
			       track2->bytes_free,
			       layer2->bytes_free);
		} else if (VerboseOpt) {
			printf("\tblock=%016jx %d free (correct)\n",
			       (uintmax_t)(collect->phys_offset +
					   i * HAMMER_LARGEBLOCK_SIZE),
			       track2->bytes_free);
		}
	}
}