/* sbin/hammer/cmd_blockmap.c */
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

#include <sys/tree.h>

/*
 * Each collect structure covers the 1<<(19+23) byte address space of one
 * layer-1 entry (plus a 1<<23 byte copy of the layer-2 entries that the
 * layer-1 entry points to).
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;  /* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;  /* big-block offset for layer2[i] */
	hammer_blockmap_layer2_t track2;  /* track of layer2 entries */
	hammer_blockmap_layer2_t layer2;  /* 1<<19 x 16 bytes entries */
	int error;  /* # of inconsistencies */
} *collect_t;

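/*
 * RB-tree comparator: collect structures are keyed by the physical
 * offset of the layer-2 big-block they describe.
 */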
static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

static RB_HEAD(collect_rb_tree, collect) CollectTree =
					RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(hammer_blockmap_layer1_t layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static hammer_blockmap_layer2_t collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, zone_stat_t stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

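/*
 * hammer blockmap: dump the freemap zone's layer-1/layer-2 entries.
 */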
void
hammer_cmd_blockmap(void)
{
	dump_blockmap(HAMMER_ZONE_FREEMAP_INDEX);
}

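/*
 * Walk every layer-1 entry of the given zone and dump its layer-2
 * entries.  Error flags: 'B' bad CRC, 'A' bad append_off, 'F' bad
 * bytes_free.
 */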
static
void
dump_blockmap(int zone)
{
	volume_info_t root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	buffer_info_t buffer1 = NULL;
	buffer_info_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	zone_stat_t stats = NULL;
	int xerr, aerr, ferr;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	print_blockmap(root_volume);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';  /* good */
		if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';  /* good */
			if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c     %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset, layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (stats)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt)
		printf("%d bad layer1\n", num_bad_layer1);
	if (num_bad_layer2 || VerboseOpt)
		printf("%d bad layer2\n", num_bad_layer2);
}

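/*
 * hammer checkmap: rebuild the expected per-big-block allocation state
 * from the freemap, the B-Tree and the UNDO/REDO FIFO, then cross-check
 * it against the on-disk layer-2 entries.
 */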
void
hammer_cmd_checkmap(void)
{
	volume_info_t volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	print_blockmap(volume);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}

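/*
 * Account for the big-blocks holding the freemap itself: the layer-1
 * array plus every layer-2 array it points to.
 */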
static
void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	buffer_info_t buffer1 = NULL;
	hammer_blockmap_layer1_t layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

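/*
 * Recursively walk the B-Tree from the root, accounting for each node
 * and each leaf's data reference.  Flags: 'B' bad CRC, 'I' I/O error.
 */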
static
void
check_btree_node(hammer_off_t node_offset, int depth)
{
	buffer_info_t buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';  /* good */
	char badm = ' ';  /* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_buffer_data(node_offset, &buffer, 0);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (!hammer_crc_test_btree(HammerVersion, node)) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {  /* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
			       node->count,
			       (uintmax_t)node->parent,
			       (node->type ? node->type : '?'),
			       depth,
			       (uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}

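/*
 * Scan the UNDO FIFO and account for every record up to the zone's
 * allocation limit.  Records with an illegal size cause the scan to
 * skip ahead to the next UNDO alignment boundary.
 */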
static
void
check_undo(hammer_blockmap_t undomap)
{
	buffer_info_t buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ENCODE_UNDO(0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = HAMMER_UNDO_DOALIGN(scan_offset);
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(hammer_blockmap_layer1_t layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

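/*
 * Account for a leaf element's data reference.  The expected zone is
 * derived from the record type: inode, directory and other meta records
 * live in the META zone, data and DB records in a data zone selected by
 * data_len.
 */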
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		HAMMER_DATA_DOALIGN(elm->leaf.data_len), zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

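/*
 * Core accounting: translate the zone-X offset through the blockmap,
 * then charge the allocation against the tracked copy of the owning
 * big-block's layer-2 entry.
 */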
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	hammer_blockmap_layer2_t track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup_save(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(hammer_is_zone_raw_buffer(result_offset));
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

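/*
 * Look up the collect structure for a layer-2 big-block address,
 * allocating and inserting a new one on first use.
 */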
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = calloc(1, HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->layer2 = calloc(1, HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->offsets = calloc(HAMMER_BLOCKMAP_RADIX2, sizeof(hammer_off_t));
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

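/*
 * Return the tracking entry for the big-block containing 'offset'.
 * On first use the on-disk layer-2 entry is copied aside and the
 * tracking entry is initialized to a fully free big-block.
 */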
static
hammer_blockmap_layer2_t
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  hammer_blockmap_layer2_t layer2)
{
	hammer_blockmap_layer2_t track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;  /* steal field to tag track load */
	}
	return (track2);
}

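/*
 * Report the collected results, accumulate the per-collect error
 * counts, then tear down the RB tree.
 */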
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	zone_stat_t stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt)
		printf("%d bad nodes\n", num_bad_node);
	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

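/*
 * Compare each tracked big-block against its saved on-disk layer-2
 * entry.  'BZ' flags a zone mismatch, 'BM' a bytes_free mismatch.
 */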
static
void
dump_collect(collect_t collect, zone_stat_t stats)
{
	hammer_blockmap_layer2_t track2;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t offset;
	int i;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		if (DebugOpt) {
			assert((layer2->zone == HAMMER_ZONE_UNDO_INDEX) ||
				(layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_index_record(layer2->zone));
		}
		if (stats)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}
608 }