sbin/hammer/cmd_dedup.c (dragonfly.git, commit 8dbdf85adfd612375511cf3b1195d811bab10eca)
1 /*
2  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Ilya Dryomov <idryomov@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include "hammer.h"
36 #include <libutil.h>
37 #include <crypto/sha2/sha2.h>
38
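/*
 * Scratch buffer size shared by the mirror-read scan and the
 * HAMMERIOC_GET_DATA reads below.
 */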
39 #define DEDUP_BUF (64 * 1024)
40
41 /* Sorted tree of block CRCs - the light version used by dedup-simulate */
42 struct sim_dedup_entry_rb_tree;
43 RB_HEAD(sim_dedup_entry_rb_tree, sim_dedup_entry) sim_dedup_tree =
44                                         RB_INITIALIZER(&sim_dedup_tree);
45 RB_PROTOTYPE2(sim_dedup_entry_rb_tree, sim_dedup_entry, rb_entry,
46                 rb_sim_dedup_entry_compare, hammer_crc_t);
47
48 struct sim_dedup_entry {
49         hammer_crc_t    crc;
50         u_int64_t       ref_blks; /* number of blocks referenced */
51         u_int64_t       ref_size; /* size of data referenced */
52         RB_ENTRY(sim_dedup_entry) rb_entry;
53 };
54
55 /* Sorted tree of HAMMER B-Tree keys */
56 struct dedup_entry_rb_tree;
57 struct sha_dedup_entry_rb_tree;
58
59 RB_HEAD(dedup_entry_rb_tree, dedup_entry) dedup_tree =
60                                         RB_INITIALIZER(&dedup_tree);
61 RB_PROTOTYPE2(dedup_entry_rb_tree, dedup_entry, rb_entry,
62                 rb_dedup_entry_compare, hammer_crc_t);
63
64 RB_PROTOTYPE(sha_dedup_entry_rb_tree, sha_dedup_entry, fict_entry,
65                 rb_sha_dedup_entry_compare);
66
67 struct dedup_entry {
68         struct hammer_btree_leaf_elm leaf;
69         union {
70                 struct {
71                         u_int64_t ref_blks;
72                         u_int64_t ref_size;
73                 } de;
74                 RB_HEAD(sha_dedup_entry_rb_tree, sha_dedup_entry) fict_root;
75         } u;
76         u_int8_t flags;
77         RB_ENTRY(dedup_entry) rb_entry;
78 };
79
80 #define HAMMER_DEDUP_ENTRY_FICTITIOUS   0x0001
81
82 struct sha_dedup_entry {
83         struct hammer_btree_leaf_elm    leaf;
84         u_int64_t                       ref_blks;
85         u_int64_t                       ref_size;
86         u_int8_t                        sha_hash[SHA256_DIGEST_LENGTH];
87         RB_ENTRY(sha_dedup_entry)       fict_entry;
88 };
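/*
 * How the two levels fit together (a sketch based on the code below):
 *
 *   dedup_tree, keyed by leaf.data_crc
 *     - regular dedup_entry: per-CRC stats live in u.de
 *     - HAMMER_DEDUP_ENTRY_FICTITIOUS entry: u.fict_root is a SHA subtree
 *       holding one sha_dedup_entry per distinct SHA-256 seen for that CRC
 */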
89
90 /*
91  * Pass2 list - contains entries that were not dedup'ed because the ioctl failed
92  */
93 STAILQ_HEAD(, pass2_dedup_entry) pass2_dedup_queue =
94                                 STAILQ_HEAD_INITIALIZER(pass2_dedup_queue);
95
96 struct pass2_dedup_entry {
97         struct hammer_btree_leaf_elm    leaf;
98         STAILQ_ENTRY(pass2_dedup_entry) sq_entry;
99 };
100
101 #define DEDUP_PASS2     0x0001 /* process_btree_elm() mode */
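/*
 * In Pass2 mode candidates that fail again are not re-queued; they simply
 * remain accounted for in dedup_skipped_size.
 */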
102
103 /* PFS globals - we deal with just one PFS per run */
104 int glob_fd;
105 struct hammer_ioc_pseudofs_rw glob_pfs;
106
107 /*
108  * Global accounting variables
109  *
110  * The last three don't have to be 64-bit; they are just to be safe.
111  */
112 u_int64_t dedup_alloc_size = 0;
113 u_int64_t dedup_ref_size = 0;
114 u_int64_t dedup_skipped_size = 0;
115 u_int64_t dedup_crc_failures = 0;
116 u_int64_t dedup_sha_failures = 0;
117 u_int64_t dedup_underflows = 0;
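/*
 * The reported dedup ratio is dedup_ref_size / dedup_alloc_size:
 * ref_size counts every byte referenced, alloc_size what would remain
 * allocated after dedup (ref_size / ref_blks per CRC or SHA entry).
 */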
118
119 static int rb_sim_dedup_entry_compare(struct sim_dedup_entry *sim_de1,
120                                 struct sim_dedup_entry *sim_de2);
121 static int rb_dedup_entry_compare(struct dedup_entry *de1,
122                                 struct dedup_entry *de2);
123 static int rb_sha_dedup_entry_compare(struct sha_dedup_entry *sha_de1,
124                                 struct sha_dedup_entry *sha_de2);
125 typedef int (*scan_pfs_cb_t)(hammer_btree_leaf_elm_t scan_leaf, int flags);
126 static void scan_pfs(char *filesystem, scan_pfs_cb_t func);
127 static int collect_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags);
128 static int process_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags);
129 static int upgrade_chksum(hammer_btree_leaf_elm_t leaf, u_int8_t *sha_hash);
130 static void dump_simulated_dedup(void);
131 static void dump_real_dedup(void);
132 static void dedup_usage(int code);
133
134 RB_GENERATE2(sim_dedup_entry_rb_tree, sim_dedup_entry, rb_entry,
135                 rb_sim_dedup_entry_compare, hammer_crc_t, crc);
136 RB_GENERATE2(dedup_entry_rb_tree, dedup_entry, rb_entry,
137                 rb_dedup_entry_compare, hammer_crc_t, leaf.data_crc);
138 RB_GENERATE(sha_dedup_entry_rb_tree, sha_dedup_entry, fict_entry,
139                 rb_sha_dedup_entry_compare);
140
141 static int
142 rb_sim_dedup_entry_compare(struct sim_dedup_entry *sim_de1,
143                         struct sim_dedup_entry *sim_de2)
144 {
145         if (sim_de1->crc < sim_de2->crc)
146                 return (-1);
147         if (sim_de1->crc > sim_de2->crc)
148                 return (1);
149
150         return (0);
151 }
152
153 static int
154 rb_dedup_entry_compare(struct dedup_entry *de1, struct dedup_entry *de2)
155 {
156         if (de1->leaf.data_crc < de2->leaf.data_crc)
157                 return (-1);
158         if (de1->leaf.data_crc > de2->leaf.data_crc)
159                 return (1);
160
161         return (0);
162 }
163
164 static int
165 rb_sha_dedup_entry_compare(struct sha_dedup_entry *sha_de1,
166                         struct sha_dedup_entry *sha_de2)
167 {
168         unsigned long *h1 = (unsigned long *)&sha_de1->sha_hash;
169         unsigned long *h2 = (unsigned long *)&sha_de2->sha_hash;
170         int i;
171
172         for (i = 0; i < SHA256_DIGEST_LENGTH / (int)sizeof(unsigned long); ++i) {
173                 if (h1[i] < h2[i])
174                         return (-1);
175                 if (h1[i] > h2[i])
176                         return (1);
177         }
178
179         return (0);
180 }
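/*
 * Note that the hash is compared as native machine words, so the ordering
 * is not the lexicographic byte order - but any consistent total order is
 * fine here, the subtree is only used for exact-match lookups.
 */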
181
182 /*
183  * dedup-simulate <filesystem>
184  */
185 void
186 hammer_cmd_dedup_simulate(char **av, int ac)
187 {
188         struct sim_dedup_entry *sim_de;
189
190         if (ac != 1)
191                 dedup_usage(1);
192
193         glob_fd = getpfs(&glob_pfs, av[0]);
194
195         printf("Dedup-simulate ");
196         scan_pfs(av[0], &collect_btree_elm);
197         printf("Dedup-simulate %s succeeded\n", av[0]);
198
199         relpfs(glob_fd, &glob_pfs);
200
201         if (VerboseOpt >= 2)
202                 dump_simulated_dedup();
203
204         /*
205          * Calculate simulated dedup ratio and get rid of the tree
206          */
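        /*
         * For example, a CRC referenced by three 16KB blocks adds 48KB to
         * dedup_ref_size and 48KB / 3 = 16KB to dedup_alloc_size, so that
         * entry alone would yield a simulated ratio of 3.00.
         */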
207         while ((sim_de = RB_ROOT(&sim_dedup_tree)) != NULL) {
208                 assert(sim_de->ref_blks != 0);
209                 dedup_ref_size += sim_de->ref_size;
210                 dedup_alloc_size += sim_de->ref_size / sim_de->ref_blks;
211
212                 RB_REMOVE(sim_dedup_entry_rb_tree, &sim_dedup_tree, sim_de);
213                 free(sim_de);
214         }
215
216         printf("Simulated dedup ratio = %.2f\n",
217             (double)dedup_ref_size / dedup_alloc_size);
218 }
219
220 /*
221  * dedup <filesystem>
222  */
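/*
 * Same scan as the simulation, except that process_btree_elm() actually
 * issues HAMMERIOC_DEDUP, and anything that failed only for technical
 * reasons gets one more attempt from the Pass2 queue.
 */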
223 void
224 hammer_cmd_dedup(char **av, int ac)
225 {
226         struct dedup_entry *de;
227         struct sha_dedup_entry *sha_de;
228         struct pass2_dedup_entry *pass2_de;
229         char buf[8];
230
231         if (TimeoutOpt > 0)
232                 alarm(TimeoutOpt);
233
234         if (ac != 1)
235                 dedup_usage(1);
236
237         STAILQ_INIT(&pass2_dedup_queue);
238
239         glob_fd = getpfs(&glob_pfs, av[0]);
240
241         printf("Dedup ");
242         scan_pfs(av[0], &process_btree_elm);
243
244         while ((pass2_de = STAILQ_FIRST(&pass2_dedup_queue)) != NULL) {
245                 if (process_btree_elm(&pass2_de->leaf, DEDUP_PASS2))
246                         dedup_skipped_size -= pass2_de->leaf.data_len;
247
248                 STAILQ_REMOVE_HEAD(&pass2_dedup_queue, sq_entry);
249                 free(pass2_de);
250         }
251         assert(STAILQ_EMPTY(&pass2_dedup_queue));
252
253         printf("Dedup %s succeeded\n", av[0]);
254
255         relpfs(glob_fd, &glob_pfs);
256
257         if (VerboseOpt >= 2)
258                 dump_real_dedup();
259
260         /*
261          * Calculate dedup ratio and get rid of the trees
262          */
263         while ((de = RB_ROOT(&dedup_tree)) != NULL) {
264                 if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
265                         while ((sha_de = RB_ROOT(&de->u.fict_root)) != NULL) {
266                                 assert(sha_de->ref_blks != 0);
267                                 dedup_ref_size += sha_de->ref_size;
268                                 dedup_alloc_size += sha_de->ref_size / sha_de->ref_blks;
269
270                                 RB_REMOVE(sha_dedup_entry_rb_tree,
271                                                 &de->u.fict_root, sha_de);
272                                 free(sha_de);
273                         }
274                         assert(RB_EMPTY(&de->u.fict_root));
275                 } else {
276                         assert(de->u.de.ref_blks != 0);
277                         dedup_ref_size += de->u.de.ref_size;
278                         dedup_alloc_size += de->u.de.ref_size / de->u.de.ref_blks;
279                 }
280
281                 RB_REMOVE(dedup_entry_rb_tree, &dedup_tree, de);
282                 free(de);
283         }
284         assert(RB_EMPTY(&dedup_tree));
285
286         assert(dedup_alloc_size != 0);
287         humanize_unsigned(buf, sizeof(buf), dedup_ref_size, "B", 1024);
288         printf("Dedup ratio = %.2f\n"
289                "    %8s referenced\n",
290                (double)dedup_ref_size / dedup_alloc_size,
291                buf
292         );
293         humanize_unsigned(buf, sizeof(buf), dedup_alloc_size, "B", 1024);
294         printf("    %8s allocated\n", buf);
295         humanize_unsigned(buf, sizeof(buf), dedup_skipped_size, "B", 1024);
296         printf("    %8s skipped\n", buf);
297         printf("    %8jd CRC collisions\n"
298                "    %8jd SHA collisions\n"
299                "    %8jd bigblock underflows\n",
300                (intmax_t)dedup_crc_failures,
301                (intmax_t)dedup_sha_failures,
302                (intmax_t)dedup_underflows
303         );
304 }
305
306 static int
307 collect_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags __unused)
308 {
309         struct sim_dedup_entry *sim_de;
310
311         sim_de = RB_LOOKUP(sim_dedup_entry_rb_tree, &sim_dedup_tree, scan_leaf->data_crc);
312
313         if (sim_de == NULL) {
314                 sim_de = calloc(1, sizeof(*sim_de));
315                 sim_de->crc = scan_leaf->data_crc;
316                 RB_INSERT(sim_dedup_entry_rb_tree, &sim_dedup_tree, sim_de);
317         }
318
319         sim_de->ref_blks += 1;
320         sim_de->ref_size += scan_leaf->data_len;
321         return (1);
322 }
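/*
 * Note that the simulation keys on the CRC alone, so two different blocks
 * that merely share a CRC are counted as dedupable here; the real dedup
 * pass resolves such collisions with SHA-256.
 */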
323
324 static __inline int
325 validate_dedup_pair(hammer_btree_leaf_elm_t p, hammer_btree_leaf_elm_t q)
326 {
327         if ((p->data_offset & HAMMER_OFF_ZONE_MASK) !=
328             (q->data_offset & HAMMER_OFF_ZONE_MASK)) {
329                 return (1);
330         }
331         if (p->data_len != q->data_len) {
332                 return (1);
333         }
334
335         return (0);
336 }
337
338 #define DEDUP_TECH_FAILURE      1
339 #define DEDUP_CMP_FAILURE       2
340 #define DEDUP_INVALID_ZONE      3
341 #define DEDUP_UNDERFLOW         4
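/*
 * How process_btree_elm() reacts to these codes:
 *   DEDUP_TECH_FAILURE - queue the candidate for Pass2
 *   DEDUP_CMP_FAILURE  - treat as a CRC (or SHA) collision
 *   DEDUP_INVALID_ZONE - give up on the candidate (terminate early)
 *   DEDUP_UNDERFLOW    - adopt scan_leaf as the new reference leaf
 */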
342
343 static __inline int
344 deduplicate(hammer_btree_leaf_elm_t p, hammer_btree_leaf_elm_t q)
345 {
346         struct hammer_ioc_dedup dedup;
347
348         bzero(&dedup, sizeof(dedup));
349
350         /*
351          * If the data_offset fields are the same there is no need to run the
352          * ioctl; the candidate is already dedup'ed.
353          */
354         if (p->data_offset == q->data_offset) {
355                 return (0);
356         }
357
358         dedup.elm1 = p->base;
359         dedup.elm2 = q->base;
360         RunningIoctl = 1;
361         if (ioctl(glob_fd, HAMMERIOC_DEDUP, &dedup) < 0) {
362                 /* Technical failure - locking or whatever */
363                 RunningIoctl = 0;
364                 return (DEDUP_TECH_FAILURE);
365         }
366         RunningIoctl = 0;
367         if (dedup.head.flags & HAMMER_IOC_DEDUP_CMP_FAILURE) {
368                 return (DEDUP_CMP_FAILURE);
369         }
370         if (dedup.head.flags & HAMMER_IOC_DEDUP_INVALID_ZONE) {
371                 return (DEDUP_INVALID_ZONE);
372         }
373         if (dedup.head.flags & HAMMER_IOC_DEDUP_UNDERFLOW) {
374                 return (DEDUP_UNDERFLOW);
375         }
375
376         return (0);
377 }
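/*
 * The actual block sharing happens on the kernel side of HAMMERIOC_DEDUP;
 * the wrapper above only maps the outcome onto the DEDUP_* codes (0 means
 * deduplicated, or nothing to do because data_offset already matched).
 */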
378
379 static int
380 process_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags)
381 {
382         struct dedup_entry *de;
383         struct sha_dedup_entry *sha_de, temp;
384         struct pass2_dedup_entry *pass2_de;
385         int error;
386
387         de = RB_LOOKUP(dedup_entry_rb_tree, &dedup_tree, scan_leaf->data_crc);
388         if (de == NULL) {
389                 /*
390                  * Insert new entry into CRC tree
391                  */
392                 de = calloc(1, sizeof(*de));
393                 de->leaf = *scan_leaf;
394                 RB_INSERT(dedup_entry_rb_tree, &dedup_tree, de);
395                 goto upgrade_stats;
396         }
397
398         /*
399          * Found entry in CRC tree
400          */
401         if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
402                 /*
403                  * The entry in the CRC tree is fictitious, so we already had
404                  * problems with this CRC. Upgrade the candidate (compute its
405                  * SHA-256) and dive into the SHA subtree. If the upgrade fails
406                  * insert the candidate into the Pass2 list (processed later).
407                  */
408                 if (upgrade_chksum(scan_leaf, temp.sha_hash))
409                         goto pass2_insert;
410
411                 sha_de = RB_FIND(sha_dedup_entry_rb_tree, &de->u.fict_root, &temp);
412                 if (sha_de == NULL) {
413                         /*
414                          * Nothing in SHA subtree so far, so this is a new
415                          * 'dataset'. Insert new entry into SHA subtree.
416                          */
417                         sha_de = calloc(1, sizeof(*sha_de));
418                         sha_de->leaf = *scan_leaf;
419                         memcpy(sha_de->sha_hash, temp.sha_hash, SHA256_DIGEST_LENGTH);
420                         RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root, sha_de);
421                         goto upgrade_stats_sha;
422                 }
423
424                  * Found an entry in the SHA subtree; this means we have a
425                  * potential dedup pair. Validate it (zones have to match and
426                  * the data_len fields have to be the same too). If validation
427                  * fails, treat it as a SHA collision (jump to
428                  * sha256_failure).
429                  */
430                 if (validate_dedup_pair(&sha_de->leaf, scan_leaf))
431                         goto sha256_failure;
432
433                 /*
434                  * We have a valid dedup pair (SHA match, validated).
435                  *
436                  * In case of a technical failure (the dedup pair was good but the
437                  * ioctl failed anyway) insert the candidate into the Pass2 list
438                  * (we will try to dedup it after we are done with the rest of
439                  * the tree).
440                  *
441                  * If the ioctl fails because either of the blocks is in a non-dedup
442                  * zone (we can dedup only in LARGE_DATA and SMALL_DATA) don't
443                  * bother with the candidate and terminate early.
444                  *
445                  * If the ioctl fails because of a big-block underflow, replace the
446                  * leaf node that the found dedup entry represents with scan_leaf.
447                  */
448                 error = deduplicate(&sha_de->leaf, scan_leaf);
449                 switch(error) {
450                 case DEDUP_TECH_FAILURE:
451                         goto pass2_insert;
452                 case DEDUP_CMP_FAILURE:
453                         goto sha256_failure;
454                 case DEDUP_INVALID_ZONE:
455                         goto terminate_early;
456                 case DEDUP_UNDERFLOW:
457                         ++dedup_underflows;
458                         sha_de->leaf = *scan_leaf;
459                         memcpy(sha_de->sha_hash, temp.sha_hash, SHA256_DIGEST_LENGTH);
460                         goto upgrade_stats_sha;
461                 default:
462                         goto upgrade_stats_sha;
463                 }
464
465                 /*
466                  * Ooh la la.. SHA-256 collision. Terminate early, there's
467                  * nothing we can do here.
468                  */
469 sha256_failure:
470                 ++dedup_sha_failures;
471                 goto terminate_early;
472         } else {
473                 /*
474                  * Candidate CRC is good for now (we found an entry in CRC
475                  * tree and it's not fictitious). This means we have a
476                  * potential dedup pair.
477                  */
478                 if (validate_dedup_pair(&de->leaf, scan_leaf))
479                         goto crc_failure;
480
481                 /*
482                  * We have a valid dedup pair (CRC match, validated)
483                  */
484                 error = deduplicate(&de->leaf, scan_leaf);
485                 switch(error) {
486                 case DEDUP_TECH_FAILURE:
487                         goto pass2_insert;
488                 case DEDUP_CMP_FAILURE:
489                         goto crc_failure;
490                 case DEDUP_INVALID_ZONE:
491                         goto terminate_early;
492                 case DEDUP_UNDERFLOW:
493                         ++dedup_underflows;
494                         de->leaf = *scan_leaf;
495                         goto upgrade_stats;
496                 default:
497                         goto upgrade_stats;
498                 }
499
500 crc_failure:
501                 /*
502                  * We got a CRC collision - either the ioctl failed because of
503                  * a comparison failure or validation of the potential dedup
504                  * pair went bad. In either case insert both blocks into the
505                  * SHA subtree (this requires a checksum upgrade) and mark the
506                  * entry that corresponds to this CRC in the CRC tree
507                  * fictitious, so that all further operations with this CRC go
508                  * through the SHA subtree.
509                  */
510                 ++dedup_crc_failures;
511                 /*
512                  * Insert the block that was represented by the now-fictitious
513                  * dedup entry (create a new SHA entry and preserve the stats of
514                  * the old CRC one). If the checksum upgrade fails insert the
515                  * candidate into the Pass2 list and return - keep both trees
516                  * unmodified.
517                  */
518                 sha_de = calloc(1, sizeof(*sha_de));
519                 sha_de->leaf = de->leaf;
520                 sha_de->ref_blks = de->u.de.ref_blks;
521                 sha_de->ref_size = de->u.de.ref_size;
522                 if (upgrade_chksum(&sha_de->leaf, sha_de->sha_hash)) {
523                         free(sha_de);
524                         goto pass2_insert;
525                 }
526
527                 RB_INIT(&de->u.fict_root);
528                 /*
529                  * Here we can insert without prior checking because the tree
530                  * is empty at this point
531                  */
532                 RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root, sha_de);
533
534                 /*
535                  * Mark the entry in the CRC tree fictitious
536                  */
537                 de->flags |= HAMMER_DEDUP_ENTRY_FICTITIOUS;
538
539                 /*
540                  * Upgrade the checksum of the candidate and insert it into the
541                  * SHA subtree. If the upgrade fails insert the candidate into
542                  * the Pass2 list.
543                  */
544                 if (upgrade_chksum(scan_leaf, temp.sha_hash)) {
545                         goto pass2_insert;
546                 }
547                 sha_de = RB_FIND(sha_dedup_entry_rb_tree, &de->u.fict_root, &temp);
548                 /*
549                  * If there already is an entry with this SHA, the only
550                  * RB-tree element at this point is the entry we just added.
551                  * We know for sure these blocks are different (this is the
552                  * crc_failure branch) so treat it as a SHA collision.
553                  */
554                 if (sha_de != NULL)
555                         goto sha256_failure;
556
557                 sha_de = calloc(1, sizeof(*sha_de));
558                 sha_de->leaf = *scan_leaf;
559                 memcpy(sha_de->sha_hash, temp.sha_hash, SHA256_DIGEST_LENGTH);
560                 RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root, sha_de);
561                 goto upgrade_stats_sha;
562         }
563
564 upgrade_stats:
565         de->u.de.ref_blks += 1;
566         de->u.de.ref_size += scan_leaf->data_len;
567         return (1);
568
569 upgrade_stats_sha:
570         sha_de->ref_blks += 1;
571         sha_de->ref_size += scan_leaf->data_len;
572         return (1);
573
574 pass2_insert:
575         /*
576          * If in pass2 mode don't insert anything; fall through to
577          * terminate_early.
578          */
579         if ((flags & DEDUP_PASS2) == 0) {
580                 pass2_de = calloc(1, sizeof(*pass2_de));
581                 pass2_de->leaf = *scan_leaf;
582                 STAILQ_INSERT_TAIL(&pass2_dedup_queue, pass2_de, sq_entry);
583                 dedup_skipped_size += scan_leaf->data_len;
584                 return (1);
585         }
586
587 terminate_early:
588         /*
589          * Early termination path. Fixup stats.
590          */
591         dedup_alloc_size += scan_leaf->data_len;
592         dedup_ref_size += scan_leaf->data_len;
593         return (0);
594 }
595
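/*
 * "Upgrading" a checksum means reading the record's data back via
 * HAMMERIOC_GET_DATA and computing its SHA-256, so the block can be placed
 * in (or matched against) a SHA subtree. A non-zero return means the caller
 * should fall back to the Pass2 path instead of trusting the hash.
 */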
596 static int
597 upgrade_chksum(hammer_btree_leaf_elm_t leaf, u_int8_t *sha_hash)
598 {
599         struct hammer_ioc_data data;
600         char *buf = malloc(DEDUP_BUF);
601         SHA256_CTX ctx;
602         int error;
603
604         bzero(&data, sizeof(data));
605         data.elm = leaf->base;
606         data.ubuf = buf;
607         data.size = DEDUP_BUF;
608
609         error = 0;
610         if (ioctl(glob_fd, HAMMERIOC_GET_DATA, &data) < 0) {
611                 fprintf(stderr, "Get-data failed: %s\n", strerror(errno));
612                 error = 1;
613                 goto done;
614         }
615
616         if (data.leaf.data_len != leaf->data_len) {
617                 error = 1;
618                 goto done;
619         }
620
621         if (data.leaf.base.btype == HAMMER_BTREE_TYPE_RECORD &&
622             data.leaf.base.rec_type == HAMMER_RECTYPE_DATA) {
623                 SHA256_Init(&ctx);
624                 SHA256_Update(&ctx, (void *)buf, data.leaf.data_len);
625                 SHA256_Final(sha_hash, &ctx);
626         }
627
628 done:
629         free(buf);
630         return (error);
631 }
632
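/*
 * Walk the whole PFS with repeated HAMMERIOC_MIRROR_READ calls (NODATA, so
 * only B-Tree keys come back), resuming each iteration at key_cur, and hand
 * every DATA record leaf to the callback (collect_btree_elm or
 * process_btree_elm).
 */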
633 static void
634 scan_pfs(char *filesystem, scan_pfs_cb_t func)
635 {
636         struct hammer_ioc_mirror_rw mirror;
637         hammer_ioc_mrecord_any_t mrec;
638         struct hammer_btree_leaf_elm elm;
639         char *buf = malloc(DEDUP_BUF);
640         int offset, bytes;
641
642         bzero(&mirror, sizeof(mirror));
643         hammer_key_beg_init(&mirror.key_beg);
644         hammer_key_end_init(&mirror.key_end);
645
646         mirror.tid_beg = glob_pfs.ondisk->sync_beg_tid;
647         mirror.tid_end = glob_pfs.ondisk->sync_end_tid;
648         mirror.head.flags |= HAMMER_IOC_MIRROR_NODATA; /* we want only keys */
649         mirror.ubuf = buf;
650         mirror.size = DEDUP_BUF;
651         mirror.pfs_id = glob_pfs.pfs_id;
652         mirror.shared_uuid = glob_pfs.ondisk->shared_uuid;
653
654         printf("%s: objspace %016jx:%04x %016jx:%04x pfs_id %d\n",
655                 filesystem,
656                 (uintmax_t)mirror.key_beg.obj_id,
657                 mirror.key_beg.localization,
658                 (uintmax_t)mirror.key_end.obj_id,
659                 mirror.key_end.localization,
660                 glob_pfs.pfs_id);
661         fflush(stdout);
662
663         do {
664                 mirror.count = 0;
665                 mirror.pfs_id = glob_pfs.pfs_id;
666                 mirror.shared_uuid = glob_pfs.ondisk->shared_uuid;
667                 if (ioctl(glob_fd, HAMMERIOC_MIRROR_READ, &mirror) < 0) {
668                         fprintf(stderr, "Mirror-read %s failed: %s\n",
669                                 filesystem, strerror(errno));
670                         exit(1);
671                 }
672                 if (mirror.head.flags & HAMMER_IOC_HEAD_ERROR) {
673                         fprintf(stderr, "Mirror-read %s fatal error %d\n",
674                                 filesystem, mirror.head.error);
675                         exit(1);
676                 }
677                 if (mirror.count) {
678                         offset = 0;
679                         while (offset < mirror.count) {
680                                 mrec = (void *)((char *)buf + offset);
681                                 bytes = HAMMER_HEAD_DOALIGN(mrec->head.rec_size);
682                                 if (offset + bytes > mirror.count) {
683                                         fprintf(stderr, "Misaligned record\n");
684                                         exit(1);
685                                 }
686                                 assert((mrec->head.type &
687                                     HAMMER_MRECF_TYPE_MASK) == HAMMER_MREC_TYPE_REC);
688
689                                 elm = mrec->rec.leaf;
690                                 if (elm.base.btype == HAMMER_BTREE_TYPE_RECORD &&
691                                     elm.base.rec_type == HAMMER_RECTYPE_DATA) {
692                                         func(&elm, 0);
693                                 }
694                                 offset += bytes;
695                         }
696                 }
697                 mirror.key_beg = mirror.key_cur;
698         } while (mirror.count != 0);
699
700         free(buf);
701 }
702
703 static void
704 dump_simulated_dedup(void)
705 {
706         struct sim_dedup_entry *sim_de;
707
708         printf("=== Dumping simulated dedup entries:\n");
709         RB_FOREACH(sim_de, sim_dedup_entry_rb_tree, &sim_dedup_tree) {
710                 printf("\tcrc=%08x cnt=%ju size=%ju\n", sim_de->crc,
711                     (uintmax_t)sim_de->ref_blks, (uintmax_t)sim_de->ref_size);
712         }
713         printf("end of dump ===\n");
714 }
715
716 static void
717 dump_real_dedup(void)
718 {
719         struct dedup_entry *de;
720         struct sha_dedup_entry *sha_de;
721         int i;
722
723         printf("=== Dumping dedup entries:\n");
724         RB_FOREACH(de, dedup_entry_rb_tree, &dedup_tree) {
725                 if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
726                         printf("\tcrc=%08x fictitious\n", de->leaf.data_crc);
727
728                         RB_FOREACH(sha_de, sha_dedup_entry_rb_tree, &de->u.fict_root) {
729                                 printf("\t\tcrc=%08x cnt=%ju size=%ju\n\t\t\tsha=",
730                                         sha_de->leaf.data_crc,
731                                         (uintmax_t)sha_de->ref_blks,
732                                         (uintmax_t)sha_de->ref_size);
733                                 for (i = 0; i < SHA256_DIGEST_LENGTH; ++i)
734                                         printf("%02x", sha_de->sha_hash[i]);
735                                 printf("\n");
736                         }
737                 } else {
738                         printf("\tcrc=%08x cnt=%ju size=%ju\n",
739                                 de->leaf.data_crc,
740                                 (uintmax_t)de->u.de.ref_blks,
741                                 (uintmax_t)de->u.de.ref_size);
742                 }
743         }
744         printf("end of dump ===\n");
745 }
746
747 static void
748 dedup_usage(int code)
749 {
750         fprintf(stderr,
751                 "hammer dedup-simulate <filesystem>\n"
752                 "hammer dedup <filesystem>\n"
753         );
754         exit(code);
755 }