sbin/*hammer: Use consistent static/inline/returntype format for functions
[dragonfly.git] sbin/hammer/cmd_dedup.c
1 /*
2  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Ilya Dryomov <idryomov@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include <libutil.h>
36 #include <crypto/sha2/sha2.h>
37
38 #include "hammer.h"
39
40 #define DEDUP_BUF (64 * 1024)
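
/*
 * DEDUP_BUF sizes both the mirror-read batch buffer used by scan_pfs() and
 * the single-record data buffer used by upgrade_chksum() below.
 */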
41
42 /* Sorted list of block CRCs - light version for dedup-simulate */
43 struct sim_dedup_entry_rb_tree;
44 RB_HEAD(sim_dedup_entry_rb_tree, sim_dedup_entry) sim_dedup_tree =
45                                         RB_INITIALIZER(&sim_dedup_tree);
46 RB_PROTOTYPE2(sim_dedup_entry_rb_tree, sim_dedup_entry, rb_entry,
47                 rb_sim_dedup_entry_compare, hammer_crc_t);
48
49 struct sim_dedup_entry {
50         hammer_crc_t    crc;
51         uint64_t        ref_blks; /* number of blocks referenced */
52         uint64_t        ref_size; /* size of data referenced */
53         RB_ENTRY(sim_dedup_entry) rb_entry;
54 };
55
56 struct dedup_entry {
57         struct hammer_btree_leaf_elm leaf;
58         union {
59                 struct {
60                         uint64_t ref_blks;
61                         uint64_t ref_size;
62                 } de;
63                 RB_HEAD(sha_dedup_entry_rb_tree, sha_dedup_entry) fict_root;
64         } u;
65         uint8_t flags;
66         RB_ENTRY(dedup_entry) rb_entry;
67 };
68
69 #define HAMMER_DEDUP_ENTRY_FICTITIOUS   0x0001
70
71 struct sha_dedup_entry {
72         struct hammer_btree_leaf_elm    leaf;
73         uint64_t                        ref_blks;
74         uint64_t                        ref_size;
75         uint8_t                         sha_hash[SHA256_DIGEST_LENGTH];
76         RB_ENTRY(sha_dedup_entry)       fict_entry;
77 };
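
/*
 * The two structures above form a two-level index: a dedup_entry is keyed
 * on the B-Tree leaf's data_crc, and once two different blocks collide on
 * that CRC the entry is marked HAMMER_DEDUP_ENTRY_FICTITIOUS and per-block
 * sha_dedup_entry nodes (keyed on the SHA-256 of the block data) are hung
 * off u.fict_root in place of the plain ref_blks/ref_size counters.
 */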
78
79 /* Sorted list of HAMMER B-Tree keys */
80 struct dedup_entry_rb_tree;
81 struct sha_dedup_entry_rb_tree;
82
83 RB_HEAD(dedup_entry_rb_tree, dedup_entry) dedup_tree =
84                                         RB_INITIALIZER(&dedup_tree);
85 RB_PROTOTYPE2(dedup_entry_rb_tree, dedup_entry, rb_entry,
86                 rb_dedup_entry_compare, hammer_crc_t);
87
88 RB_PROTOTYPE(sha_dedup_entry_rb_tree, sha_dedup_entry, fict_entry,
89                 rb_sha_dedup_entry_compare);
90
91 /*
92  * Pass2 list - contains entries that were not dedup'ed because ioctl failed
93  */
94 STAILQ_HEAD(, pass2_dedup_entry) pass2_dedup_queue =
95                                 STAILQ_HEAD_INITIALIZER(pass2_dedup_queue);
96
97 struct pass2_dedup_entry {
98         struct hammer_btree_leaf_elm    leaf;
99         STAILQ_ENTRY(pass2_dedup_entry) sq_entry;
100 };
101
102 #define DEDUP_PASS2     0x0001 /* process_btree_elm() mode */
103
104 static int SigInfoFlag;
105 static int SigAlrmFlag;
106 static int64_t DedupDataReads;
107 static int64_t DedupCurrentRecords;
108 static int64_t DedupTotalRecords;
109 static uint32_t DedupCrcStart;
110 static uint32_t DedupCrcEnd;
111 static uint64_t MemoryUse;
112
113 /* PFS global ids - we deal with just one PFS per run */
114 static int glob_fd;
115 static struct hammer_ioc_pseudofs_rw glob_pfs;
116
117 /*
118  * Global accounting variables
119  *
120  * Not all of these strictly need to be 64-bit; they are just to be safe.
121  */
122 static uint64_t dedup_alloc_size;
123 static uint64_t dedup_ref_size;
124 static uint64_t dedup_skipped_size;
125 static uint64_t dedup_crc_failures;
126 static uint64_t dedup_sha_failures;
127 static uint64_t dedup_underflows;
128 static uint64_t dedup_successes_count;
129 static uint64_t dedup_successes_bytes;
130
131 static int rb_sim_dedup_entry_compare(struct sim_dedup_entry *sim_de1,
132                                 struct sim_dedup_entry *sim_de2);
133 static int rb_dedup_entry_compare(struct dedup_entry *de1,
134                                 struct dedup_entry *de2);
135 static int rb_sha_dedup_entry_compare(struct sha_dedup_entry *sha_de1,
136                                 struct sha_dedup_entry *sha_de2);
137 typedef int (*scan_pfs_cb_t)(hammer_btree_leaf_elm_t scan_leaf, int flags);
138 static void scan_pfs(char *filesystem, scan_pfs_cb_t func, const char *id);
139 static int collect_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags);
140 static int count_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags);
141 static int process_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags);
142 static int upgrade_chksum(hammer_btree_leaf_elm_t leaf, uint8_t *sha_hash);
143 static void dump_simulated_dedup(void);
144 static void dump_real_dedup(void);
145 static void dedup_usage(int code);
146
147 RB_GENERATE2(sim_dedup_entry_rb_tree, sim_dedup_entry, rb_entry,
148                 rb_sim_dedup_entry_compare, hammer_crc_t, crc);
149 RB_GENERATE2(dedup_entry_rb_tree, dedup_entry, rb_entry,
150                 rb_dedup_entry_compare, hammer_crc_t, leaf.data_crc);
151 RB_GENERATE(sha_dedup_entry_rb_tree, sha_dedup_entry, fict_entry,
152                 rb_sha_dedup_entry_compare);
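
/*
 * Note that the RB_GENERATE2 expansions are given the key type and key
 * field directly (hammer_crc_t / crc and leaf.data_crc), so the CRC trees
 * can be searched with RB_LOOKUP on a bare CRC value.  The SHA sub-tree
 * uses plain RB_GENERATE and is therefore searched with RB_FIND and a
 * temporary sha_dedup_entry holding the hash (see process_btree_elm()).
 */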
153
154 static
155 int
156 rb_sim_dedup_entry_compare(struct sim_dedup_entry *sim_de1,
157                         struct sim_dedup_entry *sim_de2)
158 {
159         if (sim_de1->crc < sim_de2->crc)
160                 return (-1);
161         if (sim_de1->crc > sim_de2->crc)
162                 return (1);
163
164         return (0);
165 }
166
167 static
168 int
169 rb_dedup_entry_compare(struct dedup_entry *de1, struct dedup_entry *de2)
170 {
171         if (de1->leaf.data_crc < de2->leaf.data_crc)
172                 return (-1);
173         if (de1->leaf.data_crc > de2->leaf.data_crc)
174                 return (1);
175
176         return (0);
177 }
178
179 static
180 int
181 rb_sha_dedup_entry_compare(struct sha_dedup_entry *sha_de1,
182                         struct sha_dedup_entry *sha_de2)
183 {
184         unsigned long *h1 = (unsigned long *)&sha_de1->sha_hash;
185         unsigned long *h2 = (unsigned long *)&sha_de2->sha_hash;
186         int i;
187
188         for (i = 0; i < SHA256_DIGEST_LENGTH / (int)sizeof(unsigned long); ++i) {
189                 if (h1[i] < h2[i])
190                         return (-1);
191                 if (h1[i] > h2[i])
192                         return (1);
193         }
194
195         return (0);
196 }
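
/*
 * The word-wise comparison above relies on the SHA-256 digest length being
 * a multiple of sizeof(unsigned long), otherwise the integer division in
 * the loop bound would silently ignore trailing bytes.  A minimal
 * compile-time check of that assumption could look like the following
 * sketch (not part of the original file; assumes a C11 compiler):
 */
_Static_assert(SHA256_DIGEST_LENGTH % sizeof(unsigned long) == 0,
	"sha_hash must divide evenly into unsigned long sized words");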
197
198 /*
199  * dedup-simulate <filesystem>
200  */
201 void
202 hammer_cmd_dedup_simulate(char **av, int ac)
203 {
204         struct sim_dedup_entry *sim_de;
205
206         if (ac != 1) {
207                 dedup_usage(1);
208                 /* not reached */
209         }
210
211         glob_fd = getpfs(&glob_pfs, av[0]);
212
213         /*
214          * Collection passes (memory limited)
215          */
216         printf("Dedup-simulate running\n");
217         do {
218                 DedupCrcStart = DedupCrcEnd;
219                 DedupCrcEnd = 0;
220                 MemoryUse = 0;
221
222                 if (VerboseOpt) {
223                         printf("B-Tree pass  crc-range %08x-max\n",
224                                 DedupCrcStart);
225                         fflush(stdout);
226                 }
227                 scan_pfs(av[0], collect_btree_elm, "simu-pass");
228
229                 if (VerboseOpt >= 2)
230                         dump_simulated_dedup();
231
232                 /*
233                  * Calculate simulated dedup ratio and get rid of the tree
234                  */
235                 while ((sim_de = RB_ROOT(&sim_dedup_tree)) != NULL) {
236                         assert(sim_de->ref_blks != 0);
237                         dedup_ref_size += sim_de->ref_size;
238                         dedup_alloc_size += sim_de->ref_size / sim_de->ref_blks;
239
240                         RB_REMOVE(sim_dedup_entry_rb_tree, &sim_dedup_tree, sim_de);
241                         free(sim_de);
242                 }
243                 if (DedupCrcEnd && VerboseOpt == 0)
244                         printf(".");
245         } while (DedupCrcEnd);
246
247         printf("Dedup-simulate %s succeeded\n", av[0]);
248         relpfs(glob_fd, &glob_pfs);
249
250         printf("Simulated dedup ratio = %.2f\n",
251             (dedup_alloc_size != 0) ?
252                 (double)dedup_ref_size / dedup_alloc_size : 0);
253 }
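
/*
 * Worked example of the ratio printed above: if a pass sees three 64KB
 * blocks sharing one CRC plus one unique 16KB block, then
 * dedup_ref_size = 3 * 65536 + 16384 = 212992 and
 * dedup_alloc_size = 65536 + 16384 = 81920, so the simulated ratio is
 * 212992 / 81920 = 2.60.
 */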
254
255 /*
256  * dedup <filesystem>
257  */
258 void
259 hammer_cmd_dedup(char **av, int ac)
260 {
261         struct dedup_entry *de;
262         struct sha_dedup_entry *sha_de;
263         struct pass2_dedup_entry *pass2_de;
264         char *tmp;
265         char buf[8];
266         int needfree = 0;
267
268         if (TimeoutOpt > 0)
269                 alarm(TimeoutOpt);
270
271         if (ac != 1) {
272                 dedup_usage(1);
273                 /* not reached */
274         }
275
276         STAILQ_INIT(&pass2_dedup_queue);
277
278         glob_fd = getpfs(&glob_pfs, av[0]);
279
280         /*
281          * A cycle file is _required_ for resuming dedup after the timeout
282          * specified with -t has expired. If no -c option, then place a
283          * .dedup.cycle file either in the PFS snapshots directory or in
284          * the default snapshots directory.
285          */
286         if (!CyclePath) {
287                 if (glob_pfs.ondisk->snapshots[0] != '/')
288                         asprintf(&tmp, "%s/%s/.dedup.cycle",
289                             SNAPSHOTS_BASE, av[0]);
290                 else
291                         asprintf(&tmp, "%s/.dedup.cycle",
292                             glob_pfs.ondisk->snapshots);
293                 CyclePath = tmp;
294                 needfree = 1;
295         }
296
297         /*
298          * Pre-pass to cache the btree
299          */
300         scan_pfs(av[0], count_btree_elm, "pre-pass ");
301         DedupTotalRecords = DedupCurrentRecords;
302
303         /*
304          * Collection passes (memory limited)
305          */
306         printf("Dedup running\n");
307         do {
308                 DedupCrcStart = DedupCrcEnd;
309                 DedupCrcEnd = 0;
310                 MemoryUse = 0;
311
312                 if (VerboseOpt) {
313                         printf("B-Tree pass  crc-range %08x-max\n",
314                                 DedupCrcStart);
315                         fflush(stdout);
316                 }
317                 scan_pfs(av[0], process_btree_elm, "main-pass");
318
319                 while ((pass2_de = STAILQ_FIRST(&pass2_dedup_queue)) != NULL) {
320                         if (process_btree_elm(&pass2_de->leaf, DEDUP_PASS2))
321                                 dedup_skipped_size -= pass2_de->leaf.data_len;
322
323                         STAILQ_REMOVE_HEAD(&pass2_dedup_queue, sq_entry);
324                         free(pass2_de);
325                 }
326                 assert(STAILQ_EMPTY(&pass2_dedup_queue));
327
328                 if (VerboseOpt >= 2)
329                         dump_real_dedup();
330
331                 /*
332                  * Calculate dedup ratio and get rid of the trees
333                  */
334                 while ((de = RB_ROOT(&dedup_tree)) != NULL) {
335                         if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
336                                 while ((sha_de = RB_ROOT(&de->u.fict_root)) != NULL) {
337                                         assert(sha_de->ref_blks != 0);
338                                         dedup_ref_size += sha_de->ref_size;
339                                         dedup_alloc_size += sha_de->ref_size / sha_de->ref_blks;
340
341                                         RB_REMOVE(sha_dedup_entry_rb_tree,
342                                                         &de->u.fict_root, sha_de);
343                                         free(sha_de);
344                                 }
345                                 assert(RB_EMPTY(&de->u.fict_root));
346                         } else {
347                                 assert(de->u.de.ref_blks != 0);
348                                 dedup_ref_size += de->u.de.ref_size;
349                                 dedup_alloc_size += de->u.de.ref_size / de->u.de.ref_blks;
350                         }
351
352                         RB_REMOVE(dedup_entry_rb_tree, &dedup_tree, de);
353                         free(de);
354                 }
355                 assert(RB_EMPTY(&dedup_tree));
356                 if (DedupCrcEnd && VerboseOpt == 0)
357                         printf(".");
358         } while (DedupCrcEnd);
359
360         printf("Dedup %s succeeded\n", av[0]);
361         relpfs(glob_fd, &glob_pfs);
362
363         humanize_unsigned(buf, sizeof(buf), dedup_ref_size, "B", 1024);
364         printf("Dedup ratio = %.2f (in this run)\n"
365                "    %8s referenced\n",
366                ((dedup_alloc_size != 0) ?
367                         (double)dedup_ref_size / dedup_alloc_size : 0),
368                buf
369         );
370         humanize_unsigned(buf, sizeof(buf), dedup_alloc_size, "B", 1024);
371         printf("    %8s allocated\n", buf);
372         humanize_unsigned(buf, sizeof(buf), dedup_skipped_size, "B", 1024);
373         printf("    %8s skipped\n", buf);
374         printf("    %8jd CRC collisions\n"
375                "    %8jd SHA collisions\n"
376                "    %8jd big-block underflows\n"
377                "    %8jd new dedup records\n"
378                "    %8jd new dedup bytes\n",
379                (intmax_t)dedup_crc_failures,
380                (intmax_t)dedup_sha_failures,
381                (intmax_t)dedup_underflows,
382                (intmax_t)dedup_successes_count,
383                (intmax_t)dedup_successes_bytes
384         );
385
386         /* Once completed remove cycle file */
387         hammer_reset_cycle();
388
389         /* We don't want to mess up with other directives */
390         if (needfree) {
391                 free(tmp);
392                 CyclePath = NULL;
393         }
394 }
395
396 static
397 int
398 count_btree_elm(hammer_btree_leaf_elm_t scan_leaf __unused, int flags __unused)
399 {
400         return(1);
401 }
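
/*
 * count_btree_elm() deliberately ignores the leaf: the pre-pass in
 * hammer_cmd_dedup() only uses it to warm up the B-Tree and to establish
 * DedupTotalRecords so that the SIGINFO handler in scan_pfs() can report
 * a completion percentage.
 */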
402
403 static
404 int
405 collect_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags __unused)
406 {
407         struct sim_dedup_entry *sim_de;
408
409         /*
410          * If we are using too much memory we have to clean some out, which
411          * will cause the run to use multiple passes.  Be careful of integer
412          * overflows!
413          */
414         if (MemoryUse > MemoryLimit) {
415                 DedupCrcEnd = DedupCrcStart +
416                               (uint32_t)(DedupCrcEnd - DedupCrcStart - 1) / 2;
417                 if (VerboseOpt) {
418                         printf("memory limit  crc-range %08x-%08x\n",
419                                 DedupCrcStart, DedupCrcEnd);
420                         fflush(stdout);
421                 }
422                 for (;;) {
423                         sim_de = RB_MAX(sim_dedup_entry_rb_tree,
424                                         &sim_dedup_tree);
425                         if (sim_de == NULL || sim_de->crc < DedupCrcEnd)
426                                 break;
427                         RB_REMOVE(sim_dedup_entry_rb_tree,
428                                   &sim_dedup_tree, sim_de);
429                         MemoryUse -= sizeof(*sim_de);
430                         free(sim_de);
431                 }
432         }
433
434         /*
435          * Collect statistics based on the CRC only, do not try to read
436          * any data blocks or run SHA hashes.
437          */
438         sim_de = RB_LOOKUP(sim_dedup_entry_rb_tree, &sim_dedup_tree,
439                            scan_leaf->data_crc);
440
441         if (sim_de == NULL) {
442                 sim_de = calloc(1, sizeof(*sim_de));
443                 sim_de->crc = scan_leaf->data_crc;
444                 RB_INSERT(sim_dedup_entry_rb_tree, &sim_dedup_tree, sim_de);
445                 MemoryUse += sizeof(*sim_de);
446         }
447
448         sim_de->ref_blks += 1;
449         sim_de->ref_size += scan_leaf->data_len;
450         return (1);
451 }
452
453 static __inline
454 int
455 validate_dedup_pair(hammer_btree_leaf_elm_t p, hammer_btree_leaf_elm_t q)
456 {
457         if (HAMMER_ZONE(p->data_offset) != HAMMER_ZONE(q->data_offset))
458                 return (1);
459         if (p->data_len != q->data_len)
460                 return (1);
461
462         return (0);
463 }
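
/*
 * validate_dedup_pair() returns nonzero when the two leaves cannot possibly
 * refer to dedupable copies of the same data (different zones or different
 * data lengths); the callers treat a nonzero result as a CRC or SHA
 * collision.
 */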
464
465 #define DEDUP_TECH_FAILURE      1
466 #define DEDUP_CMP_FAILURE       2
467 #define DEDUP_INVALID_ZONE      3
468 #define DEDUP_UNDERFLOW         4
469 #define DEDUP_VERS_FAILURE      5
470
471 static __inline
472 int
473 deduplicate(hammer_btree_leaf_elm_t p, hammer_btree_leaf_elm_t q)
474 {
475         struct hammer_ioc_dedup dedup;
476
477         bzero(&dedup, sizeof(dedup));
478
479         /*
480          * If the data_offset fields are the same there is no need to run the
481          * ioctl; the candidate is already dedup'ed.
482          */
483         if (p->data_offset == q->data_offset)
484                 return (0);
485
486         dedup.elm1 = p->base;
487         dedup.elm2 = q->base;
488         RunningIoctl = 1;
489         if (ioctl(glob_fd, HAMMERIOC_DEDUP, &dedup) < 0) {
490                 if (errno == EOPNOTSUPP)
491                         return (DEDUP_VERS_FAILURE); /* must be at least version 5 */
492                 /* Technical failure - locking or w/e */
493                 return (DEDUP_TECH_FAILURE);
494         }
495         if (dedup.head.flags & HAMMER_IOC_DEDUP_CMP_FAILURE)
496                 return (DEDUP_CMP_FAILURE);
497         if (dedup.head.flags & HAMMER_IOC_DEDUP_INVALID_ZONE)
498                 return (DEDUP_INVALID_ZONE);
499         if (dedup.head.flags & HAMMER_IOC_DEDUP_UNDERFLOW)
500                 return (DEDUP_UNDERFLOW);
501         RunningIoctl = 0;
502         ++dedup_successes_count;
503         dedup_successes_bytes += p->data_len;
504         return (0);
505 }
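
/*
 * A zero return from deduplicate() covers two cases: the blocks already
 * share the same data_offset (nothing to do) or the HAMMERIOC_DEDUP ioctl
 * succeeded.  The nonzero DEDUP_* codes tell the caller how to recover:
 * retry in pass 2, fall back to the SHA sub-tree, replace the remembered
 * leaf, give up on the candidate, or abort for pre-version-5 filesystems.
 */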
506
507 static
508 int
509 process_btree_elm(hammer_btree_leaf_elm_t scan_leaf, int flags)
510 {
511         struct dedup_entry *de;
512         struct sha_dedup_entry *sha_de, temp;
513         struct pass2_dedup_entry *pass2_de;
514         int error;
515
516         /*
517          * If we are using too much memory we have to clean some out, which
518          * will cause the run to use multiple passes.  Be careful of integer
519          * overflows!
520          */
521         while (MemoryUse > MemoryLimit) {
522                 DedupCrcEnd = DedupCrcStart +
523                               (uint32_t)(DedupCrcEnd - DedupCrcStart - 1) / 2;
524                 if (VerboseOpt) {
525                         printf("memory limit  crc-range %08x-%08x\n",
526                                 DedupCrcStart, DedupCrcEnd);
527                         fflush(stdout);
528                 }
529
530                 for (;;) {
531                         de = RB_MAX(dedup_entry_rb_tree, &dedup_tree);
532                         if (de == NULL || de->leaf.data_crc < DedupCrcEnd)
533                                 break;
534                         if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
535                                 while ((sha_de = RB_ROOT(&de->u.fict_root)) !=
536                                        NULL) {
537                                         RB_REMOVE(sha_dedup_entry_rb_tree,
538                                                   &de->u.fict_root, sha_de);
539                                         MemoryUse -= sizeof(*sha_de);
540                                         free(sha_de);
541                                 }
542                         }
543                         RB_REMOVE(dedup_entry_rb_tree, &dedup_tree, de);
544                         MemoryUse -= sizeof(*de);
545                         free(de);
546                 }
547         }
548
549         /*
550          * Collect statistics based on the CRC.  Colliding CRCs usually
551          * cause a SHA sub-tree to be created under the de.
552          *
553          * Trivial case if de not found.
554          */
555         de = RB_LOOKUP(dedup_entry_rb_tree, &dedup_tree, scan_leaf->data_crc);
556         if (de == NULL) {
557                 de = calloc(1, sizeof(*de));
558                 de->leaf = *scan_leaf;
559                 RB_INSERT(dedup_entry_rb_tree, &dedup_tree, de);
560                 MemoryUse += sizeof(*de);
561                 goto upgrade_stats;
562         }
563
564         /*
565          * Found entry in CRC tree
566          */
567         if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
568                 /*
569                  * Optimize the case where a CRC failure results in multiple
570                  * SHA entries.  If we unconditionally issued a data-read, a
571                  * degenerate situation (where a colliding CRC's second SHA
572                  * entry contains the lion's share of the deduplication
573                  * candidates) would result in excessive data block reads.
574                  *
575                  * Deal with the degenerate case by looking for a matching
576                  * data_offset/data_len in the SHA elements we already have
577                  * before reading the data block and generating a new SHA.
578                  */
579                 RB_FOREACH(sha_de, sha_dedup_entry_rb_tree, &de->u.fict_root) {
580                         if (sha_de->leaf.data_offset ==
581                                                 scan_leaf->data_offset &&
582                             sha_de->leaf.data_len == scan_leaf->data_len) {
583                                 memcpy(temp.sha_hash, sha_de->sha_hash,
584                                         SHA256_DIGEST_LENGTH);
585                                 break;
586                         }
587                 }
588
589                 /*
590                  * Entry in CRC tree is fictitious, so we already had problems
591                  * with this CRC. Upgrade (compute SHA) the candidate and
592                  * dive into SHA subtree. If upgrade fails insert the candidate
593                  * into Pass2 list (it will be processed later).
594                  */
595                 if (sha_de == NULL) {
596                         if (upgrade_chksum(scan_leaf, temp.sha_hash))
597                                 goto pass2_insert;
598
599                         sha_de = RB_FIND(sha_dedup_entry_rb_tree,
600                                          &de->u.fict_root, &temp);
601                 }
602
603                 /*
604                  * Nothing in SHA subtree so far, so this is a new
605                  * 'dataset'. Insert new entry into SHA subtree.
606                  */
607                 if (sha_de == NULL) {
608                         sha_de = calloc(1, sizeof(*sha_de));
609                         sha_de->leaf = *scan_leaf;
610                         memcpy(sha_de->sha_hash, temp.sha_hash,
611                                SHA256_DIGEST_LENGTH);
612                         RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root,
613                                   sha_de);
614                         MemoryUse += sizeof(*sha_de);
615                         goto upgrade_stats_sha;
616                 }
617
618                 /*
619                  * Found entry in SHA subtree, which means we have a potential
620                  * dedup pair.  Validate it (the zones have to match and the
621                  * data_len fields have to be the same too).  If validation
622                  * fails, treat it as a SHA collision (jump to sha256_failure).
623                  */
624                 if (validate_dedup_pair(&sha_de->leaf, scan_leaf))
625                         goto sha256_failure;
626
627                 /*
628                  * We have a valid dedup pair (SHA match, validated).
629                  *
630                  * In case of technical failure (dedup pair was good, but
631                  * ioctl failed anyways) insert the candidate into Pass2 list
632                  * (we will try to dedup it after we are done with the rest of
633                  * the tree).
634                  *
635                  * If the ioctl fails because either of the blocks is in a
636                  * non-dedup zone (we can dedup only in LARGE_DATA and
637                  * SMALL_DATA), don't bother with the candidate; terminate early.
638                  *
639                  * If the ioctl fails because of a big-block underflow, replace the
640                  * leaf node that the found dedup entry represents with scan_leaf.
641                  */
642                 error = deduplicate(&sha_de->leaf, scan_leaf);
643                 switch(error) {
644                 case 0:
645                         goto upgrade_stats_sha;
646                 case DEDUP_TECH_FAILURE:
647                         goto pass2_insert;
648                 case DEDUP_CMP_FAILURE:
649                         goto sha256_failure;
650                 case DEDUP_INVALID_ZONE:
651                         goto terminate_early;
652                 case DEDUP_UNDERFLOW:
653                         ++dedup_underflows;
654                         sha_de->leaf = *scan_leaf;
655                         memcpy(sha_de->sha_hash, temp.sha_hash,
656                                 SHA256_DIGEST_LENGTH);
657                         goto upgrade_stats_sha;
658                 case DEDUP_VERS_FAILURE:
659                         errx(1, "HAMMER filesystem must be at least "
660                                 "version 5 to dedup");
661                         /* not reached */
662                 default:
663                         fprintf(stderr, "Unknown error\n");
664                         goto terminate_early;
665                 }
666
667                 /*
668                  * Ooh la la.. SHA-256 collision. Terminate early, there's
669                  * nothing we can do here.
670                  */
671 sha256_failure:
672                 ++dedup_sha_failures;
673                 goto terminate_early;
674         } else {
675                 /*
676                  * Candidate CRC is good for now (we found an entry in CRC
677                  * tree and it's not fictitious). This means we have a
678                  * potential dedup pair.
679                  */
680                 if (validate_dedup_pair(&de->leaf, scan_leaf))
681                         goto crc_failure;
682
683                 /*
684                  * We have a valid dedup pair (CRC match, validated)
685                  */
686                 error = deduplicate(&de->leaf, scan_leaf);
687                 switch(error) {
688                 case 0:
689                         goto upgrade_stats;
690                 case DEDUP_TECH_FAILURE:
691                         goto pass2_insert;
692                 case DEDUP_CMP_FAILURE:
693                         goto crc_failure;
694                 case DEDUP_INVALID_ZONE:
695                         goto terminate_early;
696                 case DEDUP_UNDERFLOW:
697                         ++dedup_underflows;
698                         de->leaf = *scan_leaf;
699                         goto upgrade_stats;
700                 case DEDUP_VERS_FAILURE:
701                         errx(1, "HAMMER filesystem must be at least "
702                                 "version 5 to dedup");
703                         /* not reached */
704                 default:
705                         fprintf(stderr, "Unknown error\n");
706                         goto terminate_early;
707                 }
708
709 crc_failure:
710                 /*
711                  * We got a CRC collision - either ioctl failed because of
712                  * the comparison failure or validation of the potential
713                  * dedup pair went bad. In all cases insert both blocks
714                  * into SHA subtree (this requires checksum upgrade) and mark
715                  * entry that corresponds to this CRC in the CRC tree
716                  * fictitious, so that all further operations with this CRC go
717                  * through SHA subtree.
718                  */
719                 ++dedup_crc_failures;
720
721                 /*
722                  * Insert block that was represented by now fictitious dedup
723                  * entry (create a new SHA entry and preserve stats of the
724                  * old CRC one). If checksum upgrade fails insert the
725                  * candidate into Pass2 list and return - keep both trees
726                  * unmodified.
727                  */
728                 sha_de = calloc(1, sizeof(*sha_de));
729                 sha_de->leaf = de->leaf;
730                 sha_de->ref_blks = de->u.de.ref_blks;
731                 sha_de->ref_size = de->u.de.ref_size;
732                 if (upgrade_chksum(&sha_de->leaf, sha_de->sha_hash)) {
733                         free(sha_de);
734                         goto pass2_insert;
735                 }
736                 MemoryUse += sizeof(*sha_de);
737
738                 RB_INIT(&de->u.fict_root);
739                 /*
740                  * Here we can insert without prior checking because the tree
741                  * is empty at this point
742                  */
743                 RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root, sha_de);
744
745                 /*
746                  * Mark entry in CRC tree fictitious
747                  */
748                 de->flags |= HAMMER_DEDUP_ENTRY_FICTITIOUS;
749
750                 /*
751                  * Upgrade checksum of the candidate and insert it into
752                  * SHA subtree. If upgrade fails insert the candidate into
753                  * Pass2 list.
754                  */
755                 if (upgrade_chksum(scan_leaf, temp.sha_hash))
756                         goto pass2_insert;
757                 sha_de = RB_FIND(sha_dedup_entry_rb_tree, &de->u.fict_root,
758                                  &temp);
759                 /*
760                  * There is an entry with this SHA already, but the only
761                  * RB-tree element at this point is the entry we just added.
762                  * We know these blocks are different (this is the crc_failure
763                  * branch), so treat it as a SHA collision.
764                  */
765                 if (sha_de != NULL)
766                         goto sha256_failure;
767
768                 sha_de = calloc(1, sizeof(*sha_de));
769                 sha_de->leaf = *scan_leaf;
770                 memcpy(sha_de->sha_hash, temp.sha_hash, SHA256_DIGEST_LENGTH);
771                 RB_INSERT(sha_dedup_entry_rb_tree, &de->u.fict_root, sha_de);
772                 MemoryUse += sizeof(*sha_de);
773                 goto upgrade_stats_sha;
774         }
775
776 upgrade_stats:
777         de->u.de.ref_blks += 1;
778         de->u.de.ref_size += scan_leaf->data_len;
779         return (1);
780
781 upgrade_stats_sha:
782         sha_de->ref_blks += 1;
783         sha_de->ref_size += scan_leaf->data_len;
784         return (1);
785
786 pass2_insert:
787         /*
788          * If in pass2 mode don't insert anything, fall through to
789          * terminate_early
790          */
791         if ((flags & DEDUP_PASS2) == 0) {
792                 pass2_de = calloc(1, sizeof(*pass2_de));
793                 pass2_de->leaf = *scan_leaf;
794                 STAILQ_INSERT_TAIL(&pass2_dedup_queue, pass2_de, sq_entry);
795                 dedup_skipped_size += scan_leaf->data_len;
796                 return (1);
797         }
798
799 terminate_early:
800         /*
801          * Early termination path. Fixup stats.
802          */
803         dedup_alloc_size += scan_leaf->data_len;
804         dedup_ref_size += scan_leaf->data_len;
805         return (0);
806 }
807
808 static
809 int
810 upgrade_chksum(hammer_btree_leaf_elm_t leaf, uint8_t *sha_hash)
811 {
812         struct hammer_ioc_data data;
813         char *buf = malloc(DEDUP_BUF);
814         SHA256_CTX ctx;
815         int error;
816
817         bzero(&data, sizeof(data));
818         data.elm = leaf->base;
819         data.ubuf = buf;
820         data.size = DEDUP_BUF;
821
822         error = 0;
823         if (ioctl(glob_fd, HAMMERIOC_GET_DATA, &data) < 0) {
824                 fprintf(stderr, "Get-data failed: %s\n", strerror(errno));
825                 error = 1;
826                 goto done;
827         }
828         DedupDataReads += leaf->data_len;
829
830         if (data.leaf.data_len != leaf->data_len) {
831                 error = 1;
832                 goto done;
833         }
834
835         if (data.leaf.base.btype == HAMMER_BTREE_TYPE_RECORD &&
836             data.leaf.base.rec_type == HAMMER_RECTYPE_DATA) {
837                 SHA256_Init(&ctx);
838                 SHA256_Update(&ctx, (void *)buf, data.leaf.data_len);
839                 SHA256_Final(sha_hash, &ctx);
840         }
841
842 done:
843         free(buf);
844         return (error);
845 }
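
/*
 * upgrade_chksum() returns nonzero if the data block could not be read back
 * or its length no longer matches the leaf; in that case the callers leave
 * both trees untouched and queue the candidate for pass 2 (or, when already
 * in pass 2, give up on it) rather than trust a SHA that was never computed.
 */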
846
847 static
848 void
849 sigAlrm(int signo __unused)
850 {
851         SigAlrmFlag = 1;
852 }
853
854 static
855 void
856 sigInfo(int signo __unused)
857 {
858         SigInfoFlag = 1;
859 }
860
861 static
862 void
863 scan_pfs(char *filesystem, scan_pfs_cb_t func, const char *id)
864 {
865         struct hammer_ioc_mirror_rw mirror;
866         hammer_ioc_mrecord_any_t mrec;
867         struct hammer_btree_leaf_elm elm;
868         char *buf = malloc(DEDUP_BUF);
869         char buf1[8];
870         char buf2[8];
871         int offset, bytes;
872
873         SigInfoFlag = 0;
874         DedupDataReads = 0;
875         DedupCurrentRecords = 0;
876         signal(SIGINFO, sigInfo);
877         signal(SIGALRM, sigAlrm);
878
879         /*
880          * Deduplication happens per element so hammer(8) is in full
881          * control of the ioctl()s to actually perform it. SIGALRM
882          * needs to be handled within hammer(8) but a checkpoint
883          * is needed for resuming. Use cycle file for that.
884          *
885          * Try to obtain the previous obj_id from the cycle file and
886          * if not available just start from the beginning.
887          */
888         bzero(&mirror, sizeof(mirror));
889         hammer_key_beg_init(&mirror.key_beg);
890         hammer_get_cycle(&mirror.key_beg, &mirror.tid_beg);
891
892         if (mirror.key_beg.obj_id != (int64_t)HAMMER_MIN_OBJID) {
893                 if (VerboseOpt)
894                         fprintf(stderr, "%s: mirror-read: Resuming at object %016jx\n",
895                             id, (uintmax_t)mirror.key_beg.obj_id);
896         }
897
898         hammer_key_end_init(&mirror.key_end);
899
900         mirror.tid_beg = glob_pfs.ondisk->sync_beg_tid;
901         mirror.tid_end = glob_pfs.ondisk->sync_end_tid;
902         mirror.head.flags |= HAMMER_IOC_MIRROR_NODATA; /* we want only keys */
903         mirror.ubuf = buf;
904         mirror.size = DEDUP_BUF;
905         mirror.pfs_id = glob_pfs.pfs_id;
906         mirror.shared_uuid = glob_pfs.ondisk->shared_uuid;
907
908         if (VerboseOpt && DedupCrcStart == 0) {
909                 printf("%s %s: objspace %016jx:%04x %016jx:%04x\n",
910                         id, filesystem,
911                         (uintmax_t)mirror.key_beg.obj_id,
912                         mirror.key_beg.localization,
913                         (uintmax_t)mirror.key_end.obj_id,
914                         mirror.key_end.localization);
915                 printf("%s %s: pfs_id %d\n",
916                         id, filesystem, glob_pfs.pfs_id);
917         }
918         fflush(stdout);
919         fflush(stderr);
920
921         do {
922                 mirror.count = 0;
923                 mirror.pfs_id = glob_pfs.pfs_id;
924                 mirror.shared_uuid = glob_pfs.ondisk->shared_uuid;
925                 if (ioctl(glob_fd, HAMMERIOC_MIRROR_READ, &mirror) < 0) {
926                         err(1, "Mirror-read %s failed", filesystem);
927                         /* not reached */
928                 }
929                 if (mirror.head.flags & HAMMER_IOC_HEAD_ERROR) {
930                         errx(1, "Mirror-read %s fatal error %d",
931                                 filesystem, mirror.head.error);
932                         /* not reached */
933                 }
934                 if (mirror.count) {
935                         offset = 0;
936                         while (offset < mirror.count) {
937                                 mrec = (void *)((char *)buf + offset);
938                                 bytes = HAMMER_HEAD_DOALIGN(mrec->head.rec_size);
939                                 if (offset + bytes > mirror.count) {
940                                         errx(1, "Misaligned record");
941                                         /* not reached */
942                                 }
943                                 assert((mrec->head.type &
944                                        HAMMER_MRECF_TYPE_MASK) ==
945                                        HAMMER_MREC_TYPE_REC);
946                                 offset += bytes;
947                                 elm = mrec->rec.leaf;
948                                 if (elm.base.btype != HAMMER_BTREE_TYPE_RECORD)
949                                         continue;
950                                 if (elm.base.rec_type != HAMMER_RECTYPE_DATA)
951                                         continue;
952                                 ++DedupCurrentRecords;
953                                 if (DedupCrcStart != DedupCrcEnd) {
954                                         if (elm.data_crc < DedupCrcStart)
955                                                 continue;
956                                         if (DedupCrcEnd &&
957                                             elm.data_crc >= DedupCrcEnd) {
958                                                 continue;
959                                         }
960                                 }
961                                 func(&elm, 0);
962                         }
963                 }
964                 mirror.key_beg = mirror.key_cur;
965                 if (DidInterrupt || SigAlrmFlag) {
966                         if (VerboseOpt)
967                                 fprintf(stderr, "%s\n",
968                                     (DidInterrupt ? "Interrupted" : "Timeout"));
969                         hammer_set_cycle(&mirror.key_cur, mirror.tid_beg);
970                         if (VerboseOpt)
971                                 fprintf(stderr, "Cyclefile %s updated for "
972                                     "continuation\n", CyclePath);
973                         exit(1);
974                 }
975                 if (SigInfoFlag) {
976                         if (DedupTotalRecords) {
977                                 humanize_unsigned(buf1, sizeof(buf1),
978                                                   DedupDataReads,
979                                                   "B", 1024);
980                                 humanize_unsigned(buf2, sizeof(buf2),
981                                                   dedup_successes_bytes,
982                                                   "B", 1024);
983                                 fprintf(stderr, "%s count %7jd/%jd "
984                                                 "(%02d.%02d%%) "
985                                                 "ioread %s newddup %s\n",
986                                         id,
987                                         (intmax_t)DedupCurrentRecords,
988                                         (intmax_t)DedupTotalRecords,
989                                         (int)(DedupCurrentRecords * 100 /
990                                                 DedupTotalRecords),
991                                         (int)(DedupCurrentRecords * 10000 /
992                                                 DedupTotalRecords % 100),
993                                         buf1, buf2);
994                         } else {
995                                 fprintf(stderr, "%s count %-7jd\n",
996                                         id,
997                                         (intmax_t)DedupCurrentRecords);
998                         }
999                         SigInfoFlag = 0;
1000                 }
1001         } while (mirror.count != 0);
1002
1003         signal(SIGINFO, SIG_IGN);
1004         signal(SIGALRM, SIG_IGN);
1005
1006         free(buf);
1007 }
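
/*
 * The SIGINFO progress line above builds the percentage from pure integer
 * math; e.g. with DedupCurrentRecords = 1234 and DedupTotalRecords = 5000,
 * 1234 * 100 / 5000 = 24 and 1234 * 10000 / 5000 % 100 = 68, which prints
 * as "(24.68%)".
 */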
1008
1009 static
1010 void
1011 dump_simulated_dedup(void)
1012 {
1013         struct sim_dedup_entry *sim_de;
1014
1015         printf("=== Dumping simulated dedup entries:\n");
1016         RB_FOREACH(sim_de, sim_dedup_entry_rb_tree, &sim_dedup_tree) {
1017                 printf("\tcrc=%08x cnt=%ju size=%ju\n",
1018                         sim_de->crc,
1019                         (uintmax_t)sim_de->ref_blks,
1020                         (uintmax_t)sim_de->ref_size);
1021         }
1022         printf("end of dump ===\n");
1023 }
1024
1025 static
1026 void
1027 dump_real_dedup(void)
1028 {
1029         struct dedup_entry *de;
1030         struct sha_dedup_entry *sha_de;
1031         int i;
1032
1033         printf("=== Dumping dedup entries:\n");
1034         RB_FOREACH(de, dedup_entry_rb_tree, &dedup_tree) {
1035                 if (de->flags & HAMMER_DEDUP_ENTRY_FICTITIOUS) {
1036                         printf("\tcrc=%08x fictitious\n", de->leaf.data_crc);
1037
1038                         RB_FOREACH(sha_de, sha_dedup_entry_rb_tree, &de->u.fict_root) {
1039                                 printf("\t\tcrc=%08x cnt=%ju size=%ju\n\t"
1040                                        "\t\tsha=",
1041                                        sha_de->leaf.data_crc,
1042                        (uintmax_t)sha_de->ref_blks,
1043                        (uintmax_t)sha_de->ref_size);
1044                                 for (i = 0; i < SHA256_DIGEST_LENGTH; ++i)
1045                                         printf("%02x", sha_de->sha_hash[i]);
1046                                 printf("\n");
1047                         }
1048                 } else {
1049                         printf("\tcrc=%08x cnt=%ju size=%ju\n",
1050                                de->leaf.data_crc,
1051                (uintmax_t)de->u.de.ref_blks,
1052                (uintmax_t)de->u.de.ref_size);
1053                 }
1054         }
1055         printf("end of dump ===\n");
1056 }
1057
1058 static
1059 void
1060 dedup_usage(int code)
1061 {
1062         fprintf(stderr,
1063                 "hammer dedup-simulate <filesystem>\n"
1064                 "hammer dedup <filesystem>\n"
1065         );
1066         exit(code);
1067 }