sys/vfs/hammer2/hammer2_bulkfree.c (dragonfly.git)
1 /*
2  * Copyright (c) 2013-2019 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/proc.h>
38 #include <sys/mount.h>
39 #include <vm/vm_kern.h>
40 #include <vm/vm_extern.h>
41
42 #include "hammer2.h"
43
44 /*
45  * breadth-first search
46  */
47 typedef struct hammer2_chain_save {
48         TAILQ_ENTRY(hammer2_chain_save) entry;
49         hammer2_chain_t *chain;
50 } hammer2_chain_save_t;
51
52 TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
53 typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;
54
55 typedef struct hammer2_bulkfree_info {
56         hammer2_dev_t           *hmp;
57         kmem_anon_desc_t        kp;
58         hammer2_off_t           sbase;          /* sub-loop iteration */
59         hammer2_off_t           sstop;
60         hammer2_bmap_data_t     *bmap;
61         int                     depth;
62         long                    count_10_00;    /* staged->free      */
63         long                    count_11_10;    /* allocated->staged */
64         long                    count_00_11;    /* (should not happen) */
65         long                    count_01_11;    /* (should not happen) */
66         long                    count_10_11;    /* staged->allocated */
67         long                    count_l0cleans;
68         long                    count_linadjusts;
69         long                    count_inodes_scanned;
70         long                    count_dirents_scanned;
71         long                    count_dedup_factor;
72         long                    count_bytes_scanned;
73         long                    count_chains_scanned;
74         long                    count_chains_reported;
75         long                    bulkfree_calls;
76         int                     bulkfree_ticks;
77         int                     list_alert;
78         hammer2_off_t           adj_free;
79         hammer2_tid_t           mtid;
80         time_t                  save_time;
81         hammer2_chain_save_list_t list;
82         long                    list_count;
83         long                    list_count_max;
84         hammer2_chain_save_t    *backout;       /* ins pt while backing out */
85         hammer2_dedup_t         *dedup;
86         int                     pri;
87 } hammer2_bulkfree_info_t;
88
89 static int h2_bulkfree_test(hammer2_bulkfree_info_t *info,
90                         hammer2_blockref_t *bref, int pri, int saved_error);
91 static uint32_t bigmask_get(hammer2_bmap_data_t *bmap);
92 static int bigmask_good(hammer2_bmap_data_t *bmap, uint32_t live_bigmask);
93
94 /*
95  * General bulk scan function with callback.  Called with a referenced
96  * but UNLOCKED parent.  The parent is returned in the same state.
97  */
98 static
99 int
100 hammer2_bulkfree_scan(hammer2_chain_t *parent,
101                   int (*func)(hammer2_bulkfree_info_t *info,
102                               hammer2_blockref_t *bref),
103                   hammer2_bulkfree_info_t *info)
104 {
105         hammer2_blockref_t bref;
106         hammer2_chain_t *chain;
107         hammer2_chain_save_t *tail;
108         hammer2_chain_save_t *save;
109         int first = 1;
110         int rup_error;
111         int error;
112         int e2;
113
114         ++info->pri;
115
116         chain = NULL;
117         rup_error = 0;
118         error = 0;
119
120         hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
121                                    HAMMER2_RESOLVE_SHARED);
122
123         /*
124          * End of scan if parent is a PFS
125          */
126         tail = TAILQ_FIRST(&info->list);
127
128         /*
129          * The parent was previously retrieved NODATA and thus has not
130          * tested the CRC.  Now that we have locked it normally, check
131          * for a CRC problem and skip it if we found one.  The bulk scan
132          * cannot safely traverse invalid block tables (we could end up
133          * in an endless loop or cause a panic).
134          */
135         if (parent->error & HAMMER2_ERROR_CHECK) {
136                 error = parent->error;
137                 goto done;
138         }
139
140         /*
141          * Report which PFS is being scanned
142          */
143         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
144             (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
145                 kprintf("hammer2_bulkfree: Scanning %s\n",
146                         parent->data->ipdata.filename);
147         }
148
149         /*
150          * Generally loop on the contents if we have not been flagged
151          * for abort.
152          *
153          * Remember that these chains are completely isolated from
154          * the frontend, so we can release locks temporarily without
155          * imploding.
156          */
157         for (;;) {
158                 error |= hammer2_chain_scan(parent, &chain, &bref, &first,
159                                             HAMMER2_LOOKUP_NODATA |
160                                             HAMMER2_LOOKUP_SHARED);
161
162                 /*
163                  * Handle EOF or other error at current level.  This stops
164                  * the bulkfree scan.
165                  */
166                 if (error & ~HAMMER2_ERROR_CHECK)
167                         break;
168
169                 /*
170                  * Account for dirents before the data_off test, since most
171                  * dirents do not need a data reference.
172                  */
173                 if (bref.type == HAMMER2_BREF_TYPE_DIRENT)
174                         ++info->count_dirents_scanned;
175
176                 /*
177                  * Ignore brefs without data (typically dirents)
178                  */
179                 if ((bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0)
180                         continue;
181
182                 /*
183                  * Process bref, chain is only non-NULL if the bref
184                  * might be recursable (it's possible that we sometimes get
185                  * a non-NULL chain where the bref cannot be recursed).
186                  *
187                  * If we already ran down this tree we do not have to do it
188                  * again, but we must still recover any cumulative error
189                  * recorded from the time we did.
190                  */
191                 ++info->pri;
192                 e2 = h2_bulkfree_test(info, &bref, 1, 0);
193                 if (e2) {
194                         error |= e2 & ~HAMMER2_ERROR_EOF;
195                         continue;
196                 }
197
198                 if (bref.type == HAMMER2_BREF_TYPE_INODE)
199                         ++info->count_inodes_scanned;
200
201                 error |= func(info, &bref);
202                 if (error & ~HAMMER2_ERROR_CHECK)
203                         break;
204
205                 /*
206                  * A non-NULL chain is always returned if the bref is
207                  * recursive; otherwise a non-NULL chain might still be
208                  * returned, but usually is not.
209                  */
210                 if (chain == NULL)
211                         continue;
212
213                 info->count_bytes_scanned += chain->bytes;
214                 ++info->count_chains_scanned;
215
216                 if (info->count_chains_scanned >=
217                     info->count_chains_reported + 1000000 ||
218                     (info->count_chains_scanned < 1000000 &&
219                      info->count_chains_scanned >=
220                      info->count_chains_reported + 100000)) {
221                         kprintf(" chains %-7ld inodes %-7ld "
222                                 "dirents %-7ld bytes %5ldMB\n",
223                                 info->count_chains_scanned,
224                                 info->count_inodes_scanned,
225                                 info->count_dirents_scanned,
226                                 info->count_bytes_scanned / 1000000);
227                         info->count_chains_reported =
228                                 info->count_chains_scanned;
229                 }
230
231                 /*
232                  * Otherwise check the type and set up a depth-first scan.
233                  *
234                  * Account for bytes actually read.
235                  */
236                 switch(chain->bref.type) {
237                 case HAMMER2_BREF_TYPE_INODE:
238                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
239                 case HAMMER2_BREF_TYPE_INDIRECT:
240                 case HAMMER2_BREF_TYPE_VOLUME:
241                 case HAMMER2_BREF_TYPE_FREEMAP:
242                         ++info->depth;
243                         if (chain->error & HAMMER2_ERROR_CHECK) {
244                                 /*
245                                  * Cannot safely recurse chains with crc
246                                  * errors, even in emergency mode.
247                                  */
248                                 /* NOP */
249                         } else if (info->depth > 16 ||
250                                    info->backout ||
251                                    (info->depth > hammer2_limit_saved_depth &&
252                                    info->list_count >=
253                                     (hammer2_limit_saved_chains >> 2)))
254                         {
255                                 /*
256                                  * We must defer the recursion if it runs
257                                  * too deep or if too many saved chains are
258                                  * allocated.
259                                  *
260                                  * In the case of too many saved chains, we
261                                  * have to stop recursing ASAP to avoid an
262                                  * explosion of memory use since each radix
263                                  * level can hold 512 elements.
264                                  *
265                                  * If we had to defer at a deeper level
266                                  * backout is non-NULL.  We must back out
267                                  * completely before resuming.
268                                  */
269                                 if (info->list_count >
270                                      hammer2_limit_saved_chains &&
271                                     info->list_alert == 0)
272                                 {
273                                         kprintf("hammer2: during bulkfree, "
274                                                 "saved chains exceeded %ld "
275                                                 "at depth %d, "
276                                                 "backing off to less-efficient "
277                                                 "operation\n",
278                                                 hammer2_limit_saved_chains,
279                                                 info->depth);
280                                         info->list_alert = 1;
281                                 }
282
283                                 /*
284                                  * Must be placed at head so pfsroot scan
285                                  * can exhaust saved elements for that pfs
286                                  * first.
287                                  *
288                                  * Must be placed at head for depth-first
289                                  * recovery when too many saved chains, to
290                                  * limit number of chains saved during
291                                  * saved-chain reruns.  The worst-case excess
292                                  * is (maximum_depth * 512) saved chains above
293                                  * the threshold.
294                                  *
295                                  * The maximum_depth generally occurs in the
296                                  * inode index and can be fairly deep once
297                                  * the radix tree becomes a bit fragmented.
298                  * Nominally, 100M inodes would be only 4 deep,
299                                  * plus a maximally sized file would be another
300                                  * 8 deep, but with fragmentation it can wind
301                                  * up being a lot more.
302                                  *
303                                  * However, when backing out, we have to place
304                                  * all the entries in each parent node not
305                                  * yet processed on the list too, and because
306                                  * these entries are shallower they must be
307                                  * placed after each other in order to maintain
308                                  * our depth-first processing.
309                                  */
310                                 save = kmalloc(sizeof(*save), M_HAMMER2,
311                                                M_WAITOK | M_ZERO);
312                                 save->chain = chain;
313                                 hammer2_chain_ref(chain);
314
315                                 if (info->backout) {
316                                         TAILQ_INSERT_AFTER(&info->list,
317                                                            info->backout,
318                                                            save, entry);
319                                 } else {
320                                         TAILQ_INSERT_HEAD(&info->list,
321                                                           save, entry);
322                                 }
323                                 info->backout = save;
324                                 ++info->list_count;
325                                 if (info->list_count_max < info->list_count)
326                                         info->list_count_max = info->list_count;
327
328                                 /* guess */
329                                 info->pri += 10;
330                         } else {
331                                 int savepri = info->pri;
332
333                                 hammer2_chain_unlock(chain);
334                                 hammer2_chain_unlock(parent);
335                                 info->pri = 0;
336                                 rup_error |= hammer2_bulkfree_scan(chain,
337                                                                    func, info);
338                                 info->pri += savepri;
339                                 hammer2_chain_lock(parent,
340                                                    HAMMER2_RESOLVE_ALWAYS |
341                                                    HAMMER2_RESOLVE_SHARED);
342                                 hammer2_chain_lock(chain,
343                                                    HAMMER2_RESOLVE_ALWAYS |
344                                                    HAMMER2_RESOLVE_SHARED);
345                         }
346                         --info->depth;
347                         break;
348                 case HAMMER2_BREF_TYPE_DATA:
349                         break;
350                 default:
351                         /* does not recurse */
352                         break;
353                 }
354                 if (rup_error & HAMMER2_ERROR_ABORTED)
355                         break;
356         }
357         if (chain) {
358                 hammer2_chain_unlock(chain);
359                 hammer2_chain_drop(chain);
360         }
361
362         /*
363          * If this is a PFSROOT, also re-run any deferred elements
364          * added during our scan so we can report any cumulative errors
365          * for the PFS.
366          */
367         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
368             (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
369                 for (;;) {
370                         int opri;
371
372                         save = TAILQ_FIRST(&info->list);
373                         if (save == tail)       /* exhaust this PFS only */
374                                 break;
375
376                         TAILQ_REMOVE(&info->list, save, entry);
377                         info->backout = NULL;
378                         --info->list_count;
379                         opri = info->pri;
380                         info->pri = 0;
381                         rup_error |= hammer2_bulkfree_scan(save->chain, func, info);
382                         hammer2_chain_drop(save->chain);
383                         kfree(save, M_HAMMER2);
384                         info->pri = opri;
385                 }
386         }
387
388         error |= rup_error;
389
390         /*
391          * Report which PFS the errors were encountered in.
392          */
393         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
394             (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
395             (error & ~HAMMER2_ERROR_EOF)) {
396                 kprintf("hammer2_bulkfree: Encountered errors (%08x) "
397                         "while scanning \"%s\"\n",
398                         error, parent->data->ipdata.filename);
399         }
400
401         /*
402          * Save with higher pri now that we know what it is.
403          */
404         h2_bulkfree_test(info, &parent->bref, info->pri + 1,
405                          (error & ~HAMMER2_ERROR_EOF));
406
407 done:
408         hammer2_chain_unlock(parent);
409
410         return (error & ~HAMMER2_ERROR_EOF);
411 }
412
413 /*
414  * Bulkfree algorithm
415  *
416  * Repeat {
417  *      Chain flush (partial synchronization) XXX removed
418  *      Scan the whole topology - build in-memory freemap (mark 11)
419  *      Reconcile the in-memory freemap against the on-disk freemap.
420  *              ondisk xx -> ondisk 11 (if allocated)
421  *              ondisk 11 -> ondisk 10 (if free in-memory)
422  *              ondisk 10 -> ondisk 00 (if free in-memory) - on next pass
423  * }
424  *
425  * The topology scan may have to be performed multiple times to window
426  * freemaps which are too large to fit in kernel memory.
427  *
428  * Races are handled using a double-transition (11->10, 10->00).  The bulkfree
429  * scan snapshots the volume root's blockset and thus can run concurrently with
430  * normal operations, as long as a full flush is made between each pass to
431  * synchronize any modified chains (otherwise their blocks might be improperly
432  * freed).
433  *
434  * Temporary memory in multiples of 32KB is required to reconstruct the leaf
435  * hammer2_bmap_data blocks so they can later be compared against the live
436  * freemap.  Each 32KB represents 256 x 16KB x 256 = ~1 GB of storage.
437  * A 32MB save area thus represents around ~1 TB.  The temporary memory
438  * allocated can be specified.  If it is not sufficient, multiple topology
439  * passes will be made.
440  */
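
/*
 * A minimal illustrative sketch (kept under #if 0, not compiled into the
 * driver) of the buffer-to-coverage math described above.  The helper
 * names below are hypothetical; the constants are the ones already used
 * in this file.  Each HAMMER2_FREEMAP_LEVELN_PSIZE (32KB) chunk of
 * temporary memory reconstructs the leaf bitmaps for one
 * HAMMER2_FREEMAP_LEVEL1_SIZE (1GB) range, so a 32MB buffer covers
 * roughly 1TB per pass and the number of passes is the total storage
 * divided by that coverage, rounded up.
 */
#if 0
static hammer2_off_t
bulkfree_coverage_per_pass(size_t bufsize)
{
	/* media bytes covered by a single pass with a bufsize-byte buffer */
	return ((hammer2_off_t)(bufsize / HAMMER2_FREEMAP_LEVELN_PSIZE) *
		HAMMER2_FREEMAP_LEVEL1_SIZE);
}

static int
bulkfree_pass_estimate(hammer2_off_t total_size, size_t bufsize)
{
	/*
	 * Assumes bufsize is at least one 32KB chunk.  For example, a
	 * 32MB buffer yields ~1TB of coverage, or 4 passes for a 4TB
	 * volume.
	 */
	return ((int)howmany(total_size, bulkfree_coverage_per_pass(bufsize)));
}
#endif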
441
442 /*
443  * Bulkfree callback info
444  */
445 static void hammer2_bulkfree_thread(void *arg __unused);
446 static void cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size);
447 static int h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo,
448                         hammer2_blockref_t *bref);
449 static int h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo);
450 static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
451                         hammer2_off_t data_off, hammer2_bmap_data_t *live,
452                         hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base);
453
454 void
455 hammer2_bulkfree_init(hammer2_dev_t *hmp)
456 {
457         hammer2_thr_create(&hmp->bfthr, NULL, hmp,
458                            hmp->devrepname, -1, -1,
459                            hammer2_bulkfree_thread);
460 }
461
462 void
463 hammer2_bulkfree_uninit(hammer2_dev_t *hmp)
464 {
465         hammer2_thr_delete(&hmp->bfthr);
466 }
467
468 static void
469 hammer2_bulkfree_thread(void *arg)
470 {
471         hammer2_thread_t *thr = arg;
472         hammer2_ioc_bulkfree_t bfi;
473         uint32_t flags;
474
475         for (;;) {
476                 hammer2_thr_wait_any(thr,
477                                      HAMMER2_THREAD_STOP |
478                                      HAMMER2_THREAD_FREEZE |
479                                      HAMMER2_THREAD_UNFREEZE |
480                                      HAMMER2_THREAD_REMASTER,
481                                      hz * 60);
482
483                 flags = thr->flags;
484                 cpu_ccfence();
485                 if (flags & HAMMER2_THREAD_STOP)
486                         break;
487                 if (flags & HAMMER2_THREAD_FREEZE) {
488                         hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
489                                                  HAMMER2_THREAD_FREEZE);
490                         continue;
491                 }
492                 if (flags & HAMMER2_THREAD_UNFREEZE) {
493                         hammer2_thr_signal2(thr, 0,
494                                                  HAMMER2_THREAD_FROZEN |
495                                                  HAMMER2_THREAD_UNFREEZE);
496                         continue;
497                 }
498                 if (flags & HAMMER2_THREAD_FROZEN)
499                         continue;
500                 if (flags & HAMMER2_THREAD_REMASTER) {
501                         hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
502                         bzero(&bfi, sizeof(bfi));
503                         bfi.size = 8192 * 1024;
504                         /* hammer2_bulkfree_pass(thr->hmp, &bfi); */
505                 }
506         }
507         thr->td = NULL;
508         hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
509         /* structure can go invalid at this point */
510 }
511
512 int
513 hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
514                       hammer2_ioc_bulkfree_t *bfi)
515 {
516         hammer2_bulkfree_info_t cbinfo;
517         hammer2_chain_save_t *save;
518         hammer2_off_t incr;
519         size_t size;
520         int error;
521
522         /*
523          * We have to clear the live dedup cache as it might have entries
524          * that are freeable as of now.  Any new entries in the dedup cache
525          * made after this point, even if they become freeable, will have
526          * previously been fully allocated and will be protected by the
527          * 2-stage bulkfree.
528          */
529         hammer2_dedup_clear(hmp);
530
531         /*
532          * Setup for free pass using the buffer size specified by the
533          * hammer2 utility, 32K-aligned.
534          */
535         bzero(&cbinfo, sizeof(cbinfo));
536         size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
537                ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);
538
539         /*
540          * Cap at 1/4 physical memory (the hammer2 utility will not normally
541          * specify a buffer this big, but leave the option available).
542          */
543         if (size > kmem_lim_size() * 1024 * 1024 / 4) {
544                 size = kmem_lim_size() * 1024 * 1024 / 4;
545                 kprintf("hammer2: Warning: capping bulkfree buffer at %jdM\n",
546                         (intmax_t)size / (1024 * 1024));
547         }
548
549 #define HAMMER2_FREEMAP_SIZEDIV \
550         (HAMMER2_FREEMAP_LEVEL1_SIZE / HAMMER2_FREEMAP_LEVELN_PSIZE)
551
552         /*
553          * Cap at the size needed to cover the whole volume to avoid
554          * making an unnecessarily large allocation.
555          */
556         if (size > hmp->total_size / HAMMER2_FREEMAP_SIZEDIV)
557                 size = howmany(hmp->total_size, HAMMER2_FREEMAP_SIZEDIV);
558
559         /*
560          * Minimum bitmap buffer size, then align to a LEVELN_PSIZE (32K)
561          * boundary.
562          */
563         if (size < 1024 * 1024)
564                 size = 1024 * 1024;
565         size = (size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
566                ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);
567
568         cbinfo.hmp = hmp;
569         cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER);
570         cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE,
571                                M_HAMMER2, M_WAITOK | M_ZERO);
572
573         kprintf("hammer2: bulkfree buf=%jdM\n",
574                 (intmax_t)size / (1024 * 1024));
575
576         /*
577          * Normalize start point to a 1GB boundary.  We operate on a
578          * 32KB leaf bitmap boundary which represents 1GB of storage.
579          */
580         cbinfo.sbase = bfi->sbase;
581         if (cbinfo.sbase > hmp->total_size)
582                 cbinfo.sbase = hmp->total_size;
583         cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
584         TAILQ_INIT(&cbinfo.list);
585
586         cbinfo.bulkfree_ticks = ticks;
587
588         /*
589          * Loop on a full meta-data scan as many times as required to
590          * get through all available storage.
591          */
592         error = 0;
593         while (cbinfo.sbase < hmp->total_size) {
594                 /*
595                  * We have enough RAM to represent (incr) bytes of storage.
596                  * Each 32KB of RAM represents 1GB of storage.
597                  *
598                  * We must also clean out our de-duplication heuristic for
599                  * each (incr) bytes of storage; otherwise we wind up not
600                  * scanning meta-data for later areas of storage because
601                  * they had already been scanned in earlier areas of storage.
602                  * Since the ranging is different, we have to restart
603                  * the dedup heuristic too.
604                  */
605                 int allmedia;
606
607                 cbinfo_bmap_init(&cbinfo, size);
608                 bzero(cbinfo.dedup, sizeof(*cbinfo.dedup) *
609                                     HAMMER2_DEDUP_HEUR_SIZE);
610                 cbinfo.count_inodes_scanned = 0;
611                 cbinfo.count_dirents_scanned = 0;
612                 cbinfo.count_bytes_scanned = 0;
613                 cbinfo.count_chains_scanned = 0;
614                 cbinfo.count_chains_reported = 0;
615
616                 incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE *
617                        HAMMER2_FREEMAP_LEVEL1_SIZE;
618                 if (hmp->total_size - cbinfo.sbase <= incr) {
619                         cbinfo.sstop = hmp->total_size;
620                         allmedia = 1;
621                 } else {
622                         cbinfo.sstop = cbinfo.sbase + incr;
623                         allmedia = 0;
624                 }
625                 kprintf("hammer2: pass %016jx-%016jx ",
626                         (intmax_t)cbinfo.sbase,
627                         (intmax_t)cbinfo.sstop);
628                 if (allmedia && cbinfo.sbase == 0)
629                         kprintf("(all media)\n");
630                 else if (allmedia)
631                         kprintf("(remaining media)\n");
632                 else
633                         kprintf("(%jdGB of media)\n",
634                                 (intmax_t)incr / (1024L*1024*1024));
635
636                 /*
637                  * Scan topology for stuff inside this range.
638                  *
639                  * NOTE - By not using a transaction the operation can
640                  *        run concurrently with the frontend as well as
641                  *        with flushes.
642                  *
643                  *        We cannot safely set a mtid without a transaction,
644                  *        We cannot safely set an mtid without a transaction,
645                  *        and in fact we don't want to set one anyway.  We
646                  *        want the bulkfree to be passive and not interfere
647                  */
648 #undef HAMMER2_BULKFREE_TRANS   /* undef - don't use transaction */
649 #ifdef HAMMER2_BULKFREE_TRANS
650                 hammer2_trans_init(hmp->spmp, 0);
651                 cbinfo.mtid = hammer2_trans_sub(hmp->spmp);
652 #else
653                 cbinfo.mtid = 0;
654 #endif
655                 cbinfo.pri = 0;
656                 error |= hammer2_bulkfree_scan(vchain,
657                                                h2_bulkfree_callback, &cbinfo);
658
659                 while ((save = TAILQ_FIRST(&cbinfo.list)) != NULL &&
660                        (error & ~HAMMER2_ERROR_CHECK) == 0) {
661                         TAILQ_REMOVE(&cbinfo.list, save, entry);
662                         --cbinfo.list_count;
663                         cbinfo.pri = 0;
664                         cbinfo.backout = NULL;
665                         error |= hammer2_bulkfree_scan(save->chain,
666                                                        h2_bulkfree_callback,
667                                                        &cbinfo);
668                         hammer2_chain_drop(save->chain);
669                         kfree(save, M_HAMMER2);
670                 }
671                 while (save) {
672                         TAILQ_REMOVE(&cbinfo.list, save, entry);
673                         --cbinfo.list_count;
674                         hammer2_chain_drop(save->chain);
675                         kfree(save, M_HAMMER2);
676                         save = TAILQ_FIRST(&cbinfo.list);
677                 }
678                 cbinfo.backout = NULL;
679
680                 /*
681                  * If the complete scan succeeded we can synchronize our
682                  * in-memory freemap against live storage.  If an abort
683                  * occurred we cannot safely synchronize our partially
684                  * filled-out in-memory freemap.
685                  *
686                  * We still synchronize on CHECK failures.  That is, we still
687                  * want bulkfree to operate even if the filesystem has defects.
688                  */
689                 if (error & ~HAMMER2_ERROR_CHECK) {
690                         kprintf("bulkfree lastdrop %d %d error=0x%04x\n",
691                                 vchain->refs, vchain->core.chain_count, error);
692                 } else {
693                         if (error & HAMMER2_ERROR_CHECK) {
694                                 kprintf("bulkfree lastdrop %d %d "
695                                         "(with check errors)\n",
696                                         vchain->refs, vchain->core.chain_count);
697                         } else {
698                                 kprintf("bulkfree lastdrop %d %d\n",
699                                         vchain->refs, vchain->core.chain_count);
700                         }
701
702                         error = h2_bulkfree_sync(&cbinfo);
703
704                         hammer2_voldata_lock(hmp);
705                         hammer2_voldata_modify(hmp);
706                         hmp->voldata.allocator_free += cbinfo.adj_free;
707                         hammer2_voldata_unlock(hmp);
708                 }
709
710                 /*
711                  * Cleanup for next loop.
712                  */
713 #ifdef HAMMER2_BULKFREE_TRANS
714                 hammer2_trans_done(hmp->spmp, 0);
715 #endif
716                 if (error & ~HAMMER2_ERROR_CHECK)
717                         break;
718                 cbinfo.sbase = cbinfo.sstop;
719                 cbinfo.adj_free = 0;
720         }
721         kmem_free_swapbacked(&cbinfo.kp);
722         kfree(cbinfo.dedup, M_HAMMER2);
723         cbinfo.dedup = NULL;
724
725         bfi->sstop = cbinfo.sbase;
726
727         incr = bfi->sstop / (hmp->total_size / 10000);
728         if (incr > 10000)
729                 incr = 10000;
730
731         kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n",
732                 (int)incr / 100,
733                 (int)incr % 100);
734
735         if (error & ~HAMMER2_ERROR_CHECK) {
736                 kprintf("    bulkfree was aborted\n");
737         } else {
738                 if (error & HAMMER2_ERROR_CHECK) {
739                         kprintf("    WARNING: bulkfree "
740                                 "encountered CRC errors\n");
741                 }
742                 kprintf("    transition->free   %ld\n", cbinfo.count_10_00);
743                 kprintf("    transition->staged %ld\n", cbinfo.count_11_10);
744                 kprintf("    ERR(00)->allocated %ld\n", cbinfo.count_00_11);
745                 kprintf("    ERR(01)->allocated %ld\n", cbinfo.count_01_11);
746                 kprintf("    staged->allocated  %ld\n", cbinfo.count_10_11);
747                 kprintf("    ~4MB segs cleaned  %ld\n", cbinfo.count_l0cleans);
748                 kprintf("    linear adjusts     %ld\n",
749                         cbinfo.count_linadjusts);
750                 kprintf("    dedup factor       %ld\n",
751                         cbinfo.count_dedup_factor);
752                 kprintf("    max saved chains   %ld\n", cbinfo.list_count_max);
753         }
754
755         return error;
756 }
757
758 static void
759 cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size)
760 {
761         hammer2_bmap_data_t *bmap = cbinfo->bmap;
762         hammer2_key_t key = cbinfo->sbase;
763         hammer2_key_t lokey;
764         hammer2_key_t hikey;
765
766         lokey = (cbinfo->hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
767                 ~HAMMER2_SEGMASK64;
768         hikey = cbinfo->hmp->total_size & ~HAMMER2_SEGMASK64;
769
770         bzero(bmap, size);
771         while (size) {
772                 bzero(bmap, sizeof(*bmap));
773                 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX))
774                         lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX);
775                 if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64)
776                         lokey = H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64;
777                 if (key < lokey || key >= hikey) {
778                         memset(bmap->bitmapq, -1,
779                                sizeof(bmap->bitmapq));
780                         bmap->avail = 0;
781                         bmap->linear = HAMMER2_SEGSIZE;
782                 } else {
783                         bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
784                 }
785                 size -= sizeof(*bmap);
786                 key += HAMMER2_FREEMAP_LEVEL0_SIZE;
787                 ++bmap;
788         }
789 }
790
791 static int
792 h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref)
793 {
794         hammer2_bmap_data_t *bmap;
795         hammer2_off_t data_off;
796         uint16_t class;
797         size_t bytes;
798         int radix;
799
800         /*
801          * Check for signal and allow yield to userland during scan.
802          */
803         if (hammer2_signal_check(&cbinfo->save_time))
804                 return HAMMER2_ERROR_ABORTED;
805
806         /*
807          * Deal with kernel thread CPU or I/O hogging by limiting the
808          * number of chains scanned per second to hammer2_bulkfree_tps.
809          * Ignore leaf records (DIRENT and DATA); no per-record I/O is
810          * involved for those since we don't load their data.
811          */
812         if (bref->type != HAMMER2_BREF_TYPE_DATA &&
813             bref->type != HAMMER2_BREF_TYPE_DIRENT) {
814                 ++cbinfo->bulkfree_calls;
815                 if (cbinfo->bulkfree_calls > hammer2_bulkfree_tps) {
816                         int dticks = ticks - cbinfo->bulkfree_ticks;
817                         if (dticks < 0)
818                                 dticks = 0;
819                         if (dticks < hz) {
820                                 tsleep(&cbinfo->bulkfree_ticks, 0,
821                                        "h2bw", hz - dticks);
822                         }
823                         cbinfo->bulkfree_calls = 0;
824                         cbinfo->bulkfree_ticks = ticks;
825                 }
826         }
827
828         /*
829          * Calculate the data offset and determine if it is within
830          * the current freemap range being gathered.
831          */
832         data_off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
833         if (data_off < cbinfo->sbase || data_off >= cbinfo->sstop)
834                 return 0;
835         if (data_off < cbinfo->hmp->voldata.allocator_beg)
836                 return 0;
837         if (data_off >= cbinfo->hmp->total_size)
838                 return 0;
839
840         /*
841          * Calculate the information needed to generate the in-memory
842          * freemap record.
843          *
844          * Hammer2 does not allow allocations to cross the L1 (1GB) boundary;
845          * it's a problem if one does (or the L0 (4MB) boundary, for that matter).
846          */
847         radix = (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
848         KKASSERT(radix != 0);
849         bytes = (size_t)1 << radix;
850         class = (bref->type << 8) | HAMMER2_PBUFRADIX;
851
852         if (data_off + bytes > cbinfo->sstop) {
853                 kprintf("hammer2_bulkfree_scan: illegal 1GB boundary "
854                         "%016jx %016jx/%d\n",
855                         (intmax_t)bref->data_off,
856                         (intmax_t)bref->key,
857                         bref->keybits);
858                 bytes = cbinfo->sstop - data_off;       /* XXX */
859         }
860
861         /*
862          * Convert to a storage offset relative to the beginning of the
863          * storage range we are collecting.  Then look up the level0 bmap entry.
864          */
865         data_off -= cbinfo->sbase;
866         bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);
867
868         /*
869          * Convert data_off to a bmap-relative value (~4MB storage range).
870          * Adjust linear, class, and avail.
871          *
872          * Hammer2 does not allow allocations to cross the L0 (4MB) boundary.
873          */
874         data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
875         if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
876                 kprintf("hammer2_bulkfree_scan: illegal 4MB boundary "
877                         "%016jx %016jx/%d\n",
878                         (intmax_t)bref->data_off,
879                         (intmax_t)bref->key,
880                         bref->keybits);
881                 bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
882         }
883
884         if (bmap->class == 0) {
885                 bmap->class = class;
886                 bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
887         }
888
889         /*
890          * NOTE: bmap->class does not have to match class.  Classification
891          *       is relaxed when free space is low, so some mixing can occur.
892          */
893 #if 0
894         /*
895          * XXX removed
896          */
897         if (bmap->class != class) {
898                 kprintf("hammer2_bulkfree_scan: illegal mixed class "
899                         "%016jx %016jx/%d (%04x vs %04x)\n",
900                         (intmax_t)bref->data_off,
901                         (intmax_t)bref->key,
902                         bref->keybits,
903                         class, bmap->class);
904         }
905 #endif
906
907         /*
908          * Just record the highest byte-granular offset for now.  Do not
909          * match against allocations which are in multiples of whole blocks.
910          *
911          * Make sure that any in-block linear offset at least covers the
912          * data range.  This can cause bmap->linear to become block-aligned.
913          */
914         if (bytes & HAMMER2_FREEMAP_BLOCK_MASK) {
915                 if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
916                         bmap->linear = (int32_t)data_off + (int32_t)bytes;
917         } else if (bmap->linear >= (int32_t)data_off &&
918                    bmap->linear < (int32_t)data_off + (int32_t)bytes) {
919                 bmap->linear = (int32_t)data_off + (int32_t)bytes;
920         }
921
922         /*
923          * Adjust the hammer2_bitmap_t bitmapq[HAMMER2_BMAP_ELEMENTS] array:
924          * 64-bit elements, 2 bits per 16KB block, coding each covered block to 11.
925          *
926          * NOTE: data_off mask to 524288, shift right by 14 (radix for 16384),
927          *       and multiply shift amount by 2 for sets of 2 bits.
928          *
929          * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE.
930          *       Also, data_off may not be FREEMAP_BLOCK_SIZE aligned.
931          */
932         while (bytes > 0) {
933                 hammer2_bitmap_t bmask;
934                 int bindex;
935
936                 bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
937                                            HAMMER2_BMAP_INDEX_RADIX);
938                 bmask = (hammer2_bitmap_t)3 <<
939                         ((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
940                          HAMMER2_FREEMAP_BLOCK_RADIX) << 1);
941
942                 /*
943                  * NOTE! The (avail) calculation is bitmap-granular.  Multiple
944                  *       sub-granular records can wind up at the same bitmap
945                  *       position.
946                  */
947                 if ((bmap->bitmapq[bindex] & bmask) == 0) {
948                         if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) {
949                                 bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE;
950                         } else {
951                                 bmap->avail -= bytes;
952                         }
953                         bmap->bitmapq[bindex] |= bmask;
954                 }
955                 data_off += HAMMER2_FREEMAP_BLOCK_SIZE;
956                 if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE)
957                         bytes = 0;
958                 else
959                         bytes -= HAMMER2_FREEMAP_BLOCK_SIZE;
960         }
961         return 0;
962 }
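
/*
 * A small standalone sketch (under #if 0, not compiled) of the
 * bindex/bmask derivation used in the loop above.  It assumes the
 * geometry implied by the NOTEs there: 16KB freemap blocks (radix 14),
 * 64-bit bitmapq[] elements each holding 32 two-bit pairs (512KB of
 * storage), and eight elements per 4MB leaf.  The helper name below is
 * hypothetical.
 *
 * Worked example: a bmap-relative data_off of 0x9C000 gives
 * bindex = 0x9C000 >> 19 = 1 and a pair shift of
 * ((0x9C000 & 0x7ffff) >> 14) << 1 = 14, i.e. bits 14-15 of bitmapq[1].
 */
#if 0
static void
bulkfree_bitpos(hammer2_off_t data_off, int *bindexp, hammer2_bitmap_t *bmaskp)
{
	/* which 64-bit element of bitmapq[] covers data_off */
	*bindexp = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
				     HAMMER2_BMAP_INDEX_RADIX);
	/* 2-bit mask for the 16KB block within that element */
	*bmaskp = (hammer2_bitmap_t)3 <<
		  ((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
		    HAMMER2_FREEMAP_BLOCK_RADIX) << 1);
}
#endif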
963
964 /*
965  * Synchronize the in-memory bitmap with the live freemap.  This is not a
966  * direct copy.  Instead the bitmaps must be compared:
967  *
968  *      In-memory       Live-freemap
969  *         00             11 -> 10      (do nothing if live modified)
970  *                        10 -> 00      (do nothing if live modified)
971  *         11             10 -> 11      handles race against live
972  *                        ** -> 11      nominally warn of corruption
973  *
974  * We must also fix up the hints in HAMMER2_BREF_TYPE_FREEMAP_LEAF.
975  */
976 static int
977 h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo)
978 {
979         hammer2_off_t data_off;
980         hammer2_key_t key;
981         hammer2_key_t key_dummy;
982         hammer2_bmap_data_t *bmap;
983         hammer2_bmap_data_t *live;
984         hammer2_chain_t *live_parent;
985         hammer2_chain_t *live_chain;
986         int bmapindex;
987         int error;
988
989         kprintf("hammer2_bulkfree - range ");
990
991         if (cbinfo->sbase < cbinfo->hmp->voldata.allocator_beg)
992                 kprintf("%016jx-",
993                         (intmax_t)cbinfo->hmp->voldata.allocator_beg);
994         else
995                 kprintf("%016jx-",
996                         (intmax_t)cbinfo->sbase);
997
998         if (cbinfo->sstop > cbinfo->hmp->total_size)
999                 kprintf("%016jx\n",
1000                         (intmax_t)cbinfo->hmp->total_size);
1001         else
1002                 kprintf("%016jx\n",
1003                         (intmax_t)cbinfo->sstop);
1004
1005         data_off = cbinfo->sbase;
1006         bmap = cbinfo->bmap;
1007
1008         live_parent = &cbinfo->hmp->fchain;
1009         hammer2_chain_ref(live_parent);
1010         hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS);
1011         live_chain = NULL;
1012         error = 0;
1013
1014         /*
1015          * Iterate each hammer2_bmap_data_t line (128 bytes) managing
1016          * 4MB of storage.
1017          */
1018         while (data_off < cbinfo->sstop) {
1019                 /*
1020                  * The freemap is not used below allocator_beg or beyond
1021                  * total_size.
1022                  */
1023
1024                 if (data_off < cbinfo->hmp->voldata.allocator_beg)
1025                         goto next;
1026                 if (data_off >= cbinfo->hmp->total_size)
1027                         goto next;
1028
1029                 /*
1030                  * Locate the freemap leaf on the live filesystem
1031                  */
1032                 key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK);
1033
1034                 if (live_chain == NULL || live_chain->bref.key != key) {
1035                         if (live_chain) {
1036                                 hammer2_chain_unlock(live_chain);
1037                                 hammer2_chain_drop(live_chain);
1038                         }
1039                         live_chain = hammer2_chain_lookup(
1040                                             &live_parent,
1041                                             &key_dummy,
1042                                             key,
1043                                             key + HAMMER2_FREEMAP_LEVEL1_MASK,
1044                                             &error,
1045                                             HAMMER2_LOOKUP_ALWAYS);
1046                         if (error) {
1047                                 kprintf("hammer2_bulkfree: freemap lookup "
1048                                         "error near %016jx, error %s\n",
1049                                         (intmax_t)data_off,
1050                                         hammer2_error_str(error));
1051                                 break;
1052                         }
1053                 }
1054                 if (live_chain == NULL) {
1055                         /*
1056                          * XXX if we implement a full recovery mode we need
1057                          * to create/recreate missing freemap chains if our
1058                          * bmap has any allocated blocks.
1059                          */
1060                         if (bmap->class &&
1061                             bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) {
1062                                 kprintf("hammer2_bulkfree: cannot locate "
1063                                         "live leaf for allocated data "
1064                                         "near %016jx\n",
1065                                         (intmax_t)data_off);
1066                         }
1067                         goto next;
1068                 }
1069                 if (live_chain->error) {
1070                         kprintf("hammer2_bulkfree: unable to access freemap "
1071                                 "near %016jx, error %s\n",
1072                                 (intmax_t)data_off,
1073                                 hammer2_error_str(live_chain->error));
1074                         hammer2_chain_unlock(live_chain);
1075                         hammer2_chain_drop(live_chain);
1076                         live_chain = NULL;
1077                         goto next;
1078                 }
1079
1080                 bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >>
1081                             HAMMER2_FREEMAP_LEVEL0_RADIX;
1082                 live = &live_chain->data->bmdata[bmapindex];
1083
1084                 /*
1085                  * Shortcut if the bitmaps match and the live linear
1086                  * indicator is sane.  We can't do a perfect check of
1087                  * live->linear because the only real requirement is that,
1088                  * if it is not block-aligned, it not cover the space
1089                  * within its current block which overlaps one of the data
1090                  * ranges we scan.  We don't retain enough fine-grained
1091                  * data in our scan to be able to set it exactly.
1092                  *
1093                  * TODO - we could shortcut this by testing that both
1094                  * live->class and bmap->class are 0, and both avails are
1095                  * set to HAMMER2_FREEMAP_LEVEL0_SIZE (4MB).
1096                  */
1097                 if (bcmp(live->bitmapq, bmap->bitmapq,
1098                          sizeof(bmap->bitmapq)) == 0 &&
1099                     live->linear >= bmap->linear &&
1100                     (hammer2_aux_flags & 1) == 0 &&
1101                     bigmask_good(bmap, live_chain->bref.check.freemap.bigmask))
1102                 {
1103                         goto next;
1104                 }
1105                 if (hammer2_debug & 1) {
1106                         kprintf("live %016jx %04d.%04x (avail=%d) "
1107                                 "bigmask %08x->%08x\n",
1108                                 data_off, bmapindex, live->class, live->avail,
1109                                 live_chain->bref.check.freemap.bigmask,
1110                                 live_chain->bref.check.freemap.bigmask |
1111                                 bigmask_get(bmap));
1112                 }
1113
1114                 if (hammer2_chain_modify(live_chain, cbinfo->mtid, 0, 0)) {
1115                         kprintf("hammer2_bulkfree: unable to modify freemap "
1116                                 "at %016jx for data-block %016jx, error %s\n",
1117                                 live_chain->bref.data_off,
1118                                 (intmax_t)data_off,
1119                                 hammer2_error_str(live_chain->error));
1120                         hammer2_chain_unlock(live_chain);
1121                         hammer2_chain_drop(live_chain);
1122                         live_chain = NULL;
1123                         goto next;
1124                 }
1125                 live_chain->bref.check.freemap.bigmask = -1;
1126                 cbinfo->hmp->freemap_relaxed = 0;       /* reset heuristic */
1127                 live = &live_chain->data->bmdata[bmapindex];
1128
1129                 h2_bulkfree_sync_adjust(cbinfo, data_off, live, bmap,
1130                                         live_chain->bref.key +
1131                                         bmapindex *
1132                                         HAMMER2_FREEMAP_LEVEL0_SIZE);
1133 next:
1134                 data_off += HAMMER2_FREEMAP_LEVEL0_SIZE;
1135                 ++bmap;
1136         }
1137         if (live_chain) {
1138                 hammer2_chain_unlock(live_chain);
1139                 hammer2_chain_drop(live_chain);
1140         }
1141         if (live_parent) {
1142                 hammer2_chain_unlock(live_parent);
1143                 hammer2_chain_drop(live_parent);
1144         }
1145         return error;
1146 }
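
/*
 * A minimal sketch (under #if 0, not compiled) of the per-block-pair
 * merge rule tabulated in the comment above h2_bulkfree_sync().  Given
 * the 2-bit in-memory state produced by the scan (only 00 or 11 is ever
 * generated) and the 2-bit live state, it returns the new live state.
 * The hypothetical helper ignores the accounting (avail, adj_free) and
 * the dedup invalidation that h2_bulkfree_sync_adjust() below performs
 * alongside the same transitions.
 */
#if 0
static int
bulkfree_merge_pair(int mem2, int live2)
{
	if (mem2 == 3)			/* scan saw an allocation */
		return (3);		/* force live to 11 (allocated) */
	if (mem2 == 0) {		/* scan saw nothing in this block */
		if (live2 == 3)		/* allocated -> staged */
			return (2);
		if (live2 == 2)		/* staged -> free (second pass) */
			return (0);
	}
	return (live2);			/* 00 stays 00; 01 only draws a warning */
}
#endif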
1147
1148 /*
1149  * Merge the bulkfree bitmap against the existing bitmap.
1150  */
1151 static
1152 void
1153 h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
1154                         hammer2_off_t data_off, hammer2_bmap_data_t *live,
1155                         hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base)
1156 {
1157         int bindex;
1158         int scount;
1159         hammer2_off_t tmp_off;
1160         hammer2_bitmap_t lmask;
1161         hammer2_bitmap_t mmask;
1162
1163         tmp_off = data_off;
1164
1165         for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
1166                 lmask = live->bitmapq[bindex];  /* live */
1167                 mmask = bmap->bitmapq[bindex];  /* snapshotted bulkfree */
1168                 if (lmask == mmask) {
1169                         tmp_off += HAMMER2_BMAP_INDEX_SIZE;
1170                         continue;
1171                 }
1172
1173                 for (scount = 0;
1174                      scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
1175                      scount += 2) {
1176                         if ((mmask & 3) == 0) {
1177                                 /*
1178                                  * in-memory 00         live 11 -> 10
1179                                  *                      live 10 -> 00
1180                                  *
1181                                  * Storage might be marked allocated or
1182                                  * staged and must be remarked staged or
1183                                  * free.
1184                                  */
1185                                 switch (lmask & 3) {
1186                                 case 0: /* 00 */
1187                                         break;
1188                                 case 1: /* 01 */
1189                                         kprintf("hammer2_bulkfree: cannot "
1190                                                 "transition m=00/l=01\n");
1191                                         break;
1192                                 case 2: /* 10 -> 00 */
1193                                         live->bitmapq[bindex] &=
1194                                             ~((hammer2_bitmap_t)2 << scount);
1195                                         live->avail +=
1196                                                 HAMMER2_FREEMAP_BLOCK_SIZE;
1197                                         if (live->avail >
1198                                             HAMMER2_FREEMAP_LEVEL0_SIZE) {
1199                                                 live->avail =
1200                                                     HAMMER2_FREEMAP_LEVEL0_SIZE;
1201                                         }
1202                                         cbinfo->adj_free +=
1203                                                 HAMMER2_FREEMAP_BLOCK_SIZE;
1204                                         ++cbinfo->count_10_00;
1205                                         hammer2_io_dedup_assert(
1206                                                 cbinfo->hmp,
1207                                                 tmp_off |
1208                                                 HAMMER2_FREEMAP_BLOCK_RADIX,
1209                                                 HAMMER2_FREEMAP_BLOCK_SIZE);
1210                                         break;
1211                                 case 3: /* 11 -> 10 */
1212                                         live->bitmapq[bindex] &=
1213                                             ~((hammer2_bitmap_t)1 << scount);
1214                                         ++cbinfo->count_11_10;
1215                                         hammer2_io_dedup_delete(
1216                                                 cbinfo->hmp,
1217                                                 HAMMER2_BREF_TYPE_DATA,
1218                                                 tmp_off |
1219                                                 HAMMER2_FREEMAP_BLOCK_RADIX,
1220                                                 HAMMER2_FREEMAP_BLOCK_SIZE);
1221                                         break;
1222                                 }
1223                         } else if ((mmask & 3) == 3) {
1224                                 /*
1225                                  * in-memory 11         live 10 -> 11
1226                                  *                      live ** -> 11
1227                                  *
1228                                  * Storage might be incorrectly marked free
1229                                  * or staged and must be remarked fully
1230                                  * allocated.
1231                                  */
1232                                 switch (lmask & 3) {
1233                                 case 0: /* 00 */
1234                                         /*
1235                                          * This case is not supposed to
1236                                          * happen.  If it does, it means
1237                                          * that an allocated block was
1238                                          * thought by the filesystem to be
1239                                          * free.
1240                                          */
1241                                         kprintf("hammer2_bulkfree: "
1242                                                 "00->11 critical freemap "
1243                                                 "transition for datablock "
1244                                                 "%016jx\n",
1245                                                 tmp_off);
1246                                         ++cbinfo->count_00_11;
1247                                         cbinfo->adj_free -=
1248                                                 HAMMER2_FREEMAP_BLOCK_SIZE;
1249                                         live->avail -=
1250                                                 HAMMER2_FREEMAP_BLOCK_SIZE;
1251                                         if ((int32_t)live->avail < 0)
1252                                                 live->avail = 0;
1253                                         break;
1254                                 case 1: /* 01 */
1255                                         ++cbinfo->count_01_11;
1256                                         break;
1257                                 case 2: /* 10 -> 11 */
1258                                         ++cbinfo->count_10_11;
1259                                         break;
1260                                 case 3: /* 11 */
1261                                         break;
1262                                 }
1263                                 live->bitmapq[bindex] |=
1264                                         ((hammer2_bitmap_t)3 << scount);
1265                         }
1266                         mmask >>= 2;
1267                         lmask >>= 2;
1268                         tmp_off += HAMMER2_FREEMAP_BLOCK_SIZE;
1269                 }
1270         }
1271
1272         /*
1273          * Determine if the live bitmap is completely free and reset its
1274          * fields if so.  Otherwise check to see if we can reduce the linear
1275          * offset.
1276          */
1277         for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
1278                 if (live->bitmapq[bindex] != 0)
1279                         break;
1280         }
1281         if (bindex < 0) {
1282                 /*
1283                  * Completely empty, reset entire segment
1284                  */
1285 #if 0
1286                 kprintf("hammer2: cleanseg %016jx.%04x (%d)\n",
1287                         alloc_base, live->class, live->avail);
1288 #endif
1289                 live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
1290                 live->class = 0;
1291                 live->linear = 0;
1292                 ++cbinfo->count_l0cleans;
1293         } else if (bindex < 7) {
1294                 /*
1295                  * Partially full, bitmapq[bindex] != 0.  Our bulkfree pass
1296                  * does not record enough information to set live->linear
1297                  * exactly.
1298                  *
1299                  * NOTE: Setting live->linear to a sub-block (16K) boundary
1300                  *       forces the live code to iterate to the next fully
1301                  *       free block.  It does NOT mean that all blocks above
1302                  *       live->linear are available.
1303                  *
1304                  *       Setting live->linear to a fragmentary (less than
1305                  *       16K) boundary allows allocations to iterate within
1306                  *       that sub-block.
1307                  */
1308                 if (live->linear < bmap->linear &&
1309                     ((live->linear ^ bmap->linear) &
1310                      ~HAMMER2_FREEMAP_BLOCK_MASK) == 0) {
1311                         /*
1312                          * The snapshot's linear is higher but lies in the
1313                          * same sub-block as live's, so adjust it upward.
1314                          */
1315                         live->linear = bmap->linear;
1316                         ++cbinfo->count_linadjusts;
1317                 } else {
1318                         /*
1319                          * Otherwise adjust to the nearest higher or same
1320                          * sub-block boundary.  The live system may have
1321                          * bounced live->linear around so we cannot make any
1322                          * assumptions with regards to available fragmentary
1323                          * allocations.
1324                          */
1325                         live->linear =
1326                                 (bmap->linear + HAMMER2_FREEMAP_BLOCK_MASK) &
1327                                 ~HAMMER2_FREEMAP_BLOCK_MASK;
1328                         ++cbinfo->count_linadjusts;
1329                 }
1330         } else {
1331                 /*
1332                  * Completely full, effectively disable the linear iterator
1333                  */
1334                 live->linear = HAMMER2_SEGSIZE;
1335         }
1336
1337 #if 0
1338         if (bmap->class) {
1339                 kprintf("%016jx %04d.%04x (avail=%7d) "
1340                         "%016jx %016jx %016jx %016jx %016jx %016jx %016jx %016jx\n",
1341                         (intmax_t)data_off,
1342                         (int)((data_off &
1343                                HAMMER2_FREEMAP_LEVEL1_MASK) >>
1344                               HAMMER2_FREEMAP_LEVEL0_RADIX),
1345                         bmap->class,
1346                         bmap->avail,
1347                         (intmax_t)bmap->bitmapq[0], (intmax_t)bmap->bitmapq[1],
1348                         (intmax_t)bmap->bitmapq[2], (intmax_t)bmap->bitmapq[3],
1349                         (intmax_t)bmap->bitmapq[4], (intmax_t)bmap->bitmapq[5],
1350                         (intmax_t)bmap->bitmapq[6], (intmax_t)bmap->bitmapq[7]);
1351         }
1352 #endif
1353 }
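
/*
 * [Editor's note] Illustrative sketch only, not part of the driver, and
 * the helper name is hypothetical.  It distills the per-sub-block 2-bit
 * transition table that h2_bulkfree_sync_adjust() applies above:
 * 00 = free, 10 = staged (possibly free), 11 = allocated, 01 = unused.
 * Note that a block generally frees over two passes (11 -> 10, then
 * 10 -> 00), which is what the count_11_10 / count_10_00 counters track.
 */
static __inline int
h2_bulkfree_transition_sketch(int mstate, int lstate)
{
        if (mstate == 0) {
                /* snapshot says free: demote allocated->staged, staged->free */
                if (lstate == 3)
                        return(2);      /* 11 -> 10 */
                if (lstate == 2)
                        return(0);      /* 10 -> 00 */
                return(lstate);         /* 00 stays; 01 is reported as an error */
        }
        if (mstate == 3)
                return(3);              /* snapshot says allocated: force 11 */
        return(lstate);                 /* other snapshot states leave live alone */
}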
1354
1355 /*
1356  * BULKFREE DEDUP HEURISTIC
1357  *
1358  * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1359  *          All fields must be loaded into locals and validated.
1360  */
1361 static
1362 int
1363 h2_bulkfree_test(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref,
1364                  int pri, int saved_error)
1365 {
1366         hammer2_dedup_t *dedup;
1367         int best;
1368         int n;
1369         int i;
1370
1371         n = hammer2_icrc32(&bref->data_off, sizeof(bref->data_off));
1372         dedup = cbinfo->dedup + (n & (HAMMER2_DEDUP_HEUR_MASK & ~7));
1373
1374         for (i = best = 0; i < 8; ++i) {
1375                 if (dedup[i].data_off == bref->data_off) {
1376                         if (dedup[i].ticks < pri)
1377                                 dedup[i].ticks = pri;
1378                         if (pri == 1)
1379                                 cbinfo->count_dedup_factor += dedup[i].ticks;
1380                         return (dedup[i].saved_error | HAMMER2_ERROR_EOF);
1381                 }
1382                 if (dedup[i].ticks < dedup[best].ticks)
1383                         best = i;
1384         }
1385         dedup[best].data_off = bref->data_off;
1386         dedup[best].ticks = pri;
1387         dedup[best].saved_error = saved_error;
1388
1389         return 0;
1390 }
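
/*
 * [Editor's note] Hypothetical caller sketch (assumed function name), not
 * part of the driver: shows how the heuristic's return value is meant to
 * be consumed.  HAMMER2_ERROR_EOF or'd into the return means the bref's
 * data_off is already present in the 8-way dedup cache, so the caller can
 * reuse the previously saved error and skip re-scanning the chain.
 */
static int
h2_bulkfree_maybe_skip_sketch(hammer2_bulkfree_info_t *cbinfo,
                              hammer2_blockref_t *bref, int pri)
{
        int error;

        error = h2_bulkfree_test(cbinfo, bref, pri, 0);
        if (error & HAMMER2_ERROR_EOF) {
                /* heuristic hit: chain already accounted for */
                return (error & ~HAMMER2_ERROR_EOF);
        }
        /* miss: the entry was (re)installed, caller scans the chain */
        return (0);
}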
1391
1392 /*
1393  * Calculate what the bigmask should be.  bigmask is permissive, so the
1394  * bits returned must be set at a minimum in the live bigmask.  Other bits
1395  * might also be set in the live bigmask.
1396  */
1397 static uint32_t
1398 bigmask_get(hammer2_bmap_data_t *bmap)
1399 {
1400         hammer2_bitmap_t mask;  /* 64-bit mask to check */
1401         hammer2_bitmap_t scan;
1402         uint32_t bigmask;
1403         uint32_t radix_mask;
1404         int iter;
1405         int i;
1406         int j;
1407
1408         bigmask = 0;
1409         for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
1410                 mask = bmap->bitmapq[i];
1411
1412                 radix_mask = 1U << HAMMER2_FREEMAP_BLOCK_RADIX;
1413                 radix_mask |= radix_mask - 1;
1414                 iter = 2;       /* each bitmap entry is 2 bits. 2, 4, 8... */
1415                 while (iter <= HAMMER2_BMAP_BITS_PER_ELEMENT) {
1416                         if (iter == HAMMER2_BMAP_BITS_PER_ELEMENT)
1417                                 scan = -1;
1418                         else
1419                                 scan = (1LU << iter) - 1;
1420                         j = 0;
1421                         while (j < HAMMER2_BMAP_BITS_PER_ELEMENT) {
1422                                 /*
1423                                  * If the window is entirely 00 (free), set
1424                                  * the bigmask bits for allocation radices
1425                                  * up to the run size under test.
1426                                  */
1427                                 if ((scan & mask) == 0) {
1428                                         bigmask |= radix_mask;
1429                                 }
1430                                 scan <<= iter;
1431                                 j += iter;
1432                         }
1433                         iter <<= 1;
1434                         radix_mask = (radix_mask << 1) | 1;
1435                 }
1436         }
1437         return bigmask;
1438 }
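
/*
 * [Editor's note] Illustrative sketch with a hypothetical helper name:
 * a bigmask bit r being set indicates the snapshotted level-0 bitmap
 * contains an aligned, fully free run large enough for an allocation of
 * radix r (2^r bytes).  Because bigmask_get() only scans aligned windows
 * within each 64-bit element, it is conservative about unaligned or
 * element-spanning free space, which is safe for a permissive mask.
 */
static __inline int
bigmask_can_alloc_sketch(hammer2_bmap_data_t *bmap, int radix)
{
        return ((bigmask_get(bmap) & (1U << radix)) != 0);
}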
1439
1440 static int
1441 bigmask_good(hammer2_bmap_data_t *bmap, uint32_t live_bigmask)
1442 {
1443         uint32_t bigmask;
1444
1445         bigmask = bigmask_get(bmap);
1446         return ((live_bigmask & bigmask) == bigmask);
1447 }
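
/*
 * [Editor's note] Hypothetical usage sketch with assumed names: the sync
 * pass can combine a bitmap comparison with bigmask_good() to decide
 * whether a live level-0 bmap chain needs to be modified at all.  The
 * live bigmask is assumed here to reside in the freemap bref's check
 * area (bref.check.freemap.bigmask); the exact conditions used by the
 * real sync loop may differ.
 */
static __inline int
h2_bulkfree_bmap_clean_sketch(hammer2_chain_t *live_chain,
                              hammer2_bmap_data_t *live,
                              hammer2_bmap_data_t *bmap)
{
        return (bcmp(live->bitmapq, bmap->bitmapq,
                     sizeof(bmap->bitmapq)) == 0 &&
                live->linear >= bmap->linear &&
                bigmask_good(bmap, live_chain->bref.check.freemap.bigmask));
}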