2 * Copyright (c) 2013-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/fcntl.h>
40 #include <sys/namei.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/mountctl.h>
44 #include <vm/vm_kern.h>
45 #include <vm/vm_extern.h>
50 * breadth-first search
/*
 * Work-queue entry used by hammer2_bulk_scan() to implement its
 * breadth-first traversal.  Each entry holds a referenced (but
 * unlocked) chain whose contents still have to be scanned.
 */
52 typedef struct hammer2_chain_save {
53 TAILQ_ENTRY(hammer2_chain_save) entry;	/* linkage on the pending-scan list */
54 hammer2_chain_t *parent;	/* ref'd chain to descend into later */
55 } hammer2_chain_save_t;
57 TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
58 typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;
61 * General bulk scan function with callback. Called with a referenced
62 * but UNLOCKED parent. The original parent is returned in the same state.
/*
 * hammer2_bulk_scan() - general breadth-first bulk scan with callback.
 *
 * Called with a referenced but UNLOCKED parent; the original parent is
 * returned in the same state.  Chains that can be recursed into are
 * queued (referenced, unlocked) on a FIFO list, which produces the
 * breadth-first visit order.  (func) is invoked on each scanned chain
 * and may set HAMMER2_BULK_ABORT bits in its return to stop the scan.
 */
65 hammer2_bulk_scan(hammer2_chain_t *parent,
66 int (*func)(hammer2_chain_t *chain, void *info),
/* FIFO work list implementing the breadth-first order. */
69 hammer2_chain_save_list_t list;
70 hammer2_chain_save_t *save;
/* Prime the list with the top-level parent (extra ref held by the list). */
74 hammer2_chain_ref(parent);
75 save = kmalloc(sizeof(*save), M_HAMMER2, M_WAITOK | M_ZERO);
76 save->parent = parent;
77 TAILQ_INSERT_TAIL(&list, save, entry);
/* Process queued chains until the list is empty or an abort is flagged. */
79 while ((save = TAILQ_FIRST(&list)) != NULL && doabort == 0) {
80 hammer2_chain_t *chain;
83 TAILQ_REMOVE(&list, save, entry);
85 parent = save->parent;
91 * lock the parent, the lock eats the ref.
93 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
94 HAMMER2_RESOLVE_SHARED);
97 * Generally loop on the contents if we have not been flagged
/* Iterate children shared-locked without resolving their data. */
100 while ((doabort & HAMMER2_BULK_ABORT) == 0) {
101 chain = hammer2_chain_scan(parent, chain, &cache_index,
102 HAMMER2_LOOKUP_NODATA |
103 HAMMER2_LOOKUP_SHARED);
/* Callback may OR in HAMMER2_BULK_ABORT to terminate the scan. */
106 doabort |= func(chain, info);
108 if (doabort & HAMMER2_BULK_ABORT) {
109 hammer2_chain_unlock(chain);
110 hammer2_chain_drop(chain);
/* Queue recursable bref types for a later breadth-first visit. */
114 switch(chain->bref.type) {
115 case HAMMER2_BREF_TYPE_INODE:
116 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
117 case HAMMER2_BREF_TYPE_INDIRECT:
118 case HAMMER2_BREF_TYPE_VOLUME:
119 case HAMMER2_BREF_TYPE_FREEMAP:
121 * Breadth-first scan. Chain is referenced
122 * to save for later and will be unlocked on
123 * our loop (so it isn't left locked while on
127 save = kmalloc(sizeof(*save),
131 hammer2_chain_ref(chain);
132 save->parent = chain;
133 TAILQ_INSERT_TAIL(&list, save, entry);
137 /* does not recurse */
143 * Releases the lock and the ref the lock inherited. Free
144 * save structure if we didn't recycle it above.
146 hammer2_chain_unlock(parent);
147 hammer2_chain_drop(parent);
149 kfree(save, M_HAMMER2);
153 * Cleanup anything left undone due to an abort
/* On abort: drop the list-held refs and free the queued save entries. */
155 while ((save = TAILQ_FIRST(&list)) != NULL) {
156 TAILQ_REMOVE(&list, save, entry);
157 hammer2_chain_drop(save->parent);
158 kfree(save, M_HAMMER2);
165 * Bulkfree algorithm -
169 * Scan the whole topology and build the freemap
170 * ** -> 11 during scan for all elements scanned (and thus not free)
171 * 11 -> 10 after scan if allocated in-topo and free in-memory, mark 10
172 * 10 -> 00 after scan if possibly-free in-topo and free in-memory mark 00
175 * Adjustment of the freemap ->10 and ->00 cannot occur until the topology
176 * scan is complete. The scan runs concurrently with normal filesystem
177 * operations and any allocation will also remark the freemap bitmap 11.
178 * We handle races by performing two scans and only changing the map to
179 * fully free (00) if both passes believe it is free.
181 * Temporary memory in multiples of 64KB is required to reconstruct leaf
182 * hammer2_bmap_data blocks so they can later be compared against the live
183 * freemap. Each 64KB block represents 128 x 16KB x 1024 = ~2 GB of storage.
184 * A 32MB save area thus represents around ~1 TB. The temporary memory
185 * allocated can be specified. If it is not sufficient multiple topology
186 * passes will be made.
190 * Bulkfree callback info
/*
 * Per-pass state for the bulkfree scan: the [sbase, sstop) window being
 * collected, the swap-backed in-memory freemap reconstruction area, and
 * statistics/adjustments accumulated by the callback and sync stages.
 */
192 typedef struct hammer2_bulkfree_info {
195 hammer2_off_t sbase; /* sub-loop iteration */
197 hammer2_bmap_data_t *bmap;	/* in-memory freemap reconstruction area */
202 long count_linadjusts;	/* stat: linear-offset reductions made */
203 hammer2_off_t adj_free;	/* net adjustment applied to allocator_free */
206 } hammer2_bulkfree_info_t;
208 static int h2_bulkfree_callback(hammer2_chain_t *chain, void *info);
209 static void h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo);
210 static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
211 hammer2_bmap_data_t *live, hammer2_bmap_data_t *bmap);
/*
 * hammer2_bulkfree_pass() - top-level bulkfree driver.
 *
 * Rounds the caller-supplied temporary-memory size (bfi->size) up to a
 * freemap leaf multiple, then scans the topology in [sbase, sstop)
 * windows -- each 64KB of temporary memory represents one 2GB L1 range
 * -- until the whole volume is covered.  After each successful window
 * scan the reconstructed freemap is synchronized against live storage
 * and allocator_free is adjusted.  Progress is returned via bfi->sstop.
 */
214 hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_ioc_bulkfree_t *bfi)
216 hammer2_bulkfree_info_t cbinfo;
221 /* hammer2_vfs_sync(hmp->mp, MNT_WAIT); XXX */
/* Round the temporary-memory size up to a whole freemap leaf multiple. */
223 bzero(&cbinfo, sizeof(cbinfo));
224 size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
225 ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);
227 cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size);
230 * Normalize start point to a 2GB boundary. We operate on a
231 * 64KB leaf bitmap boundary which represents 2GB of storage.
233 cbinfo.sbase = bfi->sbase;
234 if (cbinfo.sbase > hmp->voldata.volu_size)
235 cbinfo.sbase = hmp->voldata.volu_size;
236 cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
239 * Loop on a full meta-data scan as many times as required to
240 * get through all available storage.
242 while (cbinfo.sbase < hmp->voldata.volu_size) {
244 * We have enough ram to represent (incr) bytes of storage.
245 * Each 64KB of ram represents 2GB of storage.
247 bzero(cbinfo.bmap, size);
248 incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE *
249 HAMMER2_FREEMAP_LEVEL1_SIZE;
250 if (hmp->voldata.volu_size - cbinfo.sbase < incr)
251 cbinfo.sstop = hmp->voldata.volu_size;
253 cbinfo.sstop = cbinfo.sbase + incr;
254 if (hammer2_debug & 1)
255 kprintf("bulkfree pass %016jx/%jdGB\n",
256 (intmax_t)cbinfo.sbase,
257 (intmax_t)incr / HAMMER2_FREEMAP_LEVEL1_SIZE);
/* One transaction per window; mtid stamps freemap modifications. */
259 hammer2_trans_init(hmp->spmp, 0);
260 cbinfo.mtid = hammer2_trans_sub(hmp->spmp);
261 doabort |= hammer2_bulk_scan(&hmp->vchain,
262 h2_bulkfree_callback, &cbinfo);
265 * If complete scan succeeded we can synchronize our
266 * in-memory freemap against live storage. If an abort
267 * did occur we cannot safely synchronize our partially
268 * filled-out in-memory freemap.
271 h2_bulkfree_sync(&cbinfo);
/* Fold the pass's freed-space delta into the volume header. */
273 hammer2_voldata_lock(hmp);
274 hammer2_voldata_modify(hmp);
275 hmp->voldata.allocator_free += cbinfo.adj_free;
276 hammer2_voldata_unlock(hmp);
280 * Cleanup for next loop.
282 hammer2_trans_done(hmp->spmp);
285 cbinfo.sbase = cbinfo.sstop;
287 kmem_free_swapbacked(&cbinfo.kp);
289 bfi->sstop = cbinfo.sbase;
/* Progress in units of 0.01% (10000 = 100.00%), printed as %d.%02d%%. */
291 incr = bfi->sstop / (hmp->voldata.volu_size / 10000);
295 kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n",
299 kprintf(" transition->free %ld\n", cbinfo.count_10_00);
300 kprintf(" transition->staged %ld\n", cbinfo.count_11_10);
301 kprintf(" raced on %ld\n", cbinfo.count_10_11);
302 kprintf(" ~2MB segs cleaned %ld\n", cbinfo.count_l0cleans);
303 kprintf(" linear adjusts %ld\n", cbinfo.count_linadjusts);
/*
 * h2_bulkfree_callback() - per-chain callback for the bulkfree scan.
 *
 * Marks the storage backing (chain) as allocated (bit code 11) in the
 * in-memory freemap, but only when the block falls inside the current
 * [sbase, sstop) collection window.  Returns HAMMER2_BULK_ABORT when a
 * signal is pending so the scan can be interrupted.
 */
309 h2_bulkfree_callback(hammer2_chain_t *chain, void *info)
311 hammer2_bulkfree_info_t *cbinfo = info;
312 hammer2_bmap_data_t *bmap;
313 hammer2_off_t data_off;
320 * Check for signal and allow yield to userland during scan
322 if (hammer2_signal_check(&cbinfo->save_time))
323 return HAMMER2_BULK_ABORT;
326 kprintf("scan chain %016jx %016jx/%-2d type=%02x\n",
327 (intmax_t)chain->bref.data_off,
328 (intmax_t)chain->bref.key,
334 * Calculate the data offset and determine if it is within
335 * the current freemap range being gathered.
/* Strip the radix (low bits of data_off encode the allocation size). */
338 data_off = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
/* NOTE(review): uses '>' so data_off == sstop slips past -- confirm vs '>=' */
339 if (data_off < cbinfo->sbase || data_off > cbinfo->sstop)
341 if (data_off < chain->hmp->voldata.allocator_beg)
343 if (data_off > chain->hmp->voldata.volu_size)
347 * Calculate the information needed to generate the in-memory
350 * Hammer2 does not allow allocations to cross the L1 (2GB) boundary,
351 * it's a problem if it does. (Or L0 (2MB) for that matter).
353 radix = (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
354 bytes = (size_t)1 << radix;
355 class = (chain->bref.type << 8) | hammer2_devblkradix(radix);
/* Complain (and clamp) if the allocation crosses the window end. */
357 if (data_off + bytes > cbinfo->sstop) {
358 kprintf("hammer2_bulkfree_scan: illegal 2GB boundary "
359 "%016jx %016jx/%d\n",
360 (intmax_t)chain->bref.data_off,
361 (intmax_t)chain->bref.key,
362 chain->bref.keybits);
363 bytes = cbinfo->sstop - data_off; /* XXX */
367 * Convert to a storage offset relative to the beginning of the
368 * storage range we are collecting. Then lookup the level0 bmap entry.
370 data_off -= cbinfo->sbase;
371 bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);
374 * Convert data_off to a bmap-relative value (~2MB storage range).
375 * Adjust linear, class, and avail.
377 * Hammer2 does not allow allocations to cross the L0 (2MB) boundary,
379 data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
380 if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
381 kprintf("hammer2_bulkfree_scan: illegal 2MB boundary "
382 "%016jx %016jx/%d\n",
383 (intmax_t)chain->bref.data_off,
384 (intmax_t)chain->bref.key,
385 chain->bref.keybits);
386 bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
/* First allocation seen in this leaf: initialize class/avail. */
389 if (bmap->class == 0) {
391 bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
393 if (bmap->class != class) {
394 kprintf("hammer2_bulkfree_scan: illegal mixed class "
395 "%016jx %016jx/%d (%04x vs %04x)\n",
396 (intmax_t)chain->bref.data_off,
397 (intmax_t)chain->bref.key,
/* Advance the leaf's linear allocation offset past this block. */
401 if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
402 bmap->linear = (int32_t)data_off + (int32_t)bytes;
405 * Adjust the hammer2_bitmap_t bitmap[HAMMER2_BMAP_ELEMENTS].
406 * 64-bit entries, 2 bits per entry, to code 11.
408 * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE.
412 hammer2_bitmap_t bmask;
/* Locate the bitmap element and the 2-bit mask for this block. */
414 bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
415 HAMMER2_BMAP_INDEX_RADIX);
416 bmask = (hammer2_bitmap_t)3 <<
417 ((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
418 HAMMER2_FREEMAP_BLOCK_RADIX) << 1);
421 * NOTE! The (avail) calculation is bitmap-granular. Multiple
422 * sub-granular records can wind up at the same bitmap
/* Only decrement avail the first time this bitmap position is set. */
425 if ((bmap->bitmapq[bindex] & bmask) == 0) {
426 if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) {
427 bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE;
429 bmap->avail -= bytes;
431 bmap->bitmapq[bindex] |= bmask;
/* Step one freemap block forward; loop covers multi-block allocations. */
433 data_off += HAMMER2_FREEMAP_BLOCK_SIZE;
434 if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE)
437 bytes -= HAMMER2_FREEMAP_BLOCK_SIZE;
443 * Synchronize the in-memory bitmap with the live freemap. This is not a
444 * direct copy. Instead the bitmaps must be compared:
446 * In-memory Live-freemap
449 * 11 10 -> 11 handles race against live
450 * ** -> 11 nominally warn of corruption
/*
 * h2_bulkfree_sync() - synchronize the reconstructed in-memory freemap
 * against the live freemap hanging under fchain for the current
 * [sbase, sstop) window.  Leaves whose bitmaps differ are modified in
 * the current transaction and adjusted via h2_bulkfree_sync_adjust().
 */
454 h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo)
456 hammer2_off_t data_off;
458 hammer2_key_t key_dummy;
459 hammer2_bmap_data_t *bmap;
460 hammer2_bmap_data_t *live;
461 hammer2_chain_t *live_parent;
462 hammer2_chain_t *live_chain;
463 int cache_index = -1;
466 kprintf("hammer2_bulkfree - range %016jx-%016jx\n",
467 (intmax_t)cbinfo->sbase,
468 (intmax_t)cbinfo->sstop);
470 data_off = cbinfo->sbase;
/* The live freemap lives under fchain; hold it ref'd + locked throughout. */
473 live_parent = &cbinfo->hmp->fchain;
474 hammer2_chain_ref(live_parent);
475 hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS);
/* Walk the window one L0 (~2MB) leaf entry at a time. */
478 while (data_off < cbinfo->sstop) {
480 * The freemap is not used below allocator_beg or beyond
483 if (data_off < cbinfo->hmp->voldata.allocator_beg)
485 if (data_off > cbinfo->hmp->voldata.volu_size)
489 * Locate the freemap leaf on the live filesystem
/* Cache live_chain across iterations; re-lookup only on L1 key change. */
491 key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK);
492 if (live_chain == NULL || live_chain->bref.key != key) {
494 hammer2_chain_unlock(live_chain);
495 hammer2_chain_drop(live_chain);
497 live_chain = hammer2_chain_lookup(
501 key + HAMMER2_FREEMAP_LEVEL1_MASK,
503 HAMMER2_LOOKUP_ALWAYS);
505 kprintf("live_chain %016jx\n", (intmax_t)key);
/* No live leaf: only a problem if our in-memory map shows allocations. */
508 if (live_chain == NULL) {
510 bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) {
511 kprintf("hammer2_bulkfree: cannot locate "
512 "live leaf for allocated data "
518 if (live_chain->error) {
519 kprintf("hammer2_bulkfree: error %s looking up "
520 "live leaf for allocated data near %016jx\n",
521 hammer2_error_str(live_chain->error),
523 hammer2_chain_unlock(live_chain);
524 hammer2_chain_drop(live_chain);
/* Index of this ~2MB leaf entry within the 2GB live leaf block. */
529 bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >>
530 HAMMER2_FREEMAP_LEVEL0_RADIX;
531 live = &live_chain->data->bmdata[bmapindex];
534 * For now just handle the 11->10, 10->00, and 10->11
/* Skip leaves already fully free or with identical bitmaps. */
537 if (live->class == 0 ||
538 live->avail == HAMMER2_FREEMAP_LEVEL0_SIZE) {
541 if (bcmp(live->bitmapq, bmap->bitmapq,
542 sizeof(bmap->bitmapq)) == 0) {
545 if (hammer2_debug & 1)
546 kprintf("live %016jx %04d.%04x (avail=%d)\n",
547 data_off, bmapindex, live->class, live->avail);
/* Bitmaps differ: mark the live leaf modified and reconcile states. */
549 hammer2_chain_modify(live_chain, cbinfo->mtid, 0);
550 h2_bulkfree_sync_adjust(cbinfo, live, bmap);
552 data_off += HAMMER2_FREEMAP_LEVEL0_SIZE;
556 hammer2_chain_unlock(live_chain);
557 hammer2_chain_drop(live_chain);
560 hammer2_chain_unlock(live_parent);
561 hammer2_chain_drop(live_parent);
567 h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
568 hammer2_bmap_data_t *live, hammer2_bmap_data_t *bmap)
572 hammer2_bitmap_t lmask;
573 hammer2_bitmap_t mmask;
575 for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
576 lmask = live->bitmapq[bindex];
577 mmask = bmap->bitmapq[bindex];
582 scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
584 if ((mmask & 3) == 0) {
586 * in-memory 00 live 11 -> 10
593 kprintf("hammer2_bulkfree: cannot "
594 "transition m=00/l=01\n");
596 case 2: /* 10 -> 00 */
597 live->bitmapq[bindex] &=
598 ~((hammer2_bitmap_t)2 << scount);
600 HAMMER2_FREEMAP_BLOCK_SIZE;
602 HAMMER2_FREEMAP_BLOCK_SIZE;
603 ++cbinfo->count_10_00;
605 case 3: /* 11 -> 10 */
606 live->bitmapq[bindex] &=
607 ~((hammer2_bitmap_t)1 << scount);
608 ++cbinfo->count_11_10;
611 } else if ((lmask & 3) == 3) {
613 * in-memory 11 live 10 -> 11
618 kprintf("hammer2_bulkfree: cannot "
619 "transition m=11/l=00\n");
622 kprintf("hammer2_bulkfree: cannot "
623 "transition m=11/l=01\n");
625 case 2: /* 10 -> 11 */
626 live->bitmapq[bindex] |=
627 ((hammer2_bitmap_t)1 << scount);
628 ++cbinfo->count_10_11;
640 * Determine if the live bitmap is completely free and reset its
641 * fields if so. Otherwise check to see if we can reduce the linear
644 for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
645 if (live->bitmapq[bindex] != 0)
649 live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
652 ++cbinfo->count_l0cleans;
653 } else if (bindex < 7) {
655 if (live->linear > bindex * HAMMER2_FREEMAP_BLOCK_SIZE) {
656 live->linear = bindex * HAMMER2_FREEMAP_BLOCK_SIZE;
657 ++cbinfo->count_linadjusts;
663 kprintf("%016jx %04d.%04x (avail=%7d) "
664 "%08x %08x %08x %08x %08x %08x %08x %08x\n",
667 HAMMER2_FREEMAP_LEVEL1_MASK) >>
668 HAMMER2_FREEMAP_LEVEL0_RADIX),
671 bmap->bitmap[0], bmap->bitmap[1],
672 bmap->bitmap[2], bmap->bitmap[3],
673 bmap->bitmap[4], bmap->bitmap[5],
674 bmap->bitmap[6], bmap->bitmap[7]);