2 * Copyright (c) 2011-2014 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
48 #define FREEMAP_DEBUG 0
/*
 * Iterator state carried across hammer2_freemap_try_alloc() /
 * hammer2_freemap_iterate() calls.  Member fields are elided in this
 * listing; the code below references at least .bpref, .bnext and .loops.
 */
50 struct hammer2_fiterate {
56 typedef struct hammer2_fiterate hammer2_fiterate_t;
/*
 * Forward declarations for the static allocator helpers defined later
 * in this file.
 */
58 static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
59 hammer2_chain_t **parentp, hammer2_blockref_t *bref,
60 int radix, hammer2_fiterate_t *iter);
61 static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_dev_t *hmp,
62 hammer2_key_t key, hammer2_chain_t *chain);
63 static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_dev_t *hmp,
64 hammer2_bmap_data_t *bmap, uint16_t class,
65 int n, int radix, hammer2_key_t *basep);
66 static int hammer2_freemap_iterate(hammer2_trans_t *trans,
67 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
68 hammer2_fiterate_t *iter);
72 hammer2_freemapradix(int radix)
78 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
79 * bref. Return a combined media offset and physical size radix. Freemap
80 * chains use fixed storage offsets in the 4MB reserved area at the
81 * beginning of each 2GB zone
83 * Rotate between four possibilities. Theoretically this means we have three
84 * good freemaps in case of a crash which we can use as a base for the fixup
87 #define H2FMBASE(key, radix) ((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
88 #define H2FMSHIFT(radix) ((hammer2_off_t)1 << (radix))
/*
 * Assign media storage to a FREEMAP_NODE or FREEMAP_LEAF meta-data chain
 * from the fixed reserved area at the base of the 2GB zone covering
 * bref->key, rotating through HAMMER2_NFREEMAPS copies (see the ++index
 * wrap below) so prior freemap versions survive a crash.  Stores the
 * combined (offset | radix) into bref->data_off.
 *
 * NOTE(review): this listing elides several source lines (return type,
 * local declarations, break statements, closing braces).
 */
92 hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain,
95 hammer2_blockref_t *bref = &chain->bref;
102 * Physical allocation size.
104 bytes = (size_t)1 << radix;
107 * Calculate block selection index 0..7 of current block. If this
108 * is the first allocation of the block (versus a modification of an
109 * existing block), we use index 0, otherwise we use the next rotating
/* bref->data_off == 0 (masked) means no prior assignment: use index 0. */
112 if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
/* Derive the previous FREEMAP_x selection from the old data_off. */
115 off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
116 (((hammer2_off_t)1 <<
117 HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
118 off = off / HAMMER2_PBUFSIZE;
119 KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
120 off < HAMMER2_ZONE_FREEMAP_END);
121 index = (int)(off - HAMMER2_ZONE_FREEMAP_00) /
122 HAMMER2_ZONE_FREEMAP_INC;
123 KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS);
/* Rotate to the next of the HAMMER2_NFREEMAPS copies (wrap elided). */
124 if (++index == HAMMER2_NFREEMAPS)
129 * Calculate the block offset of the reserved block. This will
130 * point into the 4MB reserved area at the base of the appropriate
131 * 2GB zone, once added to the FREEMAP_x selection above.
133 index_inc = index * HAMMER2_ZONE_FREEMAP_INC;
/* keybits selects the freemap level; each level has its own slot. */
135 switch(bref->keybits) {
136 /* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
137 case HAMMER2_FREEMAP_LEVEL5_RADIX: /* 2EB */
138 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
139 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
140 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) +
141 (index_inc + HAMMER2_ZONE_FREEMAP_00 +
142 HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE;
144 case HAMMER2_FREEMAP_LEVEL4_RADIX: /* 2EB */
145 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
146 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
147 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
148 (index_inc + HAMMER2_ZONE_FREEMAP_00 +
149 HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
151 case HAMMER2_FREEMAP_LEVEL3_RADIX: /* 2PB */
152 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
153 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
154 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
155 (index_inc + HAMMER2_ZONE_FREEMAP_00 +
156 HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
158 case HAMMER2_FREEMAP_LEVEL2_RADIX: /* 2TB */
159 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
160 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
161 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
162 (index_inc + HAMMER2_ZONE_FREEMAP_00 +
163 HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
165 case HAMMER2_FREEMAP_LEVEL1_RADIX: /* 2GB */
166 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
167 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
168 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
169 (index_inc + HAMMER2_ZONE_FREEMAP_00 +
170 HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
/* Presumably the default: case (label elided) -- unexpected keybits. */
173 panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
175 off = (hammer2_off_t)-1;
178 bref->data_off = off | radix;
180 kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
181 bref->type, bref->key, bref->keybits, bref->data_off);
187 * Normal freemap allocator
189 * Use available hints to allocate space using the freemap. Create missing
190 * freemap infrastructure on-the-fly as needed (including marking initial
191 * allocations using the iterator as allocated, instantiating new 2GB zones,
192 * and dealing with the end-of-media edge case).
194 * ip and bpref are only used as a heuristic to determine locality of
195 * reference. bref->key may also be used heuristically.
/*
 * Main freemap allocation entry point.  Validates that 'bytes' is a
 * power of 2, diverts freemap meta-blocks (NODE/LEAF) to the reserve
 * allocator, then picks a search starting point from the bref/trans
 * heuristics and loops on hammer2_freemap_try_alloc() while it returns
 * EAGAIN.
 *
 * NOTE(review): this listing elides lines (return type, declarations,
 * returns, closing braces); behavior of the elided bodies is not
 * documented here.
 */
198 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
201 hammer2_dev_t *hmp = chain->hmp;
202 hammer2_blockref_t *bref = &chain->bref;
203 hammer2_chain_t *parent;
207 hammer2_fiterate_t iter;
210 * Validate the allocation size. It must be a power of 2.
212 * For now require that the caller be aware of the minimum
215 radix = hammer2_getradix(bytes);
216 KKASSERT((size_t)1 << radix == bytes);
/* Freemap meta-blocks never consume normal freemap space. */
218 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
219 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
221 * Freemap blocks themselves are assigned from the reserve
222 * area, not allocated from the freemap.
224 error = hammer2_freemap_reserve(trans, chain, radix);
228 KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);
/* Flush-time special handling; the guarded statement is elided here. */
230 if (trans->flags & (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_PREFLUSH))
248 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
249 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
250 else if (trans->tmp_bpref)
251 bpref = trans->tmp_bpref;
252 else if (trans->tmp_ip)
253 bpref = trans->tmp_ip->chain->bref.data_off;
257 * Heuristic tracking index. We would like one for each distinct
258 * bref type if possible. heur_freemap[] has room for two classes
259 * for each type. At a minimum we have to break-up our heuristic
260 * by device block sizes.
262 hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
263 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
264 hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
265 hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
266 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);
268 iter.bpref = hmp->heur_freemap[hindex];
271 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's
272 * reserved area, the try code will iterate past it.
274 if (iter.bpref > hmp->voldata.volu_size)
275 iter.bpref = hmp->voldata.volu_size - 1;
278 * Iterate the freemap looking for free space before and after.
280 parent = &hmp->fchain;
281 hammer2_chain_ref(parent);
282 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
284 iter.bnext = iter.bpref;
/* Retry while the try-alloc iterator asks for another L1 segment. */
287 while (error == EAGAIN) {
288 error = hammer2_freemap_try_alloc(trans, &parent, bref,
/* Remember where the search ended up for the next allocation. */
291 hmp->heur_freemap[hindex] = iter.bnext;
292 hammer2_chain_unlock(parent);
293 hammer2_chain_drop(parent);
295 if (trans->flags & (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_PREFLUSH))
/*
 * Attempt an allocation within the Level-1 (2GB) freemap segment covering
 * iter->bnext.  Looks up (or creates and initializes) the FREEMAP_LEAF,
 * then scans its bmap entries forward from the starting index calling
 * hammer2_bmap_alloc().  On success stores (key | radix) in
 * bref->data_off; on ENOSPC clears the leaf's bigmask bit for this radix
 * and advances to the next L1 segment via hammer2_freemap_iterate().
 *
 * NOTE(review): this listing elides lines (locals, else-bodies, braces);
 * comments below only describe the visible code.
 */
302 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
303 hammer2_blockref_t *bref, int radix,
304 hammer2_fiterate_t *iter)
306 hammer2_dev_t *hmp = (*parentp)->hmp;
307 hammer2_off_t l0size;
308 hammer2_off_t l1size;
309 hammer2_off_t l1mask;
310 hammer2_key_t key_dummy;
311 hammer2_chain_t *chain;
316 int cache_index = -1;
319 * Calculate the number of bytes being allocated, the number
320 * of contiguous bits of bitmap being allocated, and the bitmap
323 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
326 bytes = (size_t)1 << radix;
/* Allocation class: bref type in the high byte, device radix low. */
327 class = (bref->type << 8) | hammer2_devblkradix(radix);
330 * Lookup the level1 freemap chain, creating and initializing one
331 * if necessary. Intermediate levels will be created automatically
332 * when necessary by hammer2_chain_create().
334 key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
335 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
336 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
339 chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
341 HAMMER2_LOOKUP_ALWAYS |
342 HAMMER2_LOOKUP_MATCHIND);
346 * Create the missing leaf, be sure to initialize
347 * the auxillary freemap tracking information in
348 * the bref.check.freemap structure.
351 kprintf("freemap create L1 @ %016jx bpref %016jx\n",
354 error = hammer2_chain_create(trans, parentp, &chain, hmp->spmp,
355 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
356 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
357 HAMMER2_FREEMAP_LEVELN_PSIZE,
359 KKASSERT(error == 0);
361 hammer2_chain_modify(trans, chain, 0);
/* Fresh leaf: zero the bitmap and mark everything possibly-free. */
362 bzero(&chain->data->bmdata[0],
363 HAMMER2_FREEMAP_LEVELN_PSIZE);
364 chain->bref.check.freemap.bigmask = (uint32_t)-1;
365 chain->bref.check.freemap.avail = l1size;
366 /* bref.methods should already be inherited */
368 hammer2_freemap_init(trans, hmp, key, chain);
370 } else if (chain->error) {
372 * Error during lookup.
374 kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
375 (intmax_t)bref->data_off,
376 hammer2_error_str(chain->error));
378 } else if ((chain->bref.check.freemap.bigmask &
379 ((size_t)1 << radix)) == 0) {
381 * Already flagged as not having enough space
386 * Modify existing chain to setup for adjustment.
388 hammer2_chain_modify(trans, chain, 0);
395 hammer2_bmap_data_t *bmap;
396 hammer2_key_t base_key;
401 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
/* Starting L0 (2MB) index within this L1 segment. */
402 start = (int)((iter->bnext - key) >>
403 HAMMER2_FREEMAP_LEVEL0_RADIX);
404 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
405 hammer2_chain_modify(trans, chain, 0);
408 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
411 if (start + count >= HAMMER2_FREEMAP_COUNT &&
417 * Calculate bmap pointer
419 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT.
422 bmap = &chain->data->bmdata[n];
424 if (n >= HAMMER2_FREEMAP_COUNT) {
426 } else if (bmap->avail) {
428 } else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
429 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
436 (bmap->class == 0 || bmap->class == class)) {
437 base_key = key + n * l0size;
438 error = hammer2_bmap_alloc(trans, hmp, bmap,
441 if (error != ENOSPC) {
448 * Must recalculate after potentially having called
449 * hammer2_bmap_alloc() above in case chain was
452 * NOTE: bmap pointer is invalid if n < 0.
455 bmap = &chain->data->bmdata[n];
458 } else if (bmap->avail) {
460 } else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
461 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
468 (bmap->class == 0 || bmap->class == class)) {
469 base_key = key + n * l0size;
470 error = hammer2_bmap_alloc(trans, hmp, bmap,
473 if (error != ENOSPC) {
/* Exhausted this leaf: remember there is no room at this radix. */
479 if (error == ENOSPC) {
480 chain->bref.check.freemap.bigmask &=
481 (uint32_t)~((size_t)1 << radix);
483 /* XXX also scan down from original count */
488 * Assert validity. Must be beyond the static allocator used
489 * by newfs_hammer2 (and thus also beyond the aux area),
490 * not go past the volume size, and must not be in the
491 * reserved segment area for a zone.
493 KKASSERT(key >= hmp->voldata.allocator_beg &&
494 key + bytes <= hmp->voldata.volu_size);
495 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
496 bref->data_off = key | radix;
499 kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
501 bref->key, bref->data_off, chain->bref.data_off);
503 } else if (error == ENOSPC) {
505 * Return EAGAIN with next iteration in iter->bnext, or
506 * return ENOSPC if the allocation map has been exhausted.
508 error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
515 hammer2_chain_unlock(chain);
516 hammer2_chain_drop(chain);
522 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
524 * If the linear iterator is mid-block we use it directly (the bitmap should
525 * already be marked allocated), otherwise we search for a block in the bitmap
526 * that fits the allocation request.
528 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
529 * to fully allocated and adjusts the linear allocator to allow the
530 * remaining space to be allocated.
/*
 * Allocate (1 << radix) bytes out of a single L0 bmap (2-bits of bitmap
 * per 16KB block).  Prefers the linear sub-16KB packing iterator when it
 * is mid-block; otherwise scans bitmapq[] for a free run.  On success
 * updates the bitmap, bmap->class/avail, the volume header's
 * allocator_free, and rewrites *basep from the L0 base to the actual
 * byte offset.
 *
 * NOTE(review): this listing elides lines (locals, loop headers, braces,
 * returns); comments below only describe the visible code.
 */
534 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_dev_t *hmp,
535 hammer2_bmap_data_t *bmap,
536 uint16_t class, int n, int radix, hammer2_key_t *basep)
542 hammer2_bitmap_t bmmask;
549 * Take into account 2-bits per block when calculating bmradix.
551 size = (size_t)1 << radix;
553 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
555 /* (16K) 2 bits per allocation block */
557 bmradix = (hammer2_bitmap_t)2 <<
558 (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
559 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */
563 * Use the linear iterator to pack small allocations, otherwise
564 * fall-back to finding a free 16KB chunk. The linear iterator
565 * is only valid when *NOT* on a freemap chunking boundary (16KB).
566 * If it is the bitmap must be scanned. It can become invalid
567 * once we pack to the boundary. We adjust it after a bitmap
568 * allocation only for sub-16KB allocations (so the perfectly good
569 * previous value can still be used for fragments when 16KB+
570 * allocations are made).
572 * Beware of hardware artifacts when bmradix == 64 (intermediate
573 * result can wind up being '1' instead of '0' if hardware masks
576 * NOTE: j needs to be even in the j= calculation. As an artifact
577 * of the /2 division, our bitmask has to clear bit 0.
579 * NOTE: TODO this can leave little unallocatable fragments lying
582 if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
583 HAMMER2_FREEMAP_BLOCK_SIZE &&
584 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
585 bmap->linear < HAMMER2_SEGSIZE) {
/* Linear path: take the tail of the current 16KB block. */
586 KKASSERT(bmap->linear >= 0 &&
587 bmap->linear + size <= HAMMER2_SEGSIZE &&
588 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
589 offset = bmap->linear;
590 i = offset / (HAMMER2_SEGSIZE / 8);
591 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
592 bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
593 HAMMER2_BMAP_ALLONES :
594 ((hammer2_bitmap_t)1 << bmradix) - 1;
596 bmap->linear = offset + size;
/* Bitmap-scan path: find a free run of bmradix bits. */
598 for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
599 bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
600 HAMMER2_BMAP_ALLONES :
601 ((hammer2_bitmap_t)1 << bmradix) - 1;
603 j < HAMMER2_BMAP_BITS_PER_ELEMENT;
605 if ((bmap->bitmapq[i] & bmmask) == 0)
610 /*fragments might remain*/
611 /*KKASSERT(bmap->avail == 0);*/
614 offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) +
615 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
/* Sub-16KB allocation: re-arm the linear iterator mid-block. */
616 if (size & HAMMER2_FREEMAP_BLOCK_MASK)
617 bmap->linear = offset + size;
620 /* 8 x (64/2) -> 256 x 16K -> 4MB */
621 KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS);
624 * Optimize the buffer cache to avoid unnecessary read-before-write
627 * The device block size could be larger than the allocation size
628 * so the actual bitmap test is somewhat more involved. We have
629 * to use a compatible buffer size for this operation.
631 if ((bmap->bitmapq[i] & bmmask) == 0 &&
632 hammer2_devblksize(size) != size) {
633 size_t psize = hammer2_devblksize(size);
634 hammer2_off_t pmask = (hammer2_off_t)psize - 1;
635 int pbmradix = (hammer2_bitmap_t)2 <<
636 (hammer2_devblkradix(radix) -
637 HAMMER2_FREEMAP_BLOCK_RADIX);
638 hammer2_bitmap_t pbmmask;
639 int pradix = hammer2_getradix(psize);
641 pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
642 HAMMER2_BMAP_ALLONES :
643 ((hammer2_bitmap_t)1 << pbmradix) - 1;
644 while ((pbmmask & bmmask) == 0)
645 pbmmask <<= pbmradix;
648 kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
649 *basep + offset, bmap->bitmapq[i],
650 pbmmask, bmmask, size, psize);
/* Whole device block still free: create the buffer as new (no read). */
653 if ((bmap->bitmapq[i] & pbmmask) == 0) {
654 error = hammer2_io_newq(hmp,
655 (*basep + (offset & ~pmask)) |
658 hammer2_io_bqrelse(&dio);
664 * When initializing a new inode segment also attempt to initialize
665 * an adjacent segment. Be careful not to index beyond the array
668 * We do this to try to localize inode accesses to improve
669 * directory scan rates. XXX doesn't improve scan rates.
671 if (size == HAMMER2_INODE_BYTES) {
673 if (bmap[-1].radix == 0 && bmap[-1].avail)
674 bmap[-1].radix = radix;
676 if (bmap[1].radix == 0 && bmap[1].avail)
677 bmap[1].radix = radix;
682 * Calculate the bitmap-granular change in bgsize for the volume
683 * header. We cannot use the fine-grained change here because
684 * the bulkfree code can't undo it. If the bitmap element is already
685 * marked allocated it has already been accounted for.
687 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
688 if (bmap->bitmapq[i] & bmmask)
691 bgsize = HAMMER2_FREEMAP_BLOCK_SIZE;
697 * Adjust the bitmap, set the class (it might have been 0),
698 * and available bytes, update the allocation offset (*basep)
699 * from the L0 base to the actual offset.
701 * avail must reflect the bitmap-granular availability. The allocator
702 * tests will also check the linear iterator.
704 bmap->bitmapq[i] |= bmmask;
706 bmap->avail -= bgsize;
710 * Adjust the volume header's allocator_free parameter. This
711 * parameter has to be fixed up by bulkfree which has no way to
712 * figure out sub-16K chunking, so it must be adjusted by the
713 * bitmap-granular size.
716 hammer2_voldata_lock(hmp);
717 hammer2_voldata_modify(hmp);
718 hmp->voldata.allocator_free -= bgsize;
719 hammer2_voldata_unlock(hmp);
/*
 * Initialize a freshly created FREEMAP_LEAF covering a 2GB (L1) segment:
 * mark everything outside the [lokey, hikey) window (static newfs
 * allocations, the per-zone reserved area, and space past end-of-volume)
 * as fully allocated, the rest as available, and track the total in
 * chain->bref.check.freemap.avail.
 *
 * NOTE(review): this listing elides lines (locals, else branches,
 * closing braces).
 */
727 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_dev_t *hmp,
728 hammer2_key_t key, hammer2_chain_t *chain)
730 hammer2_off_t l1size;
733 hammer2_bmap_data_t *bmap;
736 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
739 * Calculate the portion of the 2GB map that should be initialized
740 * as free. Portions below or after will be initialized as allocated.
741 * SEGMASK-align the areas so we don't have to worry about sub-scans
742 * or endianness when using memset.
744 * (1) Ensure that all statically allocated space from newfs_hammer2
745 * is marked allocated.
747 * (2) Ensure that the reserved area is marked allocated (typically
748 * the first 4MB of the 2GB area being represented).
750 * (3) Ensure that any trailing space at the end-of-volume is marked
753 * WARNING! It is possible for lokey to be larger than hikey if the
754 * entire 2GB segment is within the static allocation.
756 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
759 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
760 HAMMER2_ZONE_SEG64) {
761 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
765 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
766 if (hikey > hmp->voldata.volu_size) {
767 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
770 chain->bref.check.freemap.avail =
771 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
772 bmap = &chain->data->bmdata[0];
/* Walk the L0 (2MB) entries, one bitmapq per entry. */
774 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
775 if (key < lokey || key >= hikey) {
/* Outside the free window: force fully-allocated. */
776 memset(bmap->bitmapq, -1,
777 sizeof(bmap->bitmapq));
779 bmap->linear = HAMMER2_SEGSIZE;
780 chain->bref.check.freemap.avail -=
781 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
783 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
785 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
791 * The current Level 1 freemap has been exhausted, iterate to the next
792 * one, return ENOSPC if no freemaps remain.
794 * XXX this should rotate back to the beginning to handle freed-up space
795 * XXX or use intermediate entries to locate free space. TODO
/*
 * Advance iter->bnext to the base of the next Level-1 (2GB) freemap
 * segment.  Past end-of-volume the iterator loop counter bounds retries
 * (the ENOSPC/EAGAIN returns are elided in this listing).
 */
798 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
799 hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
801 hammer2_dev_t *hmp = (*parentp)->hmp;
/* Round down to the current L1 base, then step to the next one. */
803 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
804 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
805 if (iter->bnext >= hmp->voldata.volu_size) {
807 if (++iter->loops == 2)
814 * Adjust the bit-pattern for data in the freemap bitmap according to
815 * (how). This code is called from on-mount recovery to fixup (mark
816 * as allocated) blocks whose freemap updates might not have been committed
817 * in the last crash and is used by the bulk freemap scan to stage frees.
819 * XXX currently disabled when how == 0 (the normal real-time case). At
820 * the moment we depend on the bulk freescan to actually free blocks. It
821 * will still call this routine with a non-zero how to stage possible frees
822 * and to do the actual free.
/*
 * Adjust the 2-bit freemap state for the block described by bref
 * according to (how): DORECOVER marks it allocated (11), the
 * MAYFREE/REALFREE paths transition 11 -> 10 -> 00.  Looks up (or, for
 * recovery, creates) the covering FREEMAP_LEAF, modifies the bitmap
 * in place, and finally re-sets the leaf's bigmask hint for this radix.
 *
 * NOTE(review): this listing elides lines (locals, braces, early
 * returns); the KKASSERT(how == DORECOVER) visible below suggests only
 * the recovery path is active in this revision -- confirm against the
 * full source.
 */
825 hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_dev_t *hmp,
826 hammer2_blockref_t *bref, int how)
828 hammer2_off_t data_off = bref->data_off;
829 hammer2_chain_t *chain;
830 hammer2_chain_t *parent;
831 hammer2_bmap_data_t *bmap;
833 hammer2_key_t key_dummy;
834 hammer2_off_t l0size;
835 hammer2_off_t l1size;
836 hammer2_off_t l1mask;
837 hammer2_bitmap_t *bitmap;
838 const hammer2_bitmap_t bmmask00 = 0;
839 hammer2_bitmap_t bmmask01;
840 hammer2_bitmap_t bmmask10;
841 hammer2_bitmap_t bmmask11;
848 int cache_index = -1;
851 KKASSERT(how == HAMMER2_FREEMAP_DORECOVER);
/* Split data_off into the radix (low bits) and the media offset. */
853 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
854 data_off &= ~HAMMER2_OFF_MASK_RADIX;
855 KKASSERT(radix <= HAMMER2_RADIX_MAX);
857 bytes = (size_t)1 << radix;
858 class = (bref->type << 8) | hammer2_devblkradix(radix);
861 * We can't adjust the freemap for data allocations made by
864 if (data_off < hmp->voldata.allocator_beg)
867 KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
870 * Lookup the level1 freemap chain. The chain must exist.
872 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
873 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
874 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
877 parent = &hmp->fchain;
878 hammer2_chain_ref(parent);
879 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
881 chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
883 HAMMER2_LOOKUP_ALWAYS |
884 HAMMER2_LOOKUP_MATCHIND);
887 * Stop early if we are trying to free something but no leaf exists.
889 if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
890 kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
891 (intmax_t)bref->data_off);
895 kprintf("hammer2_freemap_adjust: %016jx: error %s\n",
896 (intmax_t)bref->data_off,
897 hammer2_error_str(chain->error));
898 hammer2_chain_unlock(chain);
899 hammer2_chain_drop(chain);
905 * Create any missing leaf(s) if we are doing a recovery (marking
906 * the block(s) as being allocated instead of being freed). Be sure
907 * to initialize the auxillary freemap tracking info in the
908 * bref.check.freemap structure.
910 if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
911 error = hammer2_chain_create(trans, &parent, &chain, hmp->spmp,
912 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
913 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
914 HAMMER2_FREEMAP_LEVELN_PSIZE,
917 if (hammer2_debug & 0x0040) {
918 kprintf("fixup create chain %p %016jx:%d\n",
919 chain, chain->bref.key, chain->bref.keybits);
/* Initialize the fresh leaf exactly as try_alloc does. */
923 hammer2_chain_modify(trans, chain, 0);
924 bzero(&chain->data->bmdata[0],
925 HAMMER2_FREEMAP_LEVELN_PSIZE);
926 chain->bref.check.freemap.bigmask = (uint32_t)-1;
927 chain->bref.check.freemap.avail = l1size;
928 /* bref.methods should already be inherited */
930 hammer2_freemap_init(trans, hmp, key, chain);
932 /* XXX handle error */
936 kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
937 chain->bref.type, chain->bref.key,
938 chain->bref.keybits, chain->bref.data_off);
942 * Calculate the bitmask (runs in 2-bit pairs).
944 start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
945 bmmask01 = (hammer2_bitmap_t)1 << start;
946 bmmask10 = (hammer2_bitmap_t)2 << start;
947 bmmask11 = (hammer2_bitmap_t)3 << start;
950 * Fixup the bitmap. Partial blocks cannot be fully freed unless
951 * a bulk scan is able to roll them up.
953 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
/* Sub-16KB: downgrade a real-free to possibly-free. */
955 if (how == HAMMER2_FREEMAP_DOREALFREE)
956 how = HAMMER2_FREEMAP_DOMAYFREE;
958 count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
962 * [re]load the bmap and bitmap pointers. Each bmap entry covers
963 * a 2MB swath. The bmap itself (LEVEL1) covers 2GB.
965 * Be sure to reset the linear iterator to ensure that the adjustment
969 bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
970 (HAMMER2_FREEMAP_COUNT - 1)];
971 bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
978 if (how == HAMMER2_FREEMAP_DORECOVER) {
980 * Recovery request, mark as allocated.
982 if ((*bitmap & bmmask11) != bmmask11) {
984 hammer2_chain_modify(trans, chain, 0);
988 if ((*bitmap & bmmask11) == bmmask00) {
990 HAMMER2_FREEMAP_BLOCK_SIZE;
992 if (bmap->class == 0)
995 if (hammer2_debug & 0x0040) {
996 kprintf("hammer2_freemap_recover: "
998 "block=%016jx/%zd\n",
999 bref->type, data_off, bytes);
1003 kprintf("hammer2_freemap_recover: good "
1004 "type=%02x block=%016jx/%zd\n",
1005 bref->type, data_off, bytes);
1011 * XXX this stuff doesn't work, avail is miscalculated and
1012 * code 10 means something else now.
1014 else if ((*bitmap & bmmask11) == bmmask11) {
1016 * Mayfree/Realfree request and bitmap is currently
1017 * marked as being fully allocated.
1020 hammer2_chain_modify(trans, chain, 0);
1024 if (how == HAMMER2_FREEMAP_DOREALFREE)
1025 *bitmap &= ~bmmask11;
1027 *bitmap = (*bitmap & ~bmmask11) | bmmask10;
1028 } else if ((*bitmap & bmmask11) == bmmask10) {
1030 * Mayfree/Realfree request and bitmap is currently
1031 * marked as being possibly freeable.
1033 if (how == HAMMER2_FREEMAP_DOREALFREE) {
1035 hammer2_chain_modify(trans, chain, 0);
1039 *bitmap &= ~bmmask11;
1043 * 01 - Not implemented, currently illegal state
1044 * 00 - Not allocated at all, illegal free.
1046 panic("hammer2_freemap_adjust: "
1047 "Illegal state %08x(%08x)",
1048 *bitmap, *bitmap & bmmask11);
1056 #if HAMMER2_BMAP_ELEMENTS != 8
1057 #error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8"
/* On a real free, return the space and detect a fully-free 2MB seg. */
1059 if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
1060 bmap->avail += 1 << radix;
1061 KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
1062 if (bmap->avail == HAMMER2_SEGSIZE &&
1063 bmap->bitmapq[0] == 0 &&
1064 bmap->bitmapq[1] == 0 &&
1065 bmap->bitmapq[2] == 0 &&
1066 bmap->bitmapq[3] == 0 &&
1067 bmap->bitmapq[4] == 0 &&
1068 bmap->bitmapq[5] == 0 &&
1069 bmap->bitmapq[6] == 0 &&
1070 bmap->bitmapq[7] == 0) {
1071 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
1072 kprintf("Freeseg %016jx\n", (intmax_t)key);
1078 * chain->bref.check.freemap.bigmask (XXX)
1080 * Setting bigmask is a hint to the allocation code that there might
1081 * be something allocatable. We also set this in recovery... it
1082 * doesn't hurt and we might want to use the hint for other validation
1083 * operations later on.
1086 chain->bref.check.freemap.bigmask |= 1 << radix;
1088 hammer2_chain_unlock(chain);
1089 hammer2_chain_drop(chain);
1091 hammer2_chain_unlock(parent);
1092 hammer2_chain_drop(parent);
1096 * Validate the freemap, in three stages.
1098 * stage-1 ALLOCATED -> POSSIBLY FREE
1099 * POSSIBLY FREE -> POSSIBLY FREE (type corrected)
1101 * This transitions bitmap entries from ALLOCATED to POSSIBLY FREE.
1102 * The POSSIBLY FREE state does not mean that a block is actually free
1103 * and may be transitioned back to ALLOCATED in stage-2.
1105 * This is typically done during normal filesystem operations when
1106 * something is deleted or a block is replaced.
1108 * This is done by bulkfree in-bulk after a memory-bounded meta-data
1109 * scan to try to determine what might be freeable.
1111 * This can be done unconditionally through a freemap scan when the
1112 * intention is to brute-force recover the proper state of the freemap.
1114 * stage-2 POSSIBLY FREE -> ALLOCATED (scan metadata topology)
1116 * This is done by bulkfree during a meta-data scan to ensure that
1117 * all blocks still actually allocated by the filesystem are marked
1120 * NOTE! Live filesystem transitions to POSSIBLY FREE can occur while
1121 * the bulkfree stage-2 and stage-3 is running. The live filesystem
1122 * will use the alternative POSSIBLY FREE type (2) to prevent
1123 * stage-3 from improperly transitioning unvetted possibly-free
1126 * stage-3 POSSIBLY FREE (type 1) -> FREE (scan freemap)
1128 * This is done by bulkfree to finalize POSSIBLY FREE states.