2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
/*
 * Freemap allocation iterator.  Carries the preferred starting offset
 * (bpref), the next offset to try (bnext), and a wrap counter (loops)
 * used by hammer2_freemap_iterate() to detect volume exhaustion.
 * (Field list elided in this view — TODO confirm against full header.)
 */
48 struct hammer2_fiterate {
54 typedef struct hammer2_fiterate hammer2_fiterate_t;
/*
 * Forward declarations for the static freemap helpers defined below.
 */
56 static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
57 hammer2_chain_t **parentp, hammer2_blockref_t *bref,
58 int radix, hammer2_fiterate_t *iter);
59 static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
60 hammer2_key_t key, hammer2_chain_t *chain);
61 static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
62 hammer2_bmap_data_t *bmap, uint16_t class,
63 int n, int radix, hammer2_key_t *basep);
64 static int hammer2_freemap_iterate(hammer2_trans_t *trans,
65 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
66 hammer2_fiterate_t *iter);
70 hammer2_freemapradix(int radix)
76 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
77 * bref. Return a combined media offset and physical size radix. Freemap
78 * chains use fixed storage offsets in the 4MB reserved area at the
79 * beginning of each 2GB zone
81 * Rotate between four possibilities. Theoretically this means we have three
82 * good freemaps in case of a crash which we can use as a base for the fixup
/* H2FMSHIFT: 2^radix expressed as a 64-bit media offset. */
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))
/* H2FMBASE: align key down to a 2^radix boundary. */
#define H2FMBASE(key, radix)	((key) & ~(H2FMSHIFT(radix) - 1))
/*
 * hammer2_freemap_reserve() - assign reserved-area storage for freemap
 * metadata (FREEMAP_NODE / FREEMAP_LEAF brefs).
 *
 * Rotates among the four per-zone freemap copies (FREEMAP_A-D) based on
 * the bref's previous data_off, then adds the per-level offset within
 * the zone's reserved area.  On return bref->data_off holds the media
 * offset OR'd with the size radix.
 */
90 hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref,
97 * Physical allocation size -> radix. Typically either 256 for
98 * a level 0 freemap leaf or 65536 for a level N freemap node.
100 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
101 * Do not use hammer2_allocsize() here as it has a min cap.
106 * Adjust by HAMMER2_ZONE_FREEMAP_{A,B,C,D} using the existing
107 * offset as a basis. Start in zone A if previously unallocated.
109 if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
110 off = HAMMER2_ZONE_FREEMAP_A;
112 off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
113 (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
114 off = off / HAMMER2_PBUFSIZE;
115 KKASSERT(off >= HAMMER2_ZONE_FREEMAP_A);
116 KKASSERT(off < HAMMER2_ZONE_FREEMAP_D + 4);
/*
 * Rotate to the next zone copy: A->B->C->D->A.
 */
118 if (off >= HAMMER2_ZONE_FREEMAP_D)
119 off = HAMMER2_ZONE_FREEMAP_A;
120 else if (off >= HAMMER2_ZONE_FREEMAP_C)
121 off = HAMMER2_ZONE_FREEMAP_D;
122 else if (off >= HAMMER2_ZONE_FREEMAP_B)
123 off = HAMMER2_ZONE_FREEMAP_C;
125 off = HAMMER2_ZONE_FREEMAP_B;
127 off = off * HAMMER2_PBUFSIZE;
130 * Calculate the block offset of the reserved block. This will
131 * point into the 4MB reserved area at the base of the appropriate
132 * 2GB zone, once added to the FREEMAP_x selection above.
/*
 * bref->keybits selects the freemap level; each level has a fixed
 * slot (HAMMER2_ZONEFM_LEVELn) within the zone's reserved area.
 */
134 switch(bref->keybits) {
135 /* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
136 case HAMMER2_FREEMAP_LEVEL4_RADIX: /* 2EB */
137 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
138 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
139 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
140 HAMMER2_ZONEFM_LEVEL4 * HAMMER2_PBUFSIZE;
142 case HAMMER2_FREEMAP_LEVEL3_RADIX: /* 2PB */
143 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
144 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
145 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
146 HAMMER2_ZONEFM_LEVEL3 * HAMMER2_PBUFSIZE;
148 case HAMMER2_FREEMAP_LEVEL2_RADIX: /* 2TB */
149 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
150 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
151 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
152 HAMMER2_ZONEFM_LEVEL2 * HAMMER2_PBUFSIZE;
154 case HAMMER2_FREEMAP_LEVEL1_RADIX: /* 2GB */
155 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
156 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
157 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
158 HAMMER2_ZONEFM_LEVEL1 * HAMMER2_PBUFSIZE;
161 panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
165 bref->data_off = off | radix;
170 * Normal freemap allocator
172 * Use available hints to allocate space using the freemap. Create missing
173 * freemap infrastructure on-the-fly as needed (including marking initial
174 * allocations using the iterator as allocated, instantiating new 2GB zones,
175 * and dealing with the end-of-media edge case).
177 * ip and bpref are only used as a heuristic to determine locality of
178 * reference. bref->key may also be used heuristically.
180 * WARNING! When called from a flush we have to use the 'live' sync_tid
181 * and not the flush sync_tid. The live sync_tid is the flush
182 * sync_tid + 1. That is, freemap allocations which occur during
183 * a flush are not part of the flush. Crash-recovery will restore
184 * any lost allocations.
187 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
188 hammer2_blockref_t *bref, size_t bytes)
190 hammer2_chain_t *parent;
194 hammer2_fiterate_t iter;
197 * Validate the allocation size. It must be a power of 2.
199 * For now require that the caller be aware of the minimum
202 radix = hammer2_getradix(bytes);
203 KKASSERT((size_t)1 << radix == bytes);
206 * Freemap blocks themselves are simply assigned from the reserve
207 * area, not allocated from the freemap.
209 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
210 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
211 return(hammer2_freemap_reserve(hmp, bref, radix));
215 * Mark previously allocated block as possibly freeable. There might
216 * be snapshots and other races so we can't just mark it fully free.
217 * (XXX optimize this for the current-transaction create+delete case)
219 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) {
220 hammer2_freemap_adjust(trans, hmp, bref,
221 HAMMER2_FREEMAP_DOMAYFREE);
225 * Setting ISALLOCATING ensures correct operation even when the
226 * flusher itself is making allocations.
228 KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC);
229 KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
230 atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
231 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
235 * Calculate the starting point for our allocation search.
237 * Each freemap leaf is dedicated to a specific freemap_radix.
238 * The freemap_radix can be more fine-grained than the device buffer
239 * radix which results in inodes being grouped together in their
240 * own segment, terminal-data (16K or less) and initial indirect
241 * block being grouped together, and then full-indirect and full-data
242 * blocks (64K) being grouped together.
244 * The single most important aspect of this is the inode grouping
245 * because that is what allows 'find' and 'ls' and other filesystem
246 * topology operations to run fast.
/*
 * Locality hint priority: previous data_off, then the transaction's
 * tmp_bpref, then the tmp_ip's chain offset.
 */
249 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
250 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
251 else if (trans->tmp_bpref)
252 bpref = trans->tmp_bpref;
253 else if (trans->tmp_ip)
254 bpref = trans->tmp_ip->chain->bref.data_off;
258 * Heuristic tracking index. We would like one for each distinct
259 * bref type if possible. heur_freemap[] has room for two classes
260 * for each type. At a minimum we have to break-up our heuristic
261 * by device block sizes.
263 hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
264 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
265 hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
266 hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
267 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);
269 iter.bpref = hmp->heur_freemap[hindex];
272 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's
273 * reserved area, the try code will iterate past it.
275 if (iter.bpref > hmp->voldata.volu_size)
276 iter.bpref = hmp->voldata.volu_size - 1;
279 * Iterate the freemap looking for free space before and after.
281 parent = &hmp->fchain;
282 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
284 iter.bnext = iter.bpref;
/*
 * Retry while try_alloc reports EAGAIN (iterator advanced to
 * another 2GB zone).  The heuristic is updated on the way out.
 */
287 while (error == EAGAIN) {
288 error = hammer2_freemap_try_alloc(trans, &parent, bref,
291 hmp->heur_freemap[hindex] = iter.bnext;
292 hammer2_chain_unlock(parent);
294 atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
295 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
/*
 * Attempt an allocation at iter->bnext within the level-1 freemap leaf
 * covering that offset, creating and initializing the leaf on-the-fly
 * if it does not yet exist.  On success bref->data_off is set to the
 * allocated offset OR'd with the radix.  On ENOSPC within this leaf the
 * iterator is advanced via hammer2_freemap_iterate() (EAGAIN to retry,
 * ENOSPC when the volume is exhausted).
 */
302 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
303 hammer2_blockref_t *bref, int radix,
304 hammer2_fiterate_t *iter)
306 hammer2_mount_t *hmp = (*parentp)->hmp;
307 hammer2_off_t l0size;
308 hammer2_off_t l1size;
309 hammer2_off_t l1mask;
310 hammer2_key_t key_dummy;
311 hammer2_chain_t *chain;
316 int cache_index = -1;
320 * Calculate the number of bytes being allocated, the number
321 * of contiguous bits of bitmap being allocated, and the bitmap
324 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
327 bytes = (size_t)1 << radix;
328 class = (bref->type << 8) | hammer2_devblkradix(radix);
331 * Lookup the level1 freemap chain, creating and initializing one
332 * if necessary. Intermediate levels will be created automatically
333 * when necessary by hammer2_chain_create().
335 key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
336 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
337 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
340 chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
342 HAMMER2_LOOKUP_FREEMAP |
343 HAMMER2_LOOKUP_ALWAYS |
344 HAMMER2_LOOKUP_MATCHIND);
347 * Create the missing leaf, be sure to initialize
348 * the auxiliary freemap tracking information in
349 * the bref.check.freemap structure.
352 kprintf("freemap create L1 @ %016jx bpref %016jx\n",
355 error = hammer2_chain_create(trans, parentp, &chain,
356 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
357 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
358 HAMMER2_FREEMAP_LEVELN_PSIZE);
360 hammer2_chain_modify(trans, &chain, 0);
361 bzero(&chain->data->bmdata[0],
362 HAMMER2_FREEMAP_LEVELN_PSIZE);
363 chain->bref.check.freemap.bigmask = (uint32_t)-1;
364 chain->bref.check.freemap.avail = l1size;
365 /* bref.methods should already be inherited */
367 hammer2_freemap_init(trans, hmp, key, chain);
369 } else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) {
371 * Already flagged as not having enough space
376 * Modify existing chain to setup for adjustment.
378 hammer2_chain_modify(trans, &chain, 0);
385 hammer2_bmap_data_t *bmap;
386 hammer2_key_t base_key;
391 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
392 start = (int)((iter->bnext - key) >>
393 HAMMER2_FREEMAP_LEVEL0_RADIX);
394 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
395 hammer2_chain_modify(trans, &chain, 0);
/*
 * Scan the leaf's 2MB bmap entries outward from 'start', trying
 * any entry with space whose class matches (or is unset).
 */
398 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
399 if (start + count >= HAMMER2_FREEMAP_COUNT &&
404 bmap = &chain->data->bmdata[n];
405 if (n < HAMMER2_FREEMAP_COUNT && bmap->avail &&
406 (bmap->class == 0 || bmap->class == class)) {
407 base_key = key + n * l0size;
408 error = hammer2_bmap_alloc(trans, hmp, bmap,
411 if (error != ENOSPC) {
417 bmap = &chain->data->bmdata[n];
418 if (n >= 0 && bmap->avail &&
419 (bmap->class == 0 || bmap->class == class)) {
420 base_key = key + n * l0size;
421 error = hammer2_bmap_alloc(trans, hmp, bmap,
424 if (error != ENOSPC) {
/*
 * Whole leaf exhausted for this radix; clear the bigmask hint
 * so future attempts can skip it quickly.
 */
431 chain->bref.check.freemap.bigmask &= ~(1 << radix);
432 /* XXX also scan down from original count */
437 * Assert validity. Must be beyond the static allocator used
438 * by newfs_hammer2 (and thus also beyond the aux area),
439 * not go past the volume size, and must not be in the
440 * reserved segment area for a zone.
442 KKASSERT(key >= hmp->voldata.allocator_beg &&
443 key + bytes <= hmp->voldata.volu_size);
444 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
445 bref->data_off = key | radix;
448 kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
450 bref->key, bref->data_off, chain->bref.data_off);
452 } else if (error == ENOSPC) {
454 * Return EAGAIN with next iteration in iter->bnext, or
455 * return ENOSPC if the allocation map has been exhausted.
457 error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
464 hammer2_chain_unlock(chain);
469 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
471 * If the linear iterator is mid-block we use it directly (the bitmap should
472 * already be marked allocated), otherwise we search for a block in the bitmap
473 * that fits the allocation request.
475 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
476 * to fully allocated and adjusts the linear allocator to allow the
477 * remaining space to be allocated.
481 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
482 hammer2_bmap_data_t *bmap,
483 uint16_t class, int n, int radix, hammer2_key_t *basep)
496 * Take into account 2-bits per block when calculating bmradix.
498 size = (size_t)1 << radix;
500 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
502 bsize = HAMMER2_FREEMAP_BLOCK_SIZE;
503 /* (16K) 2 bits per allocation block */
505 bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
507 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */
511 * Use the linear iterator to pack small allocations, otherwise
512 * fall-back to finding a free 16KB chunk. The linear iterator
513 * is only valid when *NOT* on a freemap chunking boundary (16KB).
514 * If it is the bitmap must be scanned. It can become invalid
515 * once we pack to the boundary. We adjust it after a bitmap
516 * allocation only for sub-16KB allocations (so the perfectly good
517 * previous value can still be used for fragments when 16KB+
518 * allocations are made).
520 * Beware of hardware artifacts when bmradix == 32 (intermediate
521 * result can wind up being '1' instead of '0' if hardware masks
524 * NOTE: j needs to be even in the j= calculation. As an artifact
525 * of the /2 division, our bitmask has to clear bit 0.
527 * NOTE: TODO this can leave little unallocatable fragments lying
530 if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
531 HAMMER2_FREEMAP_BLOCK_SIZE &&
532 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
533 bmap->linear < HAMMER2_SEGSIZE) {
534 KKASSERT(bmap->linear >= 0 &&
535 bmap->linear + size <= HAMMER2_SEGSIZE &&
536 (bmap->linear & (HAMMER2_MIN_ALLOC - 1)) == 0);
537 offset = bmap->linear;
538 i = offset / (HAMMER2_SEGSIZE / 8);
539 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
540 bmmask = (bmradix == 32) ?
541 0xFFFFFFFFU : (1 << bmradix) - 1;
543 bmap->linear = offset + size;
/*
 * Bitmap scan: 8 x 32-bit words cover the 2MB segment; step
 * each word in bmradix-bit strides looking for a clear run.
 */
545 for (i = 0; i < 8; ++i) {
546 bmmask = (bmradix == 32) ?
547 0xFFFFFFFFU : (1 << bmradix) - 1;
548 for (j = 0; j < 32; j += bmradix) {
549 if ((bmap->bitmap[i] & bmmask) == 0)
554 /*fragments might remain*/
555 /*KKASSERT(bmap->avail == 0);*/
558 offset = i * (HAMMER2_SEGSIZE / 8) +
559 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
560 if (size & HAMMER2_FREEMAP_BLOCK_MASK)
561 bmap->linear = offset + size;
564 KKASSERT(i >= 0 && i < 8); /* 8 x 16 -> 128 x 16K -> 2MB */
567 * Optimize the buffer cache to avoid unnecessary read-before-write
570 * The device block size could be larger than the allocation size
571 * so the actual bitmap test is somewhat more involved. We have
572 * to use a compatible buffer size for this operation.
574 if ((bmap->bitmap[i] & bmmask) == 0 &&
575 hammer2_devblksize(size) != size) {
576 size_t psize = hammer2_devblksize(size);
577 hammer2_off_t pmask = (hammer2_off_t)psize - 1;
578 int pbmradix = 2 << (hammer2_devblkradix(radix) -
579 HAMMER2_FREEMAP_BLOCK_RADIX);
581 int pradix = hammer2_getradix(psize);
583 pbmmask = (pbmradix == 32) ? 0xFFFFFFFFU : (1 << pbmradix) - 1;
584 while ((pbmmask & bmmask) == 0)
585 pbmmask <<= pbmradix;
588 kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n",
589 *basep + offset, bmap->bitmap[i],
590 pbmmask, bmmask, size, psize);
593 if ((bmap->bitmap[i] & pbmmask) == 0) {
594 error = hammer2_io_newq(hmp,
595 (*basep + (offset & ~pmask)) |
598 hammer2_io_bqrelse(&dio);
604 * When initializing a new inode segment also attempt to initialize
605 * an adjacent segment. Be careful not to index beyond the array
608 * We do this to try to localize inode accesses to improve
609 * directory scan rates. XXX doesn't improve scan rates.
611 if (size == HAMMER2_INODE_BYTES) {
613 if (bmap[-1].radix == 0 && bmap[-1].avail)
614 bmap[-1].radix = radix;
616 if (bmap[1].radix == 0 && bmap[1].avail)
617 bmap[1].radix = radix;
623 * Adjust the linear iterator, set the radix if necessary (might as
624 * well just set it unconditionally), adjust *basep to return the
625 * allocated data offset.
627 bmap->bitmap[i] |= bmmask;
/* Volume-wide free-space accounting (under the voldata lock). */
632 hammer2_voldata_lock(hmp);
633 hmp->voldata.allocator_free -= size; /* XXX */
634 hammer2_voldata_unlock(hmp, 1);
/*
 * Initialize the bitmaps of a freshly created level-1 freemap leaf
 * covering the 2GB area at 'key'.  Regions below the static-allocator /
 * reserved-segment boundary (lokey) or at/after end-of-volume (hikey)
 * are marked fully allocated; everything in between is marked free and
 * credited to bref.check.freemap.avail.
 */
641 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
642 hammer2_key_t key, hammer2_chain_t *chain)
644 hammer2_off_t l1size;
647 hammer2_bmap_data_t *bmap;
650 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
653 * Calculate the portion of the 2GB map that should be initialized
654 * as free. Portions below or after will be initialized as allocated.
655 * SEGMASK-align the areas so we don't have to worry about sub-scans
656 * or endianness when using memset.
658 * (1) Ensure that all statically allocated space from newfs_hammer2
659 * is marked allocated.
661 * (2) Ensure that the reserved area is marked allocated (typically
662 * the first 4MB of the 2GB area being represented).
664 * (3) Ensure that any trailing space at the end-of-volume is marked
667 * WARNING! It is possible for lokey to be larger than hikey if the
668 * entire 2GB segment is within the static allocation.
670 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
673 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
674 HAMMER2_ZONE_SEG64) {
675 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
679 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
680 if (hikey > hmp->voldata.volu_size) {
681 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
684 chain->bref.check.freemap.avail =
685 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
686 bmap = &chain->data->bmdata[0];
/*
 * Walk every 2MB bmap entry in the leaf; entries outside
 * [lokey, hikey) are marked fully allocated and deducted from
 * the leaf's available total.
 */
688 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
689 if (key < lokey || key >= hikey) {
690 memset(bmap->bitmap, -1,
691 sizeof(bmap->bitmap));
693 bmap->linear = HAMMER2_SEGSIZE;
694 chain->bref.check.freemap.avail -=
695 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
697 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
699 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
705 * The current Level 1 freemap has been exhausted, iterate to the next
706 * one, return ENOSPC if no freemaps remain.
708 * XXX this should rotate back to the beginning to handle freed-up space
709 * XXX or use intermediate entries to locate free space. TODO
712 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
713 hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
715 hammer2_mount_t *hmp = (*parentp)->hmp;
/* Advance bnext to the base of the next 2GB (LEVEL1) zone. */
717 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
718 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
719 if (iter->bnext >= hmp->voldata.volu_size) {
/* Second wrap of the volume means nothing left to scan. */
721 if (++iter->loops == 2)
728 * Free the specified blockref. This code is only able to fully free
729 * blocks when (how) is non-zero, otherwise the block is marked for
730 * the bulk freeing pass to check.
732 * Normal use is to only mark inodes as possibly being free. The underlying
733 * file blocks are not necessarily marked. The bulk freescan can
734 * theoretically handle the case.
736 * XXX currently disabled when how == 0 (the normal real-time case). At
737 * the moment we depend on the bulk freescan to actually free blocks. It
738 * will still call this routine with a non-zero how to stage possible frees
739 * and to do the actual free.
741 * WARNING! When called from a flush we have to use the 'live' sync_tid
742 * and not the flush sync_tid. The live sync_tid is the flush
743 * sync_tid + 1. That is, freemap allocations which occur during
744 * a flush are not part of the flush. Crash-recovery will restore
745 * any lost allocations.
748 hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
749 hammer2_blockref_t *bref, int how)
751 hammer2_off_t data_off = bref->data_off;
752 hammer2_chain_t *chain;
753 hammer2_chain_t *parent;
754 hammer2_bmap_data_t *bmap;
756 hammer2_key_t key_dummy;
757 hammer2_off_t l0size;
758 hammer2_off_t l1size;
759 hammer2_off_t l1mask;
761 const uint32_t bmmask00 = 0;
771 int cache_index = -1;
/* Split data_off into its radix (low bits) and media offset. */
774 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
775 data_off &= ~HAMMER2_OFF_MASK_RADIX;
776 KKASSERT(radix <= HAMMER2_MAX_RADIX);
778 bytes = (size_t)1 << radix;
779 class = (bref->type << 8) | hammer2_devblkradix(radix);
782 * We can't adjust the freemap for data allocations made by
785 if (data_off < hmp->voldata.allocator_beg)
788 KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
789 KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
790 atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
791 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
795 * Lookup the level1 freemap chain. The chain must exist.
797 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
798 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
799 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
802 parent = &hmp->fchain;
803 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
805 chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
807 HAMMER2_LOOKUP_FREEMAP |
808 HAMMER2_LOOKUP_ALWAYS |
809 HAMMER2_LOOKUP_MATCHIND);
812 * Stop early if we are trying to free something but no leaf exists.
814 if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
815 kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
816 (intmax_t)bref->data_off);
817 hammer2_chain_unlock(parent);
822 * Create any missing leaf(s) if we are doing a recovery (marking
823 * the block(s) as being allocated instead of being freed). Be sure
824 * to initialize the auxiliary freemap tracking info in the
825 * bref.check.freemap structure.
827 if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
828 error = hammer2_chain_create(trans, &parent, &chain,
829 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
830 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
831 HAMMER2_FREEMAP_LEVELN_PSIZE);
832 kprintf("fixup create chain %p %016jx:%d\n", chain, chain->bref.key, chain->bref.keybits);
835 hammer2_chain_modify(trans, &chain, 0);
836 bzero(&chain->data->bmdata[0],
837 HAMMER2_FREEMAP_LEVELN_PSIZE);
838 chain->bref.check.freemap.bigmask = (uint32_t)-1;
839 chain->bref.check.freemap.avail = l1size;
840 /* bref.methods should already be inherited */
842 hammer2_freemap_init(trans, hmp, key, chain);
844 /* XXX handle error */
848 * Calculate the bitmask (runs in 2-bit pairs).
850 start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
851 bmmask01 = 1 << start;
852 bmmask10 = 2 << start;
853 bmmask11 = 3 << start;
856 * Fixup the bitmap. Partial blocks cannot be fully freed unless
857 * a bulk scan is able to roll them up.
859 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
/* Sub-16KB: downgrade a real free to a may-free marking. */
861 if (how == HAMMER2_FREEMAP_DOREALFREE)
862 how = HAMMER2_FREEMAP_DOMAYFREE;
864 count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
868 * [re]load the bmap and bitmap pointers. Each bmap entry covers
869 * a 2MB swath. The bmap itself (LEVEL1) covers 2GB.
872 bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
873 (HAMMER2_FREEMAP_COUNT - 1)];
874 bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
/*
 * State machine on the 2-bit pair:
 *   11 allocated, 10 possibly-free, 00 free (01 is illegal).
 */
879 if (how == HAMMER2_FREEMAP_DORECOVER) {
881 * Recovery request, mark as allocated.
883 if ((*bitmap & bmmask11) != bmmask11) {
885 hammer2_chain_modify(trans, &chain, 0);
889 if ((*bitmap & bmmask11) == bmmask00)
890 bmap->avail -= 1 << radix;
891 if (bmap->class == 0)
894 kprintf("hammer2_freemap_recover: fixup "
895 "type=%02x block=%016jx/%zd\n",
896 bref->type, data_off, bytes);
899 kprintf("hammer2_freemap_recover: good "
900 "type=%02x block=%016jx/%zd\n",
901 bref->type, data_off, bytes);
904 } else if ((*bitmap & bmmask11) == bmmask11) {
906 * Mayfree/Realfree request and bitmap is currently
907 * marked as being fully allocated.
910 hammer2_chain_modify(trans, &chain, 0);
914 if (how == HAMMER2_FREEMAP_DOREALFREE)
915 *bitmap &= ~bmmask11;
917 *bitmap = (*bitmap & ~bmmask11) | bmmask10;
918 } else if ((*bitmap & bmmask11) == bmmask10) {
920 * Mayfree/Realfree request and bitmap is currently
921 * marked as being possibly freeable.
923 if (how == HAMMER2_FREEMAP_DOREALFREE) {
925 hammer2_chain_modify(trans, &chain, 0);
929 *bitmap &= ~bmmask11;
933 * 01 - Not implemented, currently illegal state
934 * 00 - Not allocated at all, illegal free.
936 panic("hammer2_freemap_adjust: "
937 "Illegal state %08x(%08x)",
938 *bitmap, *bitmap & bmmask11);
/*
 * On a real free, return the space to the bmap and log when an
 * entire 2MB segment becomes completely free.
 */
945 if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
946 bmap->avail += 1 << radix;
947 KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
948 if (bmap->avail == HAMMER2_SEGSIZE &&
949 bmap->bitmap[0] == 0 &&
950 bmap->bitmap[1] == 0 &&
951 bmap->bitmap[2] == 0 &&
952 bmap->bitmap[3] == 0 &&
953 bmap->bitmap[4] == 0 &&
954 bmap->bitmap[5] == 0 &&
955 bmap->bitmap[6] == 0 &&
956 bmap->bitmap[7] == 0) {
957 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
958 kprintf("Freeseg %016jx\n", (intmax_t)key);
964 * chain->bref.check.freemap.bigmask (XXX)
966 * Setting bigmask is a hint to the allocation code that there might
967 * be something allocatable. We also set this in recovery... it
968 * doesn't hurt and we might want to use the hint for other validation
969 * operations later on.
972 chain->bref.check.freemap.bigmask |= 1 << radix;
974 hammer2_chain_unlock(chain);
975 hammer2_chain_unlock(parent);
977 atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
978 if (trans->flags & HAMMER2_TRANS_ISFLUSH)