 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * 3. Neither the name of The DragonFly Project nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific, prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
/*
 * Freemap allocation iterator state.
 * NOTE(review): the struct's field list is not visible in this extract;
 * bpref/bnext/loops usage below suggests it carries the search cursor —
 * confirm against the full source.
 */
48 struct hammer2_fiterate {
54 typedef struct hammer2_fiterate hammer2_fiterate_t;
/* Forward declarations for the static helpers defined later in this file. */
56 static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
57 hammer2_chain_t **parentp, hammer2_blockref_t *bref,
58 int radix, hammer2_fiterate_t *iter);
59 static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
60 hammer2_key_t key, hammer2_chain_t *chain);
61 static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
62 hammer2_bmap_data_t *bmap, uint16_t class,
63 int n, int radix, hammer2_key_t *basep);
64 static int hammer2_freemap_iterate(hammer2_trans_t *trans,
65 hammer2_chain_t **parentp, hammer2_chain_t **chainp,
66 hammer2_fiterate_t *iter);
/*
 * hammer2_freemapradix() - body not visible in this extract; only the
 * signature survives.  Do not infer behavior from the name alone.
 */
70 hammer2_freemapradix(int radix)
76 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
77 * bref. Return a combined media offset and physical size radix. Freemap
78 * chains use fixed storage offsets in the 4MB reserved area at the
79 * beginning of each 2GB zone
81 * Rotate between four possibilities. Theoretically this means we have three
82 * good freemaps in case of a crash which we can use as a base for the fixup
/* Mask a key down to the base of its (1 << radix) aligned region. */
85 #define H2FMBASE(key, radix) ((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
/* Size of a (1 << radix) region as a hammer2_off_t. */
86 #define H2FMSHIFT(radix) ((hammer2_off_t)1 << (radix))
/*
 * hammer2_freemap_reserve() - Assign fixed reserved-area storage to a
 * FREEMAP_NODE or FREEMAP_LEAF bref, rotating among the
 * HAMMER2_ZONE_FREEMAP_COPIES slots so that flush and normal transactions
 * running concurrently do not overwrite each other's freemap copy.
 *
 * NOTE(review): interior lines of this function are missing from this
 * extract (declarations, braces, return path); comments below describe
 * only the visible logic.
 */
90 hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain,
93 hammer2_blockref_t *bref = &chain->bref;
99 * Physical allocation size -> radix. Typically either 256 for
100 * a level 0 freemap leaf or 65536 for a level N freemap node.
102 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
103 * Do not use hammer2_allocsize() here as it has a min cap.
108 * Calculate block selection index 0..7 of current block.
110 if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
/* Derive the current copy index from the block's offset within the zone. */
113 off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
114 (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
115 off = off / HAMMER2_PBUFSIZE;
116 KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
117 off < HAMMER2_ZONE_FREEMAP_END);
118 index = (int)(off - HAMMER2_ZONE_FREEMAP_00) / 4;
119 KKASSERT(index >= 0 && index < HAMMER2_ZONE_FREEMAP_COPIES);
123 * Calculate new index (our 'allocation'). We have to be careful
124 * here as there can be two different transaction ids running
125 * concurrently when a flush is in-progress.
127 * We also want to make sure, for algorithmic repeatability, that
128 * the index sequences are monotonic with transaction ids. Some
129 * skipping is allowed as long as we ensure that all four volume
130 * header backups have consistent freemaps.
132 * FLUSH NORMAL FLUSH NORMAL FLUSH NORMAL FLUSH NORMAL
134 * (0->1) (1->3) (3->4) (4->6) (6->7) (7->9) (9->10) (10->12)
136 * [-concurrent-][-concurrent-][-concurrent-][-concurrent-]
138 * (alternative first NORMAL might be 0->2 if flush had not yet
139 * modified the chain, this is the worst case).
141 if ((trans->flags & HAMMER2_TRANS_ISFLUSH) == 0) {
143 * Normal transactions always run with the highest TID.
144 * But if a flush is in-progress we want to reserve a slot
145 * for the flush with a lower TID running concurrently to
146 * do a delete-duplicate.
148 index = (index + 2) % HAMMER2_ZONE_FREEMAP_COPIES;
149 } else if (trans->flags & HAMMER2_TRANS_ISALLOCATING) {
151 * Flush transaction, hammer2_freemap.c itself is doing a
152 * delete-duplicate during an allocation within the freemap.
154 index = (index + 1) % HAMMER2_ZONE_FREEMAP_COPIES;
157 * Flush transaction, hammer2_flush.c is doing a
158 * delete-duplicate on the freemap while flushing
161 index = (index + 1) % HAMMER2_ZONE_FREEMAP_COPIES;
165 * Calculate the block offset of the reserved block. This will
166 * point into the 4MB reserved area at the base of the appropriate
167 * 2GB zone, once added to the FREEMAP_x selection above.
169 switch(bref->keybits) {
170 /* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
171 case HAMMER2_FREEMAP_LEVEL4_RADIX: /* 2EB */
172 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
173 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
174 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
175 (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
176 HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
178 case HAMMER2_FREEMAP_LEVEL3_RADIX: /* 2PB */
179 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
180 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
181 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
182 (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
183 HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
185 case HAMMER2_FREEMAP_LEVEL2_RADIX: /* 2TB */
186 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
187 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
188 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
189 (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
190 HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
192 case HAMMER2_FREEMAP_LEVEL1_RADIX: /* 2GB */
193 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
194 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
195 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
196 (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
197 HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
/* Unknown keybits: cannot place the block, poison the offset. */
200 panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
202 off = (hammer2_off_t)-1;
/* Combined media offset + size radix is stored in data_off. */
205 bref->data_off = off | radix;
207 kprintf("-> %016jx\n", bref->data_off);
213 * Normal freemap allocator
215 * Use available hints to allocate space using the freemap. Create missing
216 * freemap infrastructure on-the-fly as needed (including marking initial
217 * allocations using the iterator as allocated, instantiating new 2GB zones,
218 * and dealing with the end-of-media edge case).
220 * ip and bpref are only used as a heuristic to determine locality of
221 * reference. bref->key may also be used heuristically.
223 * WARNING! When called from a flush we have to use the 'live' sync_tid
224 * and not the flush sync_tid. The live sync_tid is the flush
225 * sync_tid + 1. That is, freemap allocations which occur during
226 * a flush are not part of the flush. Crash-recovery will restore
227 * any lost allocations.
/*
 * hammer2_freemap_alloc() - Allocate (1 << radix(bytes)) bytes of media
 * storage for the chain's bref.  Freemap metadata blocks are diverted to
 * hammer2_freemap_reserve(); everything else goes through the iterative
 * freemap search (hammer2_freemap_try_alloc()).
 *
 * NOTE(review): interior lines (locals, braces, return) are missing from
 * this extract.
 */
230 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
233 hammer2_mount_t *hmp = chain->hmp;
234 hammer2_blockref_t *bref = &chain->bref;
235 hammer2_chain_t *parent;
239 hammer2_fiterate_t iter;
242 * Validate the allocation size. It must be a power of 2.
244 * For now require that the caller be aware of the minimum
247 radix = hammer2_getradix(bytes);
248 KKASSERT((size_t)1 << radix == bytes);
251 * Freemap blocks themselves are simply assigned from the reserve
252 * area, not allocated from the freemap.
254 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
255 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
256 return (hammer2_freemap_reserve(trans, chain, radix));
260 * Mark previously allocated block as possibly freeable. There might
261 * be snapshots and other races so we can't just mark it fully free.
262 * (XXX optimize this for the current-transaction create+delete case)
264 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) {
265 hammer2_freemap_adjust(trans, hmp, bref,
266 HAMMER2_FREEMAP_DOMAYFREE);
270 * Setting ISALLOCATING ensures correct operation even when the
271 * flusher itself is making allocations.
273 KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC);
274 KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
275 atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
276 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
280 * Calculate the starting point for our allocation search.
282 * Each freemap leaf is dedicated to a specific freemap_radix.
283 * The freemap_radix can be more fine-grained than the device buffer
284 * radix which results in inodes being grouped together in their
285 * own segment, terminal-data (16K or less) and initial indirect
286 * block being grouped together, and then full-indirect and full-data
287 * blocks (64K) being grouped together.
289 * The single most important aspect of this is the inode grouping
290 * because that is what allows 'find' and 'ls' and other filesystem
291 * topology operations to run fast.
/* Locality preference: prior offset > transaction hint > owning inode. */
294 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
295 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
296 else if (trans->tmp_bpref)
297 bpref = trans->tmp_bpref;
298 else if (trans->tmp_ip)
299 bpref = trans->tmp_ip->chain->bref.data_off;
303 * Heuristic tracking index. We would like one for each distinct
304 * bref type if possible. heur_freemap[] has room for two classes
305 * for each type. At a minimum we have to break-up our heuristic
306 * by device block sizes.
308 hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
309 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
310 hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
311 hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
312 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);
314 iter.bpref = hmp->heur_freemap[hindex];
317 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's
318 * reserved area, the try code will iterate past it.
320 if (iter.bpref > hmp->voldata.volu_size)
321 iter.bpref = hmp->voldata.volu_size - 1;
324 * Iterate the freemap looking for free space before and after.
326 parent = &hmp->fchain;
327 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
329 iter.bnext = iter.bpref;
/* try_alloc returns EAGAIN to continue scanning at iter.bnext. */
332 while (error == EAGAIN) {
333 error = hammer2_freemap_try_alloc(trans, &parent, bref,
/* Remember where we stopped so the next allocation starts nearby. */
336 hmp->heur_freemap[hindex] = iter.bnext;
337 hammer2_chain_unlock(parent);
339 atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
340 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
/*
 * hammer2_freemap_try_alloc() - Attempt one allocation pass within the
 * 2GB level-1 freemap leaf covering iter->bnext, creating and
 * initializing the leaf on-the-fly if it does not yet exist.  Returns 0
 * on success (bref->data_off filled in), EAGAIN to continue at the next
 * leaf, or passes through ENOSPC.
 *
 * NOTE(review): interior lines are missing from this extract; the scan
 * loop below is visibly incomplete (both an upward and a downward probe
 * from 'start' appear to exist in the full source).
 */
347 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
348 hammer2_blockref_t *bref, int radix,
349 hammer2_fiterate_t *iter)
351 hammer2_mount_t *hmp = (*parentp)->hmp;
352 hammer2_off_t l0size;
353 hammer2_off_t l1size;
354 hammer2_off_t l1mask;
355 hammer2_key_t key_dummy;
356 hammer2_chain_t *chain;
361 int cache_index = -1;
365 * Calculate the number of bytes being allocated, the number
366 * of contiguous bits of bitmap being allocated, and the bitmap
369 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
372 bytes = (size_t)1 << radix;
/* Allocation class pairs the bref type with the device block radix. */
373 class = (bref->type << 8) | hammer2_devblkradix(radix);
376 * Lookup the level1 freemap chain, creating and initializing one
377 * if necessary. Intermediate levels will be created automatically
378 * when necessary by hammer2_chain_create().
380 key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
381 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
382 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
385 chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
387 HAMMER2_LOOKUP_ALWAYS |
388 HAMMER2_LOOKUP_MATCHIND);
392 * Create the missing leaf, be sure to initialize
393 * the auxiliary freemap tracking information in
394 * the bref.check.freemap structure.
397 kprintf("freemap create L1 @ %016jx bpref %016jx\n",
400 error = hammer2_chain_create(trans, parentp, &chain,
401 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
402 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
403 HAMMER2_FREEMAP_LEVELN_PSIZE);
404 KKASSERT(error == 0);
406 hammer2_chain_modify(trans, &chain, 0);
407 bzero(&chain->data->bmdata[0],
408 HAMMER2_FREEMAP_LEVELN_PSIZE);
/* Fresh leaf: assume every radix might fit until proven otherwise. */
409 chain->bref.check.freemap.bigmask = (uint32_t)-1;
410 chain->bref.check.freemap.avail = l1size;
411 /* bref.methods should already be inherited */
413 hammer2_freemap_init(trans, hmp, key, chain);
415 } else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) {
417 * Already flagged as not having enough space
422 * Modify existing chain to setup for adjustment.
424 hammer2_chain_modify(trans, &chain, 0);
431 hammer2_bmap_data_t *bmap;
432 hammer2_key_t base_key;
437 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
/* Index of the 2MB sub-map within this leaf to start scanning from. */
438 start = (int)((iter->bnext - key) >>
439 HAMMER2_FREEMAP_LEVEL0_RADIX);
440 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
441 hammer2_chain_modify(trans, &chain, 0);
444 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
445 if (start + count >= HAMMER2_FREEMAP_COUNT &&
450 bmap = &chain->data->bmdata[n];
451 if (n < HAMMER2_FREEMAP_COUNT && bmap->avail &&
452 (bmap->class == 0 || bmap->class == class)) {
453 base_key = key + n * l0size;
454 error = hammer2_bmap_alloc(trans, hmp, bmap,
457 if (error != ENOSPC) {
463 bmap = &chain->data->bmdata[n];
464 if (n >= 0 && bmap->avail &&
465 (bmap->class == 0 || bmap->class == class)) {
466 base_key = key + n * l0size;
467 error = hammer2_bmap_alloc(trans, hmp, bmap,
470 if (error != ENOSPC) {
/* Leaf exhausted for this radix: clear the hint bit. */
477 chain->bref.check.freemap.bigmask &= ~(1 << radix);
478 /* XXX also scan down from original count */
483 * Assert validity. Must be beyond the static allocator used
484 * by newfs_hammer2 (and thus also beyond the aux area),
485 * not go past the volume size, and must not be in the
486 * reserved segment area for a zone.
488 KKASSERT(key >= hmp->voldata.allocator_beg &&
489 key + bytes <= hmp->voldata.volu_size);
490 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
491 bref->data_off = key | radix;
494 kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
496 bref->key, bref->data_off, chain->bref.data_off);
498 } else if (error == ENOSPC) {
500 * Return EAGAIN with next iteration in iter->bnext, or
501 * return ENOSPC if the allocation map has been exhausted.
503 error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
510 hammer2_chain_unlock(chain);
515 * Allocate (1<<radix) bytes from the bmap whos base data offset is (*basep).
517 * If the linear iterator is mid-block we use it directly (the bitmap should
518 * already be marked allocated), otherwise we search for a block in the bitmap
519 * that fits the allocation request.
521 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
522 * to fully allocated and adjusts the linear allocator to allow the
523 * remaining space to be allocated.
/*
 * hammer2_bmap_alloc() - Allocate (1 << radix) bytes out of a single 2MB
 * bmap, either by extending the linear sub-16KB packer or by scanning
 * the 2-bits-per-16KB bitmap for a free run.  On success *basep is
 * adjusted to the allocated data offset and volu.allocator_free is
 * debited.
 *
 * NOTE(review): interior lines (locals, braces, the ENOSPC path) are
 * missing from this extract.
 */
527 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
528 hammer2_bmap_data_t *bmap,
529 uint16_t class, int n, int radix, hammer2_key_t *basep)
542 * Take into account 2-bits per block when calculating bmradix.
544 size = (size_t)1 << radix;
546 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
548 bsize = HAMMER2_FREEMAP_BLOCK_SIZE;
549 /* (16K) 2 bits per allocation block */
551 bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
553 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */
557 * Use the linear iterator to pack small allocations, otherwise
558 * fall-back to finding a free 16KB chunk. The linear iterator
559 * is only valid when *NOT* on a freemap chunking boundary (16KB).
560 * If it is the bitmap must be scanned. It can become invalid
561 * once we pack to the boundary. We adjust it after a bitmap
562 * allocation only for sub-16KB allocations (so the perfectly good
563 * previous value can still be used for fragments when 16KB+
564 * allocations are made).
566 * Beware of hardware artifacts when bmradix == 32 (intermediate
567 * result can wind up being '1' instead of '0' if hardware masks
570 * NOTE: j needs to be even in the j= calculation. As an artifact
571 * of the /2 division, our bitmask has to clear bit 0.
573 * NOTE: TODO this can leave little unallocatable fragments lying
576 if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
577 HAMMER2_FREEMAP_BLOCK_SIZE &&
578 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
579 bmap->linear < HAMMER2_SEGSIZE) {
580 KKASSERT(bmap->linear >= 0 &&
581 bmap->linear + size <= HAMMER2_SEGSIZE &&
582 (bmap->linear & (HAMMER2_MIN_ALLOC - 1)) == 0);
583 offset = bmap->linear;
584 i = offset / (HAMMER2_SEGSIZE / 8);
585 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
/* Ternary guards against UB from a 32-bit shift by 32. */
586 bmmask = (bmradix == 32) ?
587 0xFFFFFFFFU : (1 << bmradix) - 1;
589 bmap->linear = offset + size;
591 for (i = 0; i < 8; ++i) {
592 bmmask = (bmradix == 32) ?
593 0xFFFFFFFFU : (1 << bmradix) - 1;
594 for (j = 0; j < 32; j += bmradix) {
595 if ((bmap->bitmap[i] & bmmask) == 0)
600 /*fragments might remain*/
601 /*KKASSERT(bmap->avail == 0);*/
604 offset = i * (HAMMER2_SEGSIZE / 8) +
605 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
/* Only sub-16KB allocations re-seed the linear packer. */
606 if (size & HAMMER2_FREEMAP_BLOCK_MASK)
607 bmap->linear = offset + size;
610 KKASSERT(i >= 0 && i < 8); /* 8 x 16 -> 128 x 16K -> 2MB */
613 * Optimize the buffer cache to avoid unnecessary read-before-write
616 * The device block size could be larger than the allocation size
617 * so the actual bitmap test is somewhat more involved. We have
618 * to use a compatible buffer size for this operation.
620 if ((bmap->bitmap[i] & bmmask) == 0 &&
621 hammer2_devblksize(size) != size) {
622 size_t psize = hammer2_devblksize(size);
623 hammer2_off_t pmask = (hammer2_off_t)psize - 1;
624 int pbmradix = 2 << (hammer2_devblkradix(radix) -
625 HAMMER2_FREEMAP_BLOCK_RADIX);
627 int pradix = hammer2_getradix(psize);
629 pbmmask = (pbmradix == 32) ? 0xFFFFFFFFU : (1 << pbmradix) - 1;
630 while ((pbmmask & bmmask) == 0)
631 pbmmask <<= pbmradix;
634 kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n",
635 *basep + offset, bmap->bitmap[i],
636 pbmmask, bmmask, size, psize);
/* Whole device block free: create the buffer without reading media. */
639 if ((bmap->bitmap[i] & pbmmask) == 0) {
640 error = hammer2_io_newq(hmp,
641 (*basep + (offset & ~pmask)) |
644 hammer2_io_bqrelse(&dio);
650 * When initializing a new inode segment also attempt to initialize
651 * an adjacent segment. Be careful not to index beyond the array
654 * We do this to try to localize inode accesses to improve
655 * directory scan rates. XXX doesn't improve scan rates.
657 if (size == HAMMER2_INODE_BYTES) {
659 if (bmap[-1].radix == 0 && bmap[-1].avail)
660 bmap[-1].radix = radix;
662 if (bmap[1].radix == 0 && bmap[1].avail)
663 bmap[1].radix = radix;
669 * Adjust the linear iterator, set the radix if necessary (might as
670 * well just set it unconditionally), adjust *basep to return the
671 * allocated data offset.
673 bmap->bitmap[i] |= bmmask;
678 hammer2_voldata_lock(hmp);
679 hmp->voldata.allocator_free -= size; /* XXX */
680 hammer2_voldata_unlock(hmp, 1);
/*
 * hammer2_freemap_init() - Initialize a freshly created level-1 freemap
 * leaf covering the 2GB region at 'key'.  Sub-maps outside [lokey, hikey)
 * (the statically allocated front, each zone's reserved segment, and any
 * tail past end-of-volume) are marked fully allocated; the rest marked
 * fully free.
 *
 * NOTE(review): interior lines (locals, braces, the else-arm of the loop)
 * are missing from this extract.
 */
687 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
688 hammer2_key_t key, hammer2_chain_t *chain)
690 hammer2_off_t l1size;
693 hammer2_bmap_data_t *bmap;
696 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
699 * Calculate the portion of the 2GB map that should be initialized
700 * as free. Portions below or after will be initialized as allocated.
701 * SEGMASK-align the areas so we don't have to worry about sub-scans
702 * or endianness when using memset.
704 * (1) Ensure that all statically allocated space from newfs_hammer2
705 * is marked allocated.
707 * (2) Ensure that the reserved area is marked allocated (typically
708 * the first 4MB of the 2GB area being represented).
710 * (3) Ensure that any trailing space at the end-of-volume is marked
713 * WARNING! It is possible for lokey to be larger than hikey if the
714 * entire 2GB segment is within the static allocation.
716 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
719 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
720 HAMMER2_ZONE_SEG64) {
721 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
725 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
726 if (hikey > hmp->voldata.volu_size) {
727 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
730 chain->bref.check.freemap.avail =
731 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
732 bmap = &chain->data->bmdata[0];
734 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
735 if (key < lokey || key >= hikey) {
/* Out-of-bounds 2MB sub-map: all bitmap pairs set = fully allocated. */
736 memset(bmap->bitmap, -1,
737 sizeof(bmap->bitmap));
739 bmap->linear = HAMMER2_SEGSIZE;
740 chain->bref.check.freemap.avail -=
741 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
743 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
745 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
751 * The current Level 1 freemap has been exhausted, iterate to the next
752 * one, return ENOSPC if no freemaps remain.
754 * XXX this should rotate back to the beginning to handle freed-up space
755 * XXX or use intermediate entries to locate free space. TODO
/*
 * hammer2_freemap_iterate() - Advance iter->bnext to the base of the next
 * 2GB level-1 region.  Past end-of-volume the iterator gives up after a
 * second loop (returning ENOSPC in the full source — the tail of this
 * function is not visible in this extract).
 */
758 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
759 hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
761 hammer2_mount_t *hmp = (*parentp)->hmp;
763 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
764 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
765 if (iter->bnext >= hmp->voldata.volu_size) {
767 if (++iter->loops == 2)
774 * Free the specified blockref. This code is only able to fully free
775 * blocks when (how) is non-zero, otherwise the block is marked for
776 * the bulk freeing pass to check.
778 * Normal use is to only mark inodes as possibly being free. The underlying
779 * file blocks are not necessarily marked. The bulk freescan can
780 * theoretically handle the case.
782 * XXX currently disabled when how == 0 (the normal real-time case). At
783 * the moment we depend on the bulk freescan to actually free blocks. It
784 * will still call this routine with a non-zero how to stage possible frees
785 * and to do the actual free.
787 * WARNING! When called from a flush we have to use the 'live' sync_tid
788 * and not the flush sync_tid. The live sync_tid is the flush
789 * sync_tid + 1. That is, freemap allocations which occur during
790 * a flush are not part of the flush. Crash-recovery will restore
791 * any lost allocations.
/*
 * hammer2_freemap_adjust() - Adjust the freemap state of the block
 * described by bref according to 'how': DORECOVER marks it allocated
 * (creating the leaf if missing), DOMAYFREE stages it for the bulk
 * freeing pass (11 -> 10), and DOREALFREE fully frees it (-> 00) and
 * credits bmap->avail.
 *
 * NOTE(review): interior lines (locals, braces, early-unwind paths, and
 * the function tail) are missing from this extract; the function runs
 * past the end of the visible chunk.
 */
794 hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
795 hammer2_blockref_t *bref, int how)
797 hammer2_off_t data_off = bref->data_off;
798 hammer2_chain_t *chain;
799 hammer2_chain_t *parent;
800 hammer2_bmap_data_t *bmap;
802 hammer2_key_t key_dummy;
803 hammer2_off_t l0size;
804 hammer2_off_t l1size;
805 hammer2_off_t l1mask;
807 const uint32_t bmmask00 = 0;
817 int cache_index = -1;
/* Split data_off into media offset and size radix. */
820 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
821 data_off &= ~HAMMER2_OFF_MASK_RADIX;
822 KKASSERT(radix <= HAMMER2_MAX_RADIX);
824 bytes = (size_t)1 << radix;
825 class = (bref->type << 8) | hammer2_devblkradix(radix);
828 * We can't adjust the freemap for data allocations made by
831 if (data_off < hmp->voldata.allocator_beg)
834 KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
835 KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
836 atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
837 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
841 * Lookup the level1 freemap chain. The chain must exist.
843 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
844 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
845 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
848 parent = &hmp->fchain;
849 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
851 chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
853 HAMMER2_LOOKUP_ALWAYS |
854 HAMMER2_LOOKUP_MATCHIND);
857 * Stop early if we are trying to free something but no leaf exists.
859 if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
860 kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
861 (intmax_t)bref->data_off);
866 * Create any missing leaf(s) if we are doing a recovery (marking
867 * the block(s) as being allocated instead of being freed). Be sure
868 * to initialize the auxiliary freemap tracking info in the
869 * bref.check.freemap structure.
871 if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
872 error = hammer2_chain_create(trans, &parent, &chain,
873 key, HAMMER2_FREEMAP_LEVEL1_RADIX,
874 HAMMER2_BREF_TYPE_FREEMAP_LEAF,
875 HAMMER2_FREEMAP_LEVELN_PSIZE);
877 if (hammer2_debug & 0x0040) {
878 kprintf("fixup create chain %p %016jx:%d\n",
879 chain, chain->bref.key, chain->bref.keybits);
883 hammer2_chain_modify(trans, &chain, 0);
884 bzero(&chain->data->bmdata[0],
885 HAMMER2_FREEMAP_LEVELN_PSIZE);
886 chain->bref.check.freemap.bigmask = (uint32_t)-1;
887 chain->bref.check.freemap.avail = l1size;
888 /* bref.methods should already be inherited */
890 hammer2_freemap_init(trans, hmp, key, chain);
892 /* XXX handle error */
896 * Calculate the bitmask (runs in 2-bit pairs).
898 start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
899 bmmask01 = 1 << start;
900 bmmask10 = 2 << start;
901 bmmask11 = 3 << start;
904 * Fixup the bitmap. Partial blocks cannot be fully freed unless
905 * a bulk scan is able to roll them up.
907 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
/* Sub-16KB block: downgrade a real free to a may-free. */
909 if (how == HAMMER2_FREEMAP_DOREALFREE)
910 how = HAMMER2_FREEMAP_DOMAYFREE;
912 count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
916 * [re]load the bmap and bitmap pointers. Each bmap entry covers
917 * a 2MB swath. The bmap itself (LEVEL1) covers 2GB.
920 bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
921 (HAMMER2_FREEMAP_COUNT - 1)];
922 bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
927 if (how == HAMMER2_FREEMAP_DORECOVER) {
929 * Recovery request, mark as allocated.
931 if ((*bitmap & bmmask11) != bmmask11) {
933 hammer2_chain_modify(trans, &chain, 0);
937 if ((*bitmap & bmmask11) == bmmask00)
938 bmap->avail -= 1 << radix;
939 if (bmap->class == 0)
942 if (hammer2_debug & 0x0040) {
943 kprintf("hammer2_freemap_recover: "
945 "block=%016jx/%zd\n",
946 bref->type, data_off, bytes);
950 kprintf("hammer2_freemap_recover: good "
951 "type=%02x block=%016jx/%zd\n",
952 bref->type, data_off, bytes);
955 } else if ((*bitmap & bmmask11) == bmmask11) {
957 * Mayfree/Realfree request and bitmap is currently
958 * marked as being fully allocated.
961 hammer2_chain_modify(trans, &chain, 0);
965 if (how == HAMMER2_FREEMAP_DOREALFREE)
966 *bitmap &= ~bmmask11;
968 *bitmap = (*bitmap & ~bmmask11) | bmmask10;
969 } else if ((*bitmap & bmmask11) == bmmask10) {
971 * Mayfree/Realfree request and bitmap is currently
972 * marked as being possibly freeable.
974 if (how == HAMMER2_FREEMAP_DOREALFREE) {
976 hammer2_chain_modify(trans, &chain, 0);
980 *bitmap &= ~bmmask11;
984 * 01 - Not implemented, currently illegal state
985 * 00 - Not allocated at all, illegal free.
987 panic("hammer2_freemap_adjust: "
988 "Illegal state %08x(%08x)",
989 *bitmap, *bitmap & bmmask11);
996 if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
997 bmap->avail += 1 << radix;
998 KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
999 if (bmap->avail == HAMMER2_SEGSIZE &&
1000 bmap->bitmap[0] == 0 &&
1001 bmap->bitmap[1] == 0 &&
1002 bmap->bitmap[2] == 0 &&
1003 bmap->bitmap[3] == 0 &&
1004 bmap->bitmap[4] == 0 &&
1005 bmap->bitmap[5] == 0 &&
1006 bmap->bitmap[6] == 0 &&
1007 bmap->bitmap[7] == 0) {
1008 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
1009 kprintf("Freeseg %016jx\n", (intmax_t)key);
1015 * chain->bref.check.freemap.bigmask (XXX)
1017 * Setting bigmask is a hint to the allocation code that there might
1018 * be something allocatable. We also set this in recovery... it
1019 * doesn't hurt and we might want to use the hint for other validation
1020 * operations later on.
1023 chain->bref.check.freemap.bigmask |= 1 << radix;
1025 hammer2_chain_unlock(chain);
1027 hammer2_chain_unlock(parent);
1028 atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
1029 if (trans->flags & HAMMER2_TRANS_ISFLUSH)