 * Bitmap allocator/deallocator, using a radix tree with hinting.
 * Unlimited-size allocations, power-of-2 only, power-of-2 aligned results
 * only.  This module has been separated from the generic kernel module and
 * written specifically for embedding in HAMMER storage structures.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/Attic/hammer_alist.c,v 1.1 2007/10/10 19:37:25 dillon Exp $
 */
/*
 * This module implements a generic allocator through the use of a hinted
 * radix tree.  All allocations must be in powers of 2 and will return
 * similarly aligned results.  The radix tree typically recurses within
 * a memory buffer and then continues its recursion by chaining to other
 * memory buffers, creating a seamless whole capable of managing any amount
 * of storage.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * HAMMER_ALIST_META_RADIX lower-level nodes.  This is a recursive structure
 * but one that can be easily scanned through a very simple 'skip'
 * calculation.
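 *
 * For illustration (assuming HAMMER_ALIST_BMAP_RADIX == 32 and
 * HAMMER_ALIST_META_RADIX == 16; the actual values are defined
 * elsewhere), a two-level tree covering 512 blocks is laid out as
 * 17 consecutive array elements:
 *
 *	index 0:	meta node, skip = 16 (children occupy slots 1-16)
 *	index 1:	leaf bitmap for blocks 0-31
 *	index 2:	leaf bitmap for blocks 32-63
 *	...
 *	index 16:	leaf bitmap for blocks 480-511
 *
 * A scan steps from one node to the next at the same level simply by
 * adding that level's skip value; no pointers are involved.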
 *
 * The radix tree supports an early-termination optimization which
 * effectively allows us to efficiently mix large and small allocations
 * with a single abstraction.  The address space can be partitioned
 * arbitrarily without adding much in the way of additional meta-storage
 * for the allocator.
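 *
 * For example, an allocation that spans one or more whole children of a
 * meta node is satisfied directly out of that node's 2-bit-per-child
 * bitmap, terminating the recursion at that level, while a single-block
 * allocation recurses all the way down to a leaf bitmap.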
 *
 * This code can be compiled stand-alone for debugging.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <assert.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)	assert(exp)
struct malloc_type;

#include "hammerfs.h"

void panic(const char *ctl, ...);

/*
 * static support functions
 */

static int32_t	hammer_alst_leaf_alloc_fwd(hammer_almeta_t *scan,
					int32_t blk, int count);
static int32_t	hammer_alst_meta_alloc_fwd(hammer_almeta_t *scan,
					int32_t blk, int32_t count,
					int32_t radix, int skip);
static int32_t	hammer_alst_leaf_alloc_rev(hammer_almeta_t *scan,
					int32_t blk, int count);
static int32_t	hammer_alst_meta_alloc_rev(hammer_almeta_t *scan,
					int32_t blk, int32_t count,
					int32_t radix, int skip);
static void	hammer_alst_leaf_free(hammer_almeta_t *scan,
					int32_t relblk, int count);
static void	hammer_alst_meta_free(hammer_almeta_t *scan,
					int32_t freeBlk, int32_t count,
					int32_t radix, int skip, int32_t blk);
static int32_t	hammer_alst_radix_init(hammer_almeta_t *scan,
					int32_t radix, int skip, int32_t count);
static void	hammer_alst_radix_print(hammer_almeta_t *scan, int32_t blk,
					int32_t radix, int skip, int tab);

/*
 * Initialize an alist for use.  The alist will initially be marked
 * all-allocated so the caller must free the portion it wishes to manage.
 */
void
hammer_alist_template(hammer_alist_t bl, int blocks, int maxmeta)
{
	int radix;
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = HAMMER_ALIST_BMAP_RADIX;
	while (radix < blocks) {
		radix *= HAMMER_ALIST_META_RADIX;
		skip = (skip + 1) * HAMMER_ALIST_META_RADIX;
	}

	bzero(bl, sizeof(*bl));
	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    hammer_alst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	KKASSERT(bl->bl_rootblks <= maxmeta);
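
	/*
	 * Worked example (assuming HAMMER_ALIST_BMAP_RADIX == 32 and
	 * HAMMER_ALIST_META_RADIX == 16; the values are defined outside
	 * this file): for blocks == 1024 the loop above runs twice,
	 *
	 *	radix:	32 -> 512 -> 8192
	 *	skip:	 0 ->  16 ->  272
	 *
	 * so the root meta node logically spans 8192 blocks and its
	 * children (plus their subtrees) occupy the next 272 array slots.
	 */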

#if defined(ALIST_DEBUG)
	kprintf(
		"ALIST representing %d blocks (%d MB of swap)"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(hammer_almeta_t) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(hammer_almeta_t))
	);
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
}

void
hammer_alist_init(hammer_alist_t bl, hammer_almeta_t *meta)
{
	hammer_alst_radix_init(meta, bl->bl_radix, bl->bl_skip, bl->bl_blocks);
}

#if !defined(_KERNEL) && defined(ALIST_DEBUG)

/*
 * hammer_alist_create()	(userland only)
 *
 *	Create an alist capable of handling up to the specified number of
 *	blocks.  blocks must be greater than 0.
 *
 *	The smallest alist consists of a single leaf node capable of
 *	managing HAMMER_ALIST_BMAP_RADIX blocks.
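 *
 *	A minimal usage sketch, mirroring the stand-alone test driver at
 *	the bottom of this file (the alist starts out all-allocated, so
 *	the caller frees the range it wants to manage first):
 *
 *		hammer_almeta_t *meta = NULL;
 *		hammer_alist_t bl;
 *		int32_t blk;
 *
 *		bl = hammer_alist_create(1024, NULL, &meta);
 *		hammer_alist_free(bl, meta, 0, 1024);
 *		blk = hammer_alist_alloc(bl, meta, 16);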
 */
hammer_alist_t
hammer_alist_create(int32_t blocks, struct malloc_type *mtype,
		    hammer_almeta_t **metap)
{
	hammer_alist_t bl;
	hammer_almeta_t *meta;
	int radix;
	int skip = 0;
	int rootblks;
	int metasize;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = HAMMER_ALIST_BMAP_RADIX;
	while (radix < blocks) {
		radix *= HAMMER_ALIST_META_RADIX;
		skip = (skip + 1) * HAMMER_ALIST_META_RADIX;
	}

	rootblks = 1 + hammer_alst_radix_init(NULL, radix, skip, blocks);
	metasize = sizeof(struct hammer_almeta) * rootblks;
	bl = kmalloc(sizeof(struct hammer_alist), mtype, M_WAITOK);
	meta = kmalloc(metasize, mtype, M_WAITOK);

	bzero(bl, sizeof(*bl));
	bzero(meta, metasize);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = rootblks;

#if defined(ALIST_DEBUG)
	kprintf(
		"ALIST representing %d blocks (%d MB of swap)"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(hammer_almeta_t) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(hammer_almeta_t))
	);
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	hammer_alst_radix_init(meta, bl->bl_radix, bl->bl_skip, blocks);
	*metap = meta;
	return(bl);
}

void
hammer_alist_destroy(hammer_alist_t bl, struct malloc_type *mtype)

/*
 * hammer_alist_alloc()
 *
 *	Reserve space in the block bitmap.  Return the base of a contiguous
 *	region or HAMMER_ALIST_BLOCK_NONE if space could not be allocated.
 */
int32_t
hammer_alist_alloc(hammer_alist_t bl, hammer_almeta_t *meta, int32_t count)
{
	int32_t blk = HAMMER_ALIST_BLOCK_NONE;

	KKASSERT((count | (count - 1)) == (count << 1) - 1);
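
	/*
	 * The assertion above is a power-of-2 check: for count == 8,
	 * (8 | 7) == 15 == (8 << 1) - 1, so it passes; for count == 6,
	 * (6 | 5) == 7 while (6 << 1) - 1 == 11, so it fires.
	 */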

	if (bl && count < bl->bl_radix) {
		if (bl->bl_radix == HAMMER_ALIST_BMAP_RADIX) {
			blk = hammer_alst_leaf_alloc_fwd(meta, 0, count);
		} else {
			blk = hammer_alst_meta_alloc_fwd(
			    meta, 0, count, bl->bl_radix, bl->bl_skip);
		}
		if (blk != HAMMER_ALIST_BLOCK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

int32_t
hammer_alist_alloc_rev(hammer_alist_t bl, hammer_almeta_t *meta, int32_t count)
{
	int32_t blk = HAMMER_ALIST_BLOCK_NONE;

	KKASSERT((count | (count - 1)) == (count << 1) - 1);

	if (bl && count < bl->bl_radix) {
		if (bl->bl_radix == HAMMER_ALIST_BMAP_RADIX) {
			blk = hammer_alst_leaf_alloc_rev(meta, 0, count);
		} else {
			blk = hammer_alst_meta_alloc_rev(
			    meta, 0, count, bl->bl_radix, bl->bl_skip);
		}
		if (blk != HAMMER_ALIST_BLOCK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * hammer_alist_alloc_from()
 *
 *	An extended version of hammer_alist_alloc() which locates free space
 *	starting at the specified block, either forwards or backwards.
 *	HAMMER_ALIST_BLOCK_NONE is returned if space could not be allocated.
 *
 *	Note: when allocating forwards from a particular point, space is
 *	never allocated behind that start point; the same applies when
 *	allocating backwards.
 */
int32_t
hammer_alist_alloc_from(hammer_alist_t bl, hammer_almeta_t *meta,
			int32_t count, int32_t start, int flags)

/*
 * Free up space in the block bitmap.  Panic if an inconsistency is found.
 *
 * Unlike allocations, there are no alignment requirements for blkno or
 * count when freeing blocks.
 */
void
hammer_alist_free(hammer_alist_t bl, hammer_almeta_t *meta,
		  int32_t blkno, int32_t count)
{
	KKASSERT(blkno + count <= bl->bl_blocks);
	if (bl->bl_radix == HAMMER_ALIST_BMAP_RADIX)
		hammer_alst_leaf_free(meta, blkno, count);
	else
		hammer_alst_meta_free(meta, blkno, count,
				      bl->bl_radix, bl->bl_skip, 0);
	bl->bl_free += count;
}

/*
 * alist_print() - dump radix tree
 */
void
hammer_alist_print(hammer_alist_t bl, hammer_almeta_t *meta)
{
	kprintf("ALIST {\n");
	hammer_alst_radix_print(meta, 0, bl->bl_radix, bl->bl_skip, 4);
	kprintf("}\n");
}

/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 * These support functions do all the actual work.  They may seem
 * rather longish, but that's because I've commented them up.  The
 * actual code is straightforward.
 */

/*
 * hammer_alst_leaf_alloc_fwd()
 *
 *	Allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the HAMMER_ALIST_BMAP_RADIX block allocation cases.  Other
 *	cases are somewhat slower.  The 1 block allocation case is log2
 *	and extremely quick.
 */
static int32_t
hammer_alst_leaf_alloc_fwd(hammer_almeta_t *scan, int32_t blk, int count)
{
	u_int32_t orig = scan->bm_bitmap;

	/*
	 * Optimize bitmap all-allocated case.  Also, count = 1
	 * case assumes at least 1 bit is free in the bitmap, so
	 * we have to take care of this case here.
	 */
	if (orig == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	/*
	 * Optimized code to allocate one bit out of the bitmap
	 */
	int j = HAMMER_ALIST_BMAP_RADIX/2;

	mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2);

	if ((orig & mask) == 0) {

	scan->bm_bitmap &= ~(1 << r);

	/*
	 * Non-optimized code to allocate N bits out of the bitmap.
	 * The more bits, the faster the code runs.  It will run
	 * the slowest allocating 2 bits, but since there aren't any
	 * memory ops in the core loop (or shouldn't be, anyway),
	 * you probably won't notice the difference.
	 *
	 * Similar to the blist case, the alist code also requires
	 * allocations to be power-of-2 sized and aligned to the
	 * size of the allocation, which simplifies the algorithm.
	 */
	int n = HAMMER_ALIST_BMAP_RADIX - count;

	mask = (u_int32_t)-1 >> n;
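
	/*
	 * e.g. for count == 4: n == 28, so the scan starts with
	 * mask == 0x0000000f and shifts it left 4 bits per iteration,
	 * visiting each aligned 4-bit field in the word exactly once.
	 */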
	for (j = 0; j <= n; j += count) {
		if ((orig & mask) == mask) {
			scan->bm_bitmap &= ~mask;
			return(blk + j);
		}
		mask = mask << count;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * This version allocates blocks in the reverse direction.
 */
static int32_t
hammer_alst_leaf_alloc_rev(hammer_almeta_t *scan, int32_t blk, int count)
{
	u_int32_t orig = scan->bm_bitmap;

	/*
	 * Optimize bitmap all-allocated case.  Also, count = 1
	 * case assumes at least 1 bit is free in the bitmap, so
	 * we have to take care of this case here.
	 */
	if (orig == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	/*
	 * Optimized code to allocate one bit out of the bitmap
	 */
	int j = HAMMER_ALIST_BMAP_RADIX/2;
	int r = HAMMER_ALIST_BMAP_RADIX - 1;

	mask = ~((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2));

	if ((orig & mask) == 0) {

	scan->bm_bitmap &= ~(1 << r);

	/*
	 * Non-optimized code to allocate N bits out of the bitmap.
	 * The more bits, the faster the code runs.  It will run
	 * the slowest allocating 2 bits, but since there aren't any
	 * memory ops in the core loop (or shouldn't be, anyway),
	 * you probably won't notice the difference.
	 *
	 * Similar to the blist case, the alist code also requires
	 * allocations to be power-of-2 sized and aligned to the
	 * size of the allocation, which simplifies the algorithm.
	 *
	 * Initial mask if count == 2:  1100....0000
	 */
	int n = HAMMER_ALIST_BMAP_RADIX - count;

	mask = ((u_int32_t)-1 >> n) << n;
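
	/*
	 * e.g. for count == 4: n == 28 and the initial mask is
	 * 0xf0000000; each iteration shifts it right 4 bits, scanning
	 * the aligned fields from the top of the word downwards.
	 */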
	for (j = n; j >= 0; j -= count) {
		if ((orig & mask) == mask) {
			scan->bm_bitmap &= ~mask;
			return(blk + j);
		}
		mask = mask >> count;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * hammer_alst_meta_alloc_fwd()
 *
 *	Allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */
static int32_t
hammer_alst_meta_alloc_fwd(hammer_almeta_t *scan, int32_t blk, int32_t count,
			   int32_t radix, int skip)
{
	int i;
	u_int32_t mask, pmask;
	int next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->bm_bitmap == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	radix /= HAMMER_ALIST_META_RADIX;

	/*
	 * Radix now represents each bitmap entry for this meta node.  If
	 * the number of blocks being allocated can be fully represented,
	 * we allocate directly out of this meta node.
	 *
	 * Meta node bitmaps use 2 bits per block.
	 *
	 *	00	ALL-ALLOCATED
	 *	01	PARTIALLY-FREE/PARTIALLY-ALLOCATED
	 *	10	(RESERVED)
	 *	11	ALL-FREE
	 */
	if (count >= radix) {
		int n = count / radix * 2;	/* number of bits */
		int j;

		mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n);
		for (j = 0; j < HAMMER_ALIST_META_RADIX; j += n / 2) {
			if ((scan->bm_bitmap & mask) == mask) {
				scan->bm_bitmap &= ~mask;
				return(blk + j * radix);
			}
			mask <<= n;
		}
		if (scan->bm_bighint >= count)
			scan->bm_bighint = count >> 1;
		return(HAMMER_ALIST_BLOCK_NONE);
	}
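
	/*
	 * Worked example for the direct case above: if radix is now 4
	 * blocks per bitmap entry and count == 8, then n == 4 bits, the
	 * scan mask starts at 0x0000000f (two 2-bit entries) and j
	 * advances two entries per iteration; a hit at offset j returns
	 * blk + j * radix.
	 */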

	/*
	 * If not we have to recurse.
	 */
	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (int32_t)-1) {
			break;
		}
		if ((scan->bm_bitmap & mask) == mask) {
			scan[i].bm_bitmap = (u_int32_t)-1;
			scan[i].bm_bighint = radix;
		}

		if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			int32_t r;

			if (next_skip == 1) {
				r = hammer_alst_leaf_alloc_fwd(
				    &scan[i], blk, count);
			} else {
				r = hammer_alst_meta_alloc_fwd(
				    &scan[i], blk, count,
				    radix, next_skip - 1);
			}
			if (r != HAMMER_ALIST_BLOCK_NONE) {
				if (scan[i].bm_bitmap == 0) {
					scan->bm_bitmap &= ~mask;
				} else {
					scan->bm_bitmap &= ~mask;
					scan->bm_bitmap |= pmask;
				}
				return(r);
			}
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count >> 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * This version allocates blocks in the reverse direction.
 */
static int32_t
hammer_alst_meta_alloc_rev(hammer_almeta_t *scan, int32_t blk, int32_t count,
			   int32_t radix, int skip)
{
	int i, j;
	u_int32_t mask, pmask;
	int next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->bm_bitmap == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	radix /= HAMMER_ALIST_META_RADIX;

	/*
	 * Radix now represents each bitmap entry for this meta node.  If
	 * the number of blocks being allocated can be fully represented,
	 * we allocate directly out of this meta node.
	 *
	 * Meta node bitmaps use 2 bits per block.
	 *
	 *	00	ALL-ALLOCATED
	 *	01	PARTIALLY-FREE/PARTIALLY-ALLOCATED
	 *	10	(RESERVED)
	 *	11	ALL-FREE
	 */
	if (count >= radix) {
		int n = count / radix * 2;	/* number of bits */

		/*
		 * Initial mask if e.g. n == 2:  1100....0000
		 */
		mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n) <<
		       (HAMMER_ALIST_BMAP_RADIX - n);
		for (j = HAMMER_ALIST_META_RADIX - n / 2; j >= 0; j -= n / 2) {
			if ((scan->bm_bitmap & mask) == mask) {
				scan->bm_bitmap &= ~mask;
				return(blk + j * radix);
			}
			mask >>= n;
		}
		if (scan->bm_bighint >= count)
			scan->bm_bighint = count >> 1;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	/*
	 * If not we have to recurse.  Since we are going in the reverse
	 * direction we need an extra loop to determine if there is a
	 * terminator, then run backwards.
	 *
	 * This is a little weird but we do not want to overflow the
	 * mask/pmask in the loop.
	 */
	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (int32_t)-1)
			break;
	}

	mask = 0x00000003 << j;
	pmask = 0x00000001 << j;

		/*
		 * Initialize the bitmap in the child if allocating from
		 * the all-free case.
		 */
		if ((scan->bm_bitmap & mask) == mask) {
			scan[i].bm_bitmap = (u_int32_t)-1;
			scan[i].bm_bighint = radix;
		}

		/*
		 * Handle various count cases.  Bighint may be too large
		 * but is never too small.
		 */
		if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			int32_t r;

			if (next_skip == 1) {
				r = hammer_alst_leaf_alloc_rev(
				    &scan[i], blk, count);
			} else {
				r = hammer_alst_meta_alloc_rev(
				    &scan[i], blk, count,
				    radix, next_skip - 1);
			}
			if (r != HAMMER_ALIST_BLOCK_NONE) {
				if (scan[i].bm_bitmap == 0) {
					scan->bm_bitmap &= ~mask;
				} else {
					scan->bm_bitmap &= ~mask;
					scan->bm_bitmap |= pmask;
				}
				return(r);
			}
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count >> 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * Free allocated blocks from leaf bitmap.  The allocation code is
 * restricted to powers of 2, the freeing code is not.
 */
static void
hammer_alst_leaf_free(hammer_almeta_t *scan, int32_t blk, int count)
{
	/*
	 * free some data in this bitmap
	 *
	 *	0000111111111110000
	 */
	int n = blk & (HAMMER_ALIST_BMAP_RADIX - 1);
	u_int32_t mask;

	mask = ((u_int32_t)-1 << n) &
	       ((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - count - n));
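
	/*
	 * e.g. freeing count == 3 blocks at bit offset n == 4 yields
	 * mask == 0x00000070; bits 4-6 are turned back on below.
	 */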

	if (scan->bm_bitmap & mask)
		panic("hammer_alst_radix_free: freeing free block");
	scan->bm_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = HAMMER_ALIST_BMAP_RADIX;
}

/*
 * Free allocated blocks from radix tree meta info.
 *
 * This support routine frees a range of blocks from the bitmap.
 * The range must be entirely enclosed by this radix node.  If a
 * meta node, we break the range down recursively to free blocks
 * in subnodes (which means that this code can free an arbitrary
 * range whereas the allocation code cannot allocate an arbitrary
 * range).
 */
static void
hammer_alst_meta_free(hammer_almeta_t *scan, int32_t freeBlk, int32_t count,
		      int32_t radix, int skip, int32_t blk)
{
	int next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);
	u_int32_t mask;
	u_int32_t pmask;
	int i;

	/*
	 * Break the free down into its components.  Because it is so easy
	 * to implement, frees are not limited to power-of-2 sizes.
	 *
	 * Each block in a meta-node bitmap takes two bits.
	 */
	radix /= HAMMER_ALIST_META_RADIX;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	mask = 0x00000003 << (i * 2);
	pmask = 0x00000001 << (i * 2);
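
	/*
	 * e.g. if the range starts in the third child (i == 2), then
	 * mask == 0x00000030 and pmask == 0x00000010, selecting that
	 * child's 2-bit field in this meta node's bitmap.
	 */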
	i = i * next_skip + 1;
	while (i <= skip && blk < freeBlk + count) {
		int32_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (int32_t)-1)
			panic("hammer_alst_meta_free: freeing unexpected range");

		if (freeBlk == blk && count >= radix) {
			/*
			 * All-free case, no need to update sub-tree
			 */
			scan->bm_bitmap |= mask;
			scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
		} else {
			if (next_skip == 1)
				hammer_alst_leaf_free(&scan[i], freeBlk, v);
			else
				hammer_alst_meta_free(&scan[i], freeBlk, v,
						      radix, next_skip - 1,
						      blk);
			if (scan[i].bm_bitmap == (u_int32_t)-1)
				scan->bm_bitmap |= mask;
			else
				scan->bm_bitmap |= pmask;
			if (scan->bm_bighint < scan[i].bm_bighint)
				scan->bm_bighint = scan[i].bm_bighint;
		}

/*
 * Initialize our meta structures and bitmaps and calculate the exact
 * amount of space required to manage 'count' blocks - this space may
 * be considerably less than the calculated radix due to the large
 * RADIX values we use.
 */
static int32_t
hammer_alst_radix_init(hammer_almeta_t *scan, int32_t radix,
		       int skip, int32_t count)
{
	int i;
	int next_skip;
	int32_t memindex = 0;
	u_int32_t pmask;

	if (radix == HAMMER_ALIST_BMAP_RADIX) {
		if (scan)
			scan->bm_bighint = 0;
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */
	if (scan)
		scan->bm_bighint = 0;

	radix /= HAMMER_ALIST_META_RADIX;
	next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + hammer_alst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix, next_skip - 1, radix);
			/* already marked as wholly allocated */
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + hammer_alst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix, next_skip - 1, count);

			/*
			 * Mark as partially allocated
			 */
			if (scan)
				scan->bm_bitmap |= pmask;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (int32_t)-1;
			/* already marked as wholly allocated */
			break;
		}
	}

static void
hammer_alst_radix_print(hammer_almeta_t *scan, int32_t blk,
			int32_t radix, int skip, int tab)
{
	if (radix == HAMMER_ALIST_BMAP_RADIX) {
		kprintf(
			"%*.*s(%04x,%d): bitmap %08x big=%d\n",

	if (scan->bm_bitmap == 0) {
		kprintf(
			"%*.*s(%04x,%d) ALL ALLOCATED\n",

	if (scan->bm_bitmap == (u_int32_t)-1) {
		kprintf(
			"%*.*s(%04x,%d) ALL FREE\n",

	kprintf(
		"%*.*s(%04x,%d): subtree (%d) bitmap=%08x big=%d {\n",

	radix /= HAMMER_ALIST_META_RADIX;
	next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (int32_t)-1) {
			kprintf(
				"%*.*s(%04x,%d): Terminator\n",

		if ((scan->bm_bitmap & mask) == mask) {
			kprintf(
				"%*.*s(%04x,%d): ALL FREE\n",
		} else if ((scan->bm_bitmap & mask) == 0) {
			kprintf(
				"%*.*s(%04x,%d): ALL ALLOCATED\n",

	hammer_alst_radix_print(

int
main(int ac, char **av)
{
	int32_t size = 1024;
	hammer_almeta_t *meta = NULL;
	hammer_alist_t bl;
	int i;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];

		size = strtol(ptr, NULL, 0);

		fprintf(stderr, "Bad option: %s\n", ptr - 2);
	}
	bl = hammer_alist_create(size, NULL, &meta);
	hammer_alist_free(bl, meta, 0, size);

	for (;;) {
		char buf[1024];
		int32_t da, count, blk;

		kprintf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;

		hammer_alist_print(bl, meta);

		if (sscanf(buf + 1, "%d", &count) == 1) {
			blk = hammer_alist_alloc(bl, meta, count);
			kprintf("    R=%04x\n", blk);
		}

		if (sscanf(buf + 1, "%d", &count) == 1) {
			blk = hammer_alist_alloc_rev(bl, meta, count);
			kprintf("    R=%04x\n", blk);
		}

		if (sscanf(buf + 1, "%x %d", &da, &count) == 2)
			hammer_alist_free(bl, meta, da, count);

		"r %d            -allocate reverse\n"

void
panic(const char *ctl, ...)
{
	va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	exit(1);
}