/*
 * Bitmap allocator/deallocator, using a radix tree with hinting.
 * Unlimited-size allocations, power-of-2 only, power-of-2 aligned results
 * only.  This module has been separated from the generic kernel module and
 * written specifically for embedding in HAMMER storage structures.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/Attic/hammer_alist.c,v 1.8 2008/01/11 01:41:33 dillon Exp $
 */
/*
 * This module implements a generic allocator through the use of a hinted
 * radix tree.  All allocations must be in powers of 2 and will return
 * similarly aligned results.  The radix tree typically recurses within
 * a memory buffer and then continues its recursion by chaining to other
 * memory buffers, creating a seamless whole capable of managing any amount
 * of storage.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * HAMMER_ALIST_META_RADIX lower level nodes.  This is a recursive structure
 * but one that can be easily scanned through a very simple 'skip'
 * calculation.
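 *
 * Illustrative layout sketch (an addition for clarity; the numbers assume
 * HAMMER_ALIST_META_RADIX is 16, as implied by the two-bits-per-block
 * u_int32_t meta bitmaps used below): a node with skip = 17 is stored as
 * the node itself followed in-line by 16 children of skip = 1 each, and
 * child i of a node at array index m lives at index
 * m + 1 + i * next_skip, where next_skip = (skip - 1) / 16.  No pointers
 * are involved, which is what makes the structure embeddable.
 *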
 * The radix tree supports an early-termination optimization which
 * effectively allows us to efficiently mix large and small allocations
 * with a single abstraction.  The address space can be partitioned
 * arbitrarily without adding much in the way of additional meta-storage
 * for the allocator.
 *
 * The radix tree supports allocator layering.  By supplying a base_radix > 1
 * the allocator will issue callbacks to recurse into lower layers once
 * higher layers have been exhausted.
 *
 * ALLOCATIONS IN MULTIPLES OF base_radix WILL BE ENTIRELY RETAINED IN THE
 * HIGHER LEVEL ALLOCATOR AND NEVER RECURSE.  This means the init function
 * will never be called and the A-list will consider the underlying zone
 * as being uninitialized.  If you then do a partial free, the A-list will
 * call the init function before freeing.  Most users of this API, including
 * HAMMER, only allocate and free whole zones, or only allocate and free
 * partial zones, and never mix their metaphors.
 *
 * This code can be compiled stand-alone for debugging.
 */
#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include "hammer_alist.h"
#include "hammer_disk.h"

#else
#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <assert.h>

#define kmalloc(a,b,c)  malloc(a)
#define kfree(a,b)      free(a)
#define kprintf         printf
#define KKASSERT(exp)   assert(exp)

#include "hammer_alist.h"

void panic(const char *ctl, ...);

#endif
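/*
 * Stand-alone build sketch (illustrative; the exact command is an
 * assumption, not part of the original source).  Outside the kernel
 * ALIST_DEBUG is enabled by default above, so something like
 *
 *      cc -o alist_test hammer_alist.c
 *      ./alist_test 1024 64
 *
 * builds the interactive test in main() at the bottom of this file, where
 * the two arguments are the total block count and the second-layer radix,
 * both powers of 2.
 */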
/*
 * static support functions
 */
static int32_t hammer_alst_leaf_alloc_fwd(hammer_almeta_t scan,
                        int32_t blk, int count, int32_t atblk);
static int32_t hammer_alst_meta_alloc_fwd(hammer_alist_t live,
                        hammer_almeta_t scan,
                        int32_t blk, int32_t count,
                        int32_t radix, int skip, int32_t atblk);
static int32_t hammer_alst_leaf_alloc_rev(hammer_almeta_t scan,
                        int32_t blk, int count, int32_t atblk);
static int32_t hammer_alst_meta_alloc_rev(hammer_alist_t live,
                        hammer_almeta_t scan,
                        int32_t blk, int32_t count,
                        int32_t radix, int skip, int32_t atblk);
static int32_t hammer_alst_find(hammer_alist_t live, hammer_almeta_t scan,
                        int32_t blk, int32_t radix,
                        int32_t skip, int32_t atblk);
static void hammer_alst_leaf_free(hammer_almeta_t scan, int32_t relblk,
                        int count);
static void hammer_alst_meta_free(hammer_alist_t live, hammer_almeta_t scan,
                        int32_t freeBlk, int32_t count,
                        int32_t radix, int skip, int32_t blk);
static int32_t hammer_alst_radix_init(hammer_almeta_t scan,
                        int32_t radix, int skip, int32_t count);
static void hammer_alst_radix_recover(hammer_alist_recover_t info,
                        hammer_almeta_t scan, int32_t blk,
                        int32_t radix, int skip, int32_t count);
#ifdef ALIST_DEBUG
static void hammer_alst_radix_print(hammer_alist_t live,
                        hammer_almeta_t scan, int32_t blk,
                        int32_t radix, int skip, int tab);
#endif
/*
 * Initialize an a-list config structure for use.  The config structure
 * describes the basic structure of an a-list's topology and may be
 * shared by any number of a-lists which have the same topology.
 *
 * blocks is the total number of blocks, that is the number of blocks
 * handled at this layer multiplied by the base radix.
 *
 * When base_radix != 1 the A-list has only meta elements and does not have
 * any leaves, in order to be able to track partial allocations.
 */
void
hammer_alist_template(hammer_alist_config_t bl, int32_t blocks,
                      int32_t base_radix, int32_t maxmeta)
{
        int radix;
        int skip;

        /*
         * Calculate radix and skip field used for scanning.  The leaf nodes
         * in our tree are either BMAP or META nodes depending on whether
         * we chain to a lower level allocation layer or not.
         */
        if (base_radix == 1)
                radix = HAMMER_ALIST_BMAP_RADIX;
        else
                radix = HAMMER_ALIST_META_RADIX;
        skip = 1;

        while (radix < blocks / base_radix) {
                radix *= HAMMER_ALIST_META_RADIX;
                skip = skip * HAMMER_ALIST_META_RADIX + 1;
        }
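        /*
         * Worked example (illustrative): for blocks = 4096 and
         * base_radix = 1, and assuming HAMMER_ALIST_BMAP_RADIX == 32 and
         * HAMMER_ALIST_META_RADIX == 16, the loop starts at radix = 32,
         * skip = 1 and iterates twice:
         *
         *      radix = 512,  skip = 17   (one meta node + 16 leaves)
         *      radix = 8192, skip = 273  (one meta node + 16 subtrees)
         */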
        /*
         * Increase the radix based on the number of blocks a lower level
         * allocator is able to handle at the 'base' of our allocator.
         * If base_radix != 1 the caller will have to initialize the callback
         * fields to implement the lower level allocator.
         */
        KKASSERT((int64_t)radix * (int64_t)base_radix < 0x80000000LL);
        radix *= base_radix;

        bzero(bl, sizeof(*bl));

        bl->bl_blocks = blocks;
        bl->bl_base_radix = base_radix;
        bl->bl_radix = radix;
        bl->bl_skip = skip;
        bl->bl_rootblks = hammer_alst_radix_init(NULL, bl->bl_radix,
                                                 bl->bl_skip, blocks);
        ++bl->bl_rootblks;      /* one more for freeblks header */
        if (base_radix == 1)
                bl->bl_terminal = 1;
        KKASSERT(maxmeta == 0 || bl->bl_rootblks <= maxmeta);
#if defined(ALIST_DEBUG)
        kprintf(
                "PRIMARY ALIST LAYER manages %d blocks"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks / bl->bl_base_radix,
                (bl->bl_rootblks * sizeof(struct hammer_almeta) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(struct hammer_almeta))
        );
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
}
/*
 * Initialize a new A-list.
 */
void
hammer_alist_init(hammer_alist_t live, int32_t start, int32_t count,
                  enum hammer_alloc_state state)
{
        hammer_alist_config_t bl = live->config;

        /*
         * Note: base_freeblks is a count, not a block number limit.
         */
        live->meta->bm_alist_freeblks = 0;
        live->meta->bm_alist_base_freeblks = count;
        hammer_alst_radix_init(live->meta + 1, bl->bl_radix,
                               bl->bl_skip, bl->bl_blocks);
        if (count && state == HAMMER_ASTATE_FREE)
                hammer_alist_free(live, start, count);
}
#if !defined(_KERNEL) && defined(ALIST_DEBUG)

/*
 * hammer_alist_create()        (userland only)
 *
 *      create an alist capable of handling up to the specified number of
 *      blocks.  blocks must be greater than 0.
 *
 *      The smallest alist consists of a single leaf node capable of
 *      managing HAMMER_ALIST_BMAP_RADIX blocks, or (if base_radix != 1)
 *      a single meta node capable of managing HAMMER_ALIST_META_RADIX
 *      blocks which recurses into other storage layers for a total
 *      handling capability of (HAMMER_ALIST_META_RADIX * base_radix) blocks.
 *
 *      Larger a-lists increase their capability exponentially by
 *      HAMMER_ALIST_META_RADIX.
 *
 *      The block count is the total number of blocks inclusive of any
 *      layering.  blocks can be less than base_radix and will result in
 *      a radix tree with a single leaf node which then chains down.
 */
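/*
 * Capacity sketch (illustrative; assumes HAMMER_ALIST_BMAP_RADIX == 32 and
 * HAMMER_ALIST_META_RADIX == 16): a terminal a-list manages 32, 512, 8192,
 * ... blocks as meta levels are added, while with base_radix = 64 a single
 * meta level already manages 16 * 64 = 1024 blocks, each further level
 * multiplying that by 16.
 */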
hammer_alist_t
hammer_alist_create(int32_t blocks, int32_t base_radix,
                    struct malloc_type *mtype, enum hammer_alloc_state state)
{
        hammer_alist_t live;
        hammer_alist_config_t bl;
        size_t metasize;

        live = kmalloc(sizeof(*live), mtype, M_WAITOK);
        live->config = bl = kmalloc(sizeof(*bl), mtype, M_WAITOK);
        hammer_alist_template(bl, blocks, base_radix, 0);

        metasize = sizeof(*live->meta) * bl->bl_rootblks;
        live->meta = kmalloc(metasize, mtype, M_WAITOK);
        bzero(live->meta, metasize);
#if defined(ALIST_DEBUG)
        kprintf(
                "ALIST representing %d blocks (%d MB of swap)"
                ", requiring %dK (%d bytes) of ram\n",
                bl->bl_blocks,
                bl->bl_blocks * 4 / 1024,
                (bl->bl_rootblks * sizeof(*live->meta) + 1023) / 1024,
                (bl->bl_rootblks * sizeof(*live->meta))
        );
        if (base_radix != 1) {
                kprintf("ALIST recurses when it reaches a base_radix of %d\n",
                        base_radix);
        }
        kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
        hammer_alist_init(live, 0, blocks, state);
        return(live);
}
void
hammer_alist_destroy(hammer_alist_t live, struct malloc_type *mtype)
{
        kfree(live->config, mtype);
        kfree(live->meta, mtype);
        kfree(live, mtype);
}

#endif
/*
 * hammer_alist_alloc()
 *
 *      Reserve space in the block bitmap.  Return the base of a contiguous
 *      region or HAMMER_ALIST_BLOCK_NONE if space could not be allocated.
 */
int32_t
hammer_alist_alloc(hammer_alist_t live, int32_t count)
{
        int32_t blk = HAMMER_ALIST_BLOCK_NONE;
        hammer_alist_config_t bl = live->config;

        KKASSERT((count | (count - 1)) == (count << 1) - 1);
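        /*
         * (Aside: the assertion above is a power-of-2 test.  For count = 8,
         * count | (count - 1) == 15 == (count << 1) - 1, so it holds; for
         * count = 6, 6 | 5 == 7 while (6 << 1) - 1 == 11, so it fails.)
         */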
        if (bl && count <= bl->bl_radix) {
                /*
                 * When skip is 1 we are at a leaf.  If we are the terminal
                 * allocator we use our native leaf functions and radix will
                 * be HAMMER_ALIST_BMAP_RADIX.  Otherwise we are a meta node
                 * which will chain to another allocator.
                 */
                if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
                        KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
                        blk = hammer_alst_leaf_alloc_fwd(
                                    live->meta + 1, 0, count, 0);
                } else {
                        blk = hammer_alst_meta_alloc_fwd(
                                    live, live->meta + 1,
                                    0, count, bl->bl_radix, bl->bl_skip, 0);
                }
                if (blk != HAMMER_ALIST_BLOCK_NONE)
                        live->meta->bm_alist_freeblks -= count;
        }
        return(blk);
}
int32_t
hammer_alist_alloc_fwd(hammer_alist_t live, int32_t count, int32_t atblk)
{
        int32_t blk = HAMMER_ALIST_BLOCK_NONE;
        hammer_alist_config_t bl = live->config;

        KKASSERT((count | (count - 1)) == (count << 1) - 1);

        if (bl && count <= bl->bl_radix) {
                /*
                 * When skip is 1 we are at a leaf.  If we are the terminal
                 * allocator we use our native leaf functions and radix will
                 * be HAMMER_ALIST_BMAP_RADIX.  Otherwise we are a meta node
                 * which will chain to another allocator.
                 */
                if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
                        KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
                        blk = hammer_alst_leaf_alloc_fwd(
                                    live->meta + 1, 0, count, atblk);
                } else {
                        blk = hammer_alst_meta_alloc_fwd(
                                    live, live->meta + 1,
                                    0, count, bl->bl_radix, bl->bl_skip, atblk);
                }
                if (blk != HAMMER_ALIST_BLOCK_NONE)
                        live->meta->bm_alist_freeblks -= count;
        }
        return(blk);
}
int32_t
hammer_alist_alloc_rev(hammer_alist_t live, int32_t count, int32_t atblk)
{
        hammer_alist_config_t bl = live->config;
        int32_t blk = HAMMER_ALIST_BLOCK_NONE;

        KKASSERT((count | (count - 1)) == (count << 1) - 1);

        if (bl && count < bl->bl_radix) {
                if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
                        KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
                        blk = hammer_alst_leaf_alloc_rev(
                                    live->meta + 1, 0, count, atblk);
                } else {
                        blk = hammer_alst_meta_alloc_rev(
                                    live, live->meta + 1,
                                    0, count, bl->bl_radix, bl->bl_skip, atblk);
                }
                if (blk != HAMMER_ALIST_BLOCK_NONE)
                        live->meta->bm_alist_freeblks -= count;
        }
        return(blk);
}
/*
 * hammer_alist_find()
 *
 *      Locate the first block >= atblk marked as allocated in the A-list
 *      and return it.  Return HAMMER_ALIST_BLOCK_NONE if no block could
 *      be found.
 */
int32_t
hammer_alist_find(hammer_alist_t live, int32_t atblk)
{
        hammer_alist_config_t bl = live->config;

        KKASSERT(live->config != NULL);
        KKASSERT(atblk >= 0);
        atblk = hammer_alst_find(live, live->meta + 1, 0, bl->bl_radix,
                                 bl->bl_skip, atblk);
        return(atblk);
}
/*
 * hammer_alist_free()
 *
 *      Free up space in the block bitmap.  Panic if an inconsistency is
 *      found.
 *
 *      Unlike allocations, there are no alignment requirements for blkno or
 *      count when freeing blocks.
 */
void
hammer_alist_free(hammer_alist_t live, int32_t blkno, int32_t count)
{
        hammer_alist_config_t bl = live->config;

        KKASSERT(blkno + count <= bl->bl_blocks);
        if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
                KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
                hammer_alst_leaf_free(live->meta + 1, blkno, count);
        } else {
                hammer_alst_meta_free(live, live->meta + 1,
                                      blkno, count,
                                      bl->bl_radix, bl->bl_skip, 0);
        }
        live->meta->bm_alist_freeblks += count;
}
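/*
 * Usage sketch (illustrative only, not part of the original source): a
 * terminal a-list uses base_radix 1; allocations must be powers of 2 and
 * return aligned blocks, while frees may be arbitrary ranges.
 */
#if 0
static void
alist_usage_example(void)
{
        hammer_alist_t live;
        int32_t blk;

        live = hammer_alist_create(1024, 1, NULL, HAMMER_ASTATE_FREE);
        blk = hammer_alist_alloc(live, 16);     /* 16 blocks, 16-aligned */
        if (blk != HAMMER_ALIST_BLOCK_NONE)
                hammer_alist_free(live, blk, 16);
        hammer_alist_destroy(live, NULL);
}
#endif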
/*
 * Recover an A-list.  This will dive down to the leaves and regenerate
 * the hints and the freeblks count.  This function will also recurse
 * through any stacked A-lists.  > 0 is returned on success, a negative
 * error code on failure.
 *
 * Since A-lists have no pointers the only thing that can prevent recovery
 * is an I/O error in e.g. a stacked A-list.  This doesn't mean the recovered
 * map will be meaningful, however.
 *
 * blk is usually passed as 0 at the top level and is adjusted as the recovery
 * code scans the A-list.  It is only used when recursing down a stacked
 * A-list.
 */
int32_t
hammer_alist_recover(hammer_alist_t live, int32_t blk, int32_t start,
                     int32_t count)
{
        hammer_alist_config_t bl = live->config;
        struct hammer_alist_recover info;
        int32_t r;

        info.live = live;
        info.error = 0;

        live->meta->bm_alist_freeblks = 0;
        live->meta->bm_alist_base_freeblks = count;
        hammer_alst_radix_recover(&info, live->meta + 1, blk, bl->bl_radix,
                                  bl->bl_skip, bl->bl_blocks);
        if (info.error)
                return(info.error);

        /*
         * Any garbage between 0 and start, and anything at or beyond
         * start + count, is removed.
         */
        while ((r = hammer_alist_find(live, 0)) != HAMMER_ALIST_BLOCK_NONE &&
               r < start) {
                hammer_alist_free(live, r, 1);
        }
        while ((r = hammer_alist_find(live, start + count)) !=
               HAMMER_ALIST_BLOCK_NONE) {
                hammer_alist_free(live, r, 1);
        }
        return(live->meta->bm_alist_freeblks);
}
int
hammer_alist_isfull(hammer_alist_t live)
{
        return(live->meta->bm_alist_freeblks == 0);
}

int
hammer_alist_isempty(hammer_alist_t live)
{
        return((int)live->meta->bm_alist_freeblks ==
               live->meta->bm_alist_base_freeblks);
}
#ifdef ALIST_DEBUG

/*
 * alist_print()    - dump radix tree
 */
void
hammer_alist_print(hammer_alist_t live, int tab)
{
        hammer_alist_config_t bl = live->config;

        kprintf("%*.*sALIST (%d/%d free blocks) {\n",
                tab, tab, "",
                live->meta->bm_alist_freeblks,
                live->meta->bm_alist_base_freeblks);
        hammer_alst_radix_print(live, live->meta + 1, 0,
                                bl->bl_radix, bl->bl_skip, tab + 4);
        kprintf("%*.*s}\n", tab, tab, "");
}

#endif
/************************************************************************
 *                      ALLOCATION SUPPORT FUNCTIONS                    *
 ************************************************************************
 *
 *      These support functions do all the actual work.  They may seem
 *      rather longish, but that's because I've commented them up.  The
 *      actual code is straightforward.
 */
/*
 * hammer_alst_leaf_alloc_fwd()
 *
 *      Allocate at a leaf in the radix tree (a bitmap).
 *
 *      This is the core of the allocator and is optimized for the 1 block
 *      and the HAMMER_ALIST_BMAP_RADIX block allocation cases.  Other cases
 *      are somewhat slower.  The 1 block allocation case is log2 and
 *      extremely quick.
 */
static int32_t
hammer_alst_leaf_alloc_fwd(hammer_almeta_t scan, int32_t blk,
                           int count, int32_t atblk)
{
        u_int32_t orig = scan->bm_bitmap;
        int32_t saveblk = blk;

        /*
         * Optimize bitmap all-allocated case.  Also, count = 1
         * case assumes at least 1 bit is free in the bitmap, so
         * we have to take care of this case here.
         */
        if (orig == 0) {
                scan->bm_bighint = 0;
                return(HAMMER_ALIST_BLOCK_NONE);
        }
#if 0
        /*
         * Optimized code to allocate one bit out of the bitmap
         *
         * mask iterates e.g. 00001111 00000011 00000001
         *
         * mask starts at 00001111
         */
        if (count == 1) {
                u_int32_t mask;
                int j = HAMMER_ALIST_BMAP_RADIX/2;
                int r = 0;

                mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2);

                while (j) {
                        if ((orig & mask) == 0) {
                                r += j;
                                orig >>= j;
                        }
                        j >>= 1;
                        mask >>= j;
                }
                scan->bm_bitmap &= ~(1 << r);
                return(blk + r);
        }
#endif
        /*
         * Non-optimized code to allocate N bits out of the bitmap.
         * The more bits, the faster the code runs.  It will run
         * the slowest allocating 2 bits, but since there aren't any
         * memory ops in the core loop (or shouldn't be, anyway),
         * you probably won't notice the difference.
         *
         * Similar to the blist case, the alist code also requires
         * allocations to be power-of-2 sized and aligned to the
         * size of the allocation, which simplifies the algorithm.
         */
        if (count <= HAMMER_ALIST_BMAP_RADIX) {
                u_int32_t mask;
                int n = HAMMER_ALIST_BMAP_RADIX - count;
                int j;

                mask = (u_int32_t)-1 >> n;

                for (j = 0; j <= n; j += count) {
                        if ((orig & mask) == mask && blk >= atblk) {
                                scan->bm_bitmap &= ~mask;
                                return(blk);
                        }
                        mask = mask << count;
                        blk += count;
                }
        }
        /*
         * We couldn't allocate count in this subtree, update bighint if
         * atblk didn't interfere with the hinting mechanism.
         */
        if (saveblk >= atblk)
                scan->bm_bighint = count - 1;
        return(HAMMER_ALIST_BLOCK_NONE);
}
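/*
 * Illustration (not part of the allocator): the disabled count == 1 path
 * above binary-searches a 32-bit word for a free (set) bit by testing
 * half-words.  A minimal stand-alone sketch of the same technique:
 */
#if 0
static int
find_lowest_set_bit(u_int32_t map)      /* map != 0 assumed */
{
        int j = HAMMER_ALIST_BMAP_RADIX / 2;
        int r = 0;
        u_int32_t mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX / 2);

        while (j) {
                if ((map & mask) == 0) {        /* low window empty, go up */
                        r += j;
                        map >>= j;
                }
                j >>= 1;
                mask >>= j;                     /* halve the window */
        }
        return(r);
}
#endif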
/*
 * This version allocates blocks in the reverse direction.
 */
static int32_t
hammer_alst_leaf_alloc_rev(hammer_almeta_t scan, int32_t blk,
                           int count, int32_t atblk)
{
        u_int32_t orig = scan->bm_bitmap;
        int32_t saveblk;

        /*
         * Optimize bitmap all-allocated case.  Also, count = 1
         * case assumes at least 1 bit is free in the bitmap, so
         * we have to take care of this case here.
         */
        if (orig == 0) {
                scan->bm_bighint = 0;
                return(HAMMER_ALIST_BLOCK_NONE);
        }
#if 0
        /*
         * Optimized code to allocate one bit out of the bitmap
         */
        if (count == 1) {
                u_int32_t mask;
                int j = HAMMER_ALIST_BMAP_RADIX/2;
                int r = HAMMER_ALIST_BMAP_RADIX - 1;

                mask = ~((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2));

                while (j) {
                        if ((orig & mask) == 0) {
                                r -= j;
                                orig <<= j;
                        }
                        j >>= 1;
                        mask <<= j;
                }
                scan->bm_bitmap &= ~(1 << r);
                return(blk + r);
        }
#endif
        /*
         * Non-optimized code to allocate N bits out of the bitmap.
         * The more bits, the faster the code runs.  It will run
         * the slowest allocating 2 bits, but since there aren't any
         * memory ops in the core loop (or shouldn't be, anyway),
         * you probably won't notice the difference.
         *
         * Similar to the blist case, the alist code also requires
         * allocations to be power-of-2 sized and aligned to the
         * size of the allocation, which simplifies the algorithm.
         *
         * initial mask if count == 2:  1100....0000
         */
        if (count <= HAMMER_ALIST_BMAP_RADIX) {
                u_int32_t mask;
                int n = HAMMER_ALIST_BMAP_RADIX - count;
                int j;

                mask = ((u_int32_t)-1 >> n) << n;
                blk += n;
                saveblk = blk;

                for (j = n; j >= 0; j -= count) {
                        if ((orig & mask) == mask && blk <= atblk) {
                                scan->bm_bitmap &= ~mask;
                                return(blk);
                        }
                        mask = mask >> count;
                        blk -= count;
                }
        }
        /*
         * We couldn't allocate count in this subtree, update bighint if
         * atblk didn't interfere with it.
         */
        if (saveblk <= atblk)
                scan->bm_bighint = count - 1;
        return(HAMMER_ALIST_BLOCK_NONE);
}
/*
 * hammer_alst_meta_alloc_fwd()
 *
 *      Allocate at a meta in the radix tree.
 *
 *      Attempt to allocate at a meta node.  If we can't, we update
 *      bighint and return a failure.  Updating bighint optimizes future
 *      calls that hit this node.  We have to check for our collapse cases
 *      and we have a few optimizations strewn in as well.
 */
static int32_t
hammer_alst_meta_alloc_fwd(hammer_alist_t live, hammer_almeta_t scan,
                           int32_t blk, int32_t count,
                           int32_t radix, int skip, int32_t atblk)
{
        hammer_alist_config_t bl;
        u_int32_t mask;
        u_int32_t pmask;
        int32_t saveblk;
        int next_skip;
        int i;
        int j;

        /*
         * ALL-ALLOCATED special case
         */
        if (scan->bm_bitmap == 0) {
                scan->bm_bighint = 0;
                return(HAMMER_ALIST_BLOCK_NONE);
        }
        radix /= HAMMER_ALIST_META_RADIX;
        bl = live->config;

        /*
         * Radix now represents each bitmap entry for this meta node.  If
         * the number of blocks being allocated can be fully represented,
         * we allocate directly out of this meta node.
         *
         * Meta node bitmaps use 2 bits per block.
         *
         *      00      ALL-ALLOCATED - UNINITIALIZED
         *      01      PARTIALLY-FREE/PARTIALLY-ALLOCATED
         *      10      ALL-ALLOCATED - INITIALIZED
         *      11      ALL-FREE      - UNINITIALIZED
         */
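        /*
         * Decoding sketch (illustrative): entry i of this meta node is
         * covered by mask = 3 << (i * 2) with pmask = 1 << (i * 2), so
         * (bm_bitmap & mask) == 0 is all-allocated/uninitialized, == pmask
         * is partially free, == (pmask << 1) is all-allocated/initialized,
         * and == mask is all-free.
         */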
        if (count >= radix) {
                int n = count / radix * 2;      /* number of bits */
                int nd2 = n / 2;                /* number of radixes */

                mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n);
                saveblk = blk;
                for (j = 0; j < (int)HAMMER_ALIST_META_RADIX; j += nd2) {
                        if ((scan->bm_bitmap & mask) == mask && blk >= atblk) {
                                /*
                                 * NOTE: Marked all-allocated/uninitialized
                                 * rather than all-allocated/initialized.
                                 * See the warning at the top of the file.
                                 */
                                scan->bm_bitmap &= ~mask;
                                return(blk);
                        }
                        mask <<= n;
                        blk += radix * nd2;
                }
                if (scan->bm_bighint >= count && saveblk >= atblk)
                        scan->bm_bighint = count >> 1;
                return(HAMMER_ALIST_BLOCK_NONE);
        }
        /*
         * If the count is too big we couldn't allocate anything from a
         * recursion even if the sub-tree were entirely free.
         *
         * If not we have to recurse.
         */
        saveblk = blk;
        mask = 0x00000003;
        pmask = 0x00000001;

        if (skip == 1) {
                /*
                 * If skip is 1 we are a meta leaf node, which means there
                 * is another allocation layer we have to dive down into.
                 */
                for (i = 0; i < (int)HAMMER_ALIST_META_RADIX; ++i) {
                        /*
                         * If the next layer is completely free then call
                         * its init function to initialize it.
                         */
                        if ((scan->bm_bitmap & mask) == mask &&
                            blk + radix > atblk) {
                                if (bl->bl_radix_init(live->info, blk, radix, HAMMER_ASTATE_FREE) == 0) {
                                        /*
                                         * NOTE: Marked all-allocated/uninit-
                                         * ialized rather than all-allocated/
                                         * initialized.  See the warning at
                                         * the top of the file.
                                         */
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                }
                        }
                        /*
                         * If there may be some free blocks try to allocate
                         * out of the layer.  If the layer indicates that
                         * it is completely full then clear both bits to
                         * propagate the condition.
                         */
                        if ((scan->bm_bitmap & mask) == pmask &&
                            blk + radix > atblk) {
                                int32_t full;
                                int32_t r;

                                r = bl->bl_radix_alloc_fwd(live->info, blk,
                                                           radix, count,
                                                           atblk, &full);
                                if (r != HAMMER_ALIST_BLOCK_NONE) {
                                        if (full) {
                                                scan->bm_bitmap &= ~mask;
                                                scan->bm_bitmap |= pmask << 1;
                                        }
                                        return(r);
                                }
                        }
                        blk += radix;
                        mask <<= 2;
                        pmask <<= 2;
                }
        } else {
                /*
                 * Otherwise there are sub-records in the current a-list
                 * layer.  We can also peek into the sub-layers to get
                 * more accurate size hints.
                 */
                next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
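                /*
                 * Index arithmetic sketch (illustrative): with skip = 273
                 * and HAMMER_ALIST_META_RADIX == 16, next_skip is 17 and
                 * the children of this node sit at scan[1], scan[18],
                 * scan[35], ..., each immediately followed in-line by its
                 * own subtree.
                 */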
                for (i = 1; i < skip; i += next_skip) {
                        if (scan[i].bm_bighint == (int32_t)-1) {
                                /*
                                 * Terminator
                                 */
                                break;
                        }

                        /*
                         * Initialize bitmap if allocating from the all-free
                         * case.
                         */
                        if ((scan->bm_bitmap & mask) == mask) {
                                scan[i].bm_bitmap = (u_int32_t)-1;
                                scan[i].bm_bighint = radix;
                        }

                        if (count <= scan[i].bm_bighint &&
                            blk + radix > atblk) {
                                /*
                                 * count fits in object, recurse into the
                                 * next layer.  If the next_skip is 1 it
                                 * will be either a normal leaf or a meta
                                 * node.
                                 */
                                int32_t r;

                                if (next_skip == 1 && bl->bl_terminal) {
                                        r = hammer_alst_leaf_alloc_fwd(
                                                &scan[i], blk, count, atblk);
                                } else {
                                        r = hammer_alst_meta_alloc_fwd(
                                                live, &scan[i],
                                                blk, count,
                                                radix, next_skip, atblk);
                                }
                                if (r != HAMMER_ALIST_BLOCK_NONE) {
                                        if (scan[i].bm_bitmap == 0) {
                                                scan->bm_bitmap &= ~mask;
                                        } else {
                                                scan->bm_bitmap &= ~mask;
                                                scan->bm_bitmap |= pmask;
                                        }
                                        return(r);
                                }
                        }
                        blk += radix;
                        mask <<= 2;
                        pmask <<= 2;
                }
        }

        /*
         * We couldn't allocate count in this subtree, update bighint.
         */
        if (scan->bm_bighint >= count && saveblk >= atblk)
                scan->bm_bighint = count >> 1;
        return(HAMMER_ALIST_BLOCK_NONE);
}
/*
 * This version allocates blocks in the reverse direction.
 *
 * This is really nasty code.
 */
static int32_t
hammer_alst_meta_alloc_rev(hammer_alist_t live, hammer_almeta_t scan,
                           int32_t blk, int32_t count,
                           int32_t radix, int skip, int32_t atblk)
{
        hammer_alist_config_t bl;
        u_int32_t mask;
        u_int32_t pmask;
        int32_t saveblk;
        int next_skip;
        int i;
        int j;

        /*
         * ALL-ALLOCATED special case
         */
        if (scan->bm_bitmap == 0) {
                scan->bm_bighint = 0;
                return(HAMMER_ALIST_BLOCK_NONE);
        }

        radix /= HAMMER_ALIST_META_RADIX;
        bl = live->config;
        /*
         * Radix now represents each bitmap entry for this meta node.  If
         * the number of blocks being allocated can be fully represented,
         * we allocate directly out of this meta node.
         *
         * Meta node bitmaps use 2 bits per block.
         *
         *      00      ALL-ALLOCATED (uninitialized)
         *      01      PARTIALLY-FREE/PARTIALLY-ALLOCATED
         *      10      ALL-ALLOCATED (initialized)
         *      11      ALL-FREE      (uninitialized)
         */
        if (count >= radix) {
                int n = count / radix * 2;      /* number of bits */
                int nd2 = n / 2;                /* number of radixes */

                /*
                 * Initial mask if e.g. n == 2:  1100....0000
                 */
                mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n) <<
                        (HAMMER_ALIST_BMAP_RADIX - n);
                blk += (HAMMER_ALIST_META_RADIX - nd2) * radix;
                saveblk = blk;
                for (j = HAMMER_ALIST_META_RADIX - nd2; j >= 0; j -= nd2) {
                        if ((scan->bm_bitmap & mask) == mask && blk <= atblk) {
                                scan->bm_bitmap &= ~mask;
                                return(blk);
                        }
                        mask >>= n;
                        blk -= nd2 * radix;
                }
                if (scan->bm_bighint >= count && saveblk <= atblk)
                        scan->bm_bighint = count >> 1;
                return(HAMMER_ALIST_BLOCK_NONE);
        }
        /*
         * NOTE: saveblk must represent the entire layer, not the base blk
         * of the last element.  Otherwise an atblk that is inside the
         * last element can cause bighint to be updated for a failed
         * allocation when we didn't actually test all available blocks.
         */
        if (skip == 1) {
                /*
                 * We need to recurse but we are at a meta node leaf, which
                 * means there is another layer under us.
                 */
                blk += radix * HAMMER_ALIST_META_RADIX;
                saveblk = blk;
                blk -= radix;
                mask = 0xC0000000;
                pmask = 0x40000000;

                for (i = 0; i < (int)HAMMER_ALIST_META_RADIX; ++i) {
                        /*
                         * If the next layer is completely free then call
                         * its init function to initialize it.  The init
                         * function is responsible for the initial freeing.
                         */
                        if ((scan->bm_bitmap & mask) == mask && blk <= atblk) {
                                if (bl->bl_radix_init(live->info, blk, radix, HAMMER_ASTATE_FREE) == 0) {
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                }
                        }
                        /*
                         * If there may be some free blocks try to allocate
                         * out of the layer.  If the layer indicates that
                         * it is completely full then clear both bits to
                         * propagate the condition.
                         */
                        if ((scan->bm_bitmap & mask) == pmask && blk <= atblk) {
                                int32_t full;
                                int32_t r;

                                r = bl->bl_radix_alloc_rev(live->info, blk,
                                                           radix, count,
                                                           atblk, &full);
                                if (r != HAMMER_ALIST_BLOCK_NONE) {
                                        if (full) {
                                                scan->bm_bitmap &= ~mask;
                                                scan->bm_bitmap |= pmask << 1;
                                        }
                                        return(r);
                                }
                        }
                        mask >>= 2;
                        pmask >>= 2;
                        blk -= radix;
                }
        } else {
                /*
                 * Since we are going in the reverse direction we need an
                 * extra loop to determine if there is a terminator, then
                 * run through the sub-records in the reverse direction.
                 *
                 * This is a little weird but we do not want to overflow the
                 * mask/pmask in the loop.
                 */
                next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
                j = 0;
                for (i = 1; i < skip; i += next_skip) {
                        if (scan[i].bm_bighint == (int32_t)-1) {
                                break;
                        }
                        blk += radix;
                        j += 2;
                }
                blk -= radix;
                j -= 2;
                i -= next_skip;
                saveblk = blk + radix;
                mask = 0x00000003 << j;
                pmask = 0x00000001 << j;
                while (i >= 1) {
                        /*
                         * Initialize the bitmap in the child if allocating
                         * from the all-free case.
                         */
                        if ((scan->bm_bitmap & mask) == mask) {
                                scan[i].bm_bitmap = (u_int32_t)-1;
                                scan[i].bm_bighint = radix;
                        }

                        /*
                         * Handle various count cases.  Bighint may be too
                         * large but is never too small.
                         */
                        if (count <= scan[i].bm_bighint && blk <= atblk) {
                                /*
                                 * count fits in object
                                 */
                                int32_t r;

                                if (next_skip == 1 && bl->bl_terminal) {
                                        r = hammer_alst_leaf_alloc_rev(
                                                &scan[i], blk, count, atblk);
                                } else {
                                        r = hammer_alst_meta_alloc_rev(
                                                live, &scan[i],
                                                blk, count,
                                                radix, next_skip, atblk);
                                }
                                if (r != HAMMER_ALIST_BLOCK_NONE) {
                                        if (scan[i].bm_bitmap == 0) {
                                                scan->bm_bitmap &= ~mask;
                                        } else {
                                                scan->bm_bitmap &= ~mask;
                                                scan->bm_bitmap |= pmask;
                                        }
                                        return(r);
                                }
                        }
                        mask >>= 2;
                        pmask >>= 2;
                        blk -= radix;
                        i -= next_skip;
                }
        }
        /*
         * We couldn't allocate count in this subtree, update bighint.
         * Since we are restricted to powers of 2, the next highest count
         * we might be able to allocate is (count >> 1).
         */
        if (scan->bm_bighint >= count && saveblk <= atblk)
                scan->bm_bighint = count >> 1;
        return(HAMMER_ALIST_BLOCK_NONE);
}
/*
 * HAMMER_ALST_FIND()
 *
 *      Locate the first allocated block greater or equal to atblk.
 */
static int32_t
hammer_alst_find(hammer_alist_t live, hammer_almeta_t scan, int32_t blk,
                 int32_t radix, int32_t skip, int32_t atblk)
{
        u_int32_t mask;
        u_int32_t pmask;
        int32_t next_skip;
        int32_t tmpblk;
        int i;
        int j;

        /*
         * Leaf node (currently hammer_alist_find() only works on terminal
         * a-lists and the case is asserted in hammer_alist_find()).
         */
        if (skip == 1 && live->config->bl_terminal) {
                if (scan->bm_bitmap == (u_int32_t)-1)
                        return(HAMMER_ALIST_BLOCK_NONE);
                for (i = 0; i < (int)HAMMER_ALIST_BMAP_RADIX; ++i) {
                        if (blk + i < atblk)
                                continue;
                        if ((scan->bm_bitmap & (1 << i)) == 0)
                                return(blk + i);
                }
                return(HAMMER_ALIST_BLOCK_NONE);
        }
        /*
         * Meta node
         */
        radix /= HAMMER_ALIST_META_RADIX;
        next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
        mask = 0x00000003;
        pmask = 0x00000001;

        for (j = 0, i = 1; j < HAMMER_ALIST_META_RADIX; (i += next_skip), ++j) {
                /*
                 * Terminator
                 */
                if (scan[i].bm_bighint == (int32_t)-1) {
                        break;
                }

                /*
                 * Recurse if this meta might contain a desired block.
                 */
                if (blk + radix > atblk) {
                        if ((scan->bm_bitmap & mask) == 0) {
                                /*
                                 * 00 - all-allocated, uninitialized
                                 */
                                return(atblk < blk ? blk : atblk);
                        } else if ((scan->bm_bitmap & mask) == (pmask << 1)) {
                                /*
                                 * 10 - all-allocated, initialized
                                 */
                                return(atblk < blk ? blk : atblk);
                        } else if ((scan->bm_bitmap & mask) == mask) {
                                /*
                                 * 11 - all-free (skip)
                                 */
                        } else if (next_skip == 0) {
                                /*
                                 * Partially allocated but we have to recurse
                                 * into a stacked A-list.
                                 */
                                tmpblk = live->config->bl_radix_find(
                                                live->info, blk, radix, atblk);
                                if (tmpblk != HAMMER_ALIST_BLOCK_NONE)
                                        return(tmpblk);
                        } else if ((scan->bm_bitmap & mask) == pmask) {
                                /*
                                 * 01 - partially-allocated
                                 */
                                tmpblk = hammer_alst_find(live, &scan[i],
                                                          blk, radix,
                                                          next_skip, atblk);
                                if (tmpblk != HAMMER_ALIST_BLOCK_NONE)
                                        return(tmpblk);
                        }
                }
                mask <<= 2;
                pmask <<= 2;
                blk += radix;
        }
        return(HAMMER_ALIST_BLOCK_NONE);
}
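/*
 * Example (illustrative): on a terminal a-list where only blocks 5 and 9
 * are allocated, hammer_alist_find(live, 0) returns 5,
 * hammer_alist_find(live, 6) returns 9 and hammer_alist_find(live, 10)
 * returns HAMMER_ALIST_BLOCK_NONE.  The recovery code uses exactly this
 * to sweep stray allocations outside [start, start + count).
 */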
/*
 * HAMMER_ALST_LEAF_FREE()
 *
 *      Free allocated blocks from leaf bitmap.  The allocation code is
 *      restricted to powers of 2, the freeing code is not.
 */
static void
hammer_alst_leaf_free(hammer_almeta_t scan, int32_t blk, int count) {
        /*
         * free some data in this bitmap
         *
         * e.g.
         *      0000111111111110000
         *          \_________/\__/
         *             count   n
         */
        int n = blk & (HAMMER_ALIST_BMAP_RADIX - 1);
        u_int32_t mask;

        mask = ((u_int32_t)-1 << n) &
               ((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - count - n));
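        /*
         * Worked example (illustrative): for n = 4 and count = 11,
         *      ((u_int32_t)-1 << 4)             == 0xfffffff0
         *      ((u_int32_t)-1 >> (32 - 11 - 4)) == 0x00007fff
         * so mask == 0x00007ff0, i.e. bits 4..14, matching the picture in
         * the comment above.
         */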
        if (scan->bm_bitmap & mask)
                panic("hammer_alst_radix_free: freeing free block");
        scan->bm_bitmap |= mask;

        /*
         * We could probably do a better job here.  We are required to make
         * bighint at least as large as the biggest contiguous block of
         * data.  If we just shoehorn it, a little extra overhead will
         * be incurred on the next allocation (but only that one typically).
         */
        scan->bm_bighint = HAMMER_ALIST_BMAP_RADIX;
}
/*
 * HAMMER_ALST_META_FREE()
 *
 *      Free allocated blocks from radix tree meta info.
 *
 *      This support routine frees a range of blocks from the bitmap.
 *      The range must be entirely enclosed by this radix node.  If a
 *      meta node, we break the range down recursively to free blocks
 *      in subnodes (which means that this code can free an arbitrary
 *      range whereas the allocation code cannot allocate an arbitrary
 *      range).
 */
static void
hammer_alst_meta_free(hammer_alist_t live, hammer_almeta_t scan,
                      int32_t freeBlk, int32_t count,
                      int32_t radix, int skip, int32_t blk)
{
        hammer_alist_config_t bl;
        u_int32_t mask;
        u_int32_t pmask;
        int next_skip;
        int i;

        /*
         * Break the free down into its components.  Because it is so easy
         * to implement, frees are not limited to power-of-2 sizes.
         *
         * Each block in a meta-node bitmap takes two bits.
         */
        radix /= HAMMER_ALIST_META_RADIX;
        bl = live->config;

        i = (freeBlk - blk) / radix;
        blk += i * radix;
        mask = 0x00000003 << (i * 2);
        pmask = 0x00000001 << (i * 2);
        if (skip == 1) {
                /*
                 * Our meta node is a leaf node, which means it must recurse
                 * into another allocator.
                 */
                while (i < (int)HAMMER_ALIST_META_RADIX &&
                       blk < freeBlk + count) {
                        int32_t v;

                        v = blk + radix - freeBlk;
                        if (v > count)
                                v = count;

                        if (scan->bm_bighint == (int32_t)-1)
                                panic("hammer_alst_meta_free: freeing unexpected range");
                        KKASSERT((scan->bm_bitmap & mask) != mask);
                        if (freeBlk == blk && count >= radix) {
                                /*
                                 * Freeing an entire zone.  Only recurse if
                                 * the zone was initialized.  A 00 state means
                                 * that the zone is marked all-allocated,
                                 * but was never initialized.
                                 *
                                 * Then set the zone to the all-free state (11).
                                 */
                                int32_t empty;

                                if (scan->bm_bitmap & mask) {
                                        bl->bl_radix_free(live->info, blk, radix,
                                                          freeBlk - blk, v, &empty);
                                        bl->bl_radix_destroy(live->info, blk, radix);
                                }
                                scan->bm_bitmap |= mask;
                                scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
                                /* XXX bighint not being set properly */
                        } else {
                                /*
                                 * Recursion case, partial free.  If 00 the
                                 * zone is marked all allocated but has never
                                 * been initialized, so we init it.
                                 */
                                int32_t empty;

                                if ((scan->bm_bitmap & mask) == 0)
                                        bl->bl_radix_init(live->info, blk, radix, HAMMER_ASTATE_ALLOC);
                                bl->bl_radix_free(live->info, blk, radix,
                                                  freeBlk - blk, v, &empty);
                                if (empty) {
                                        scan->bm_bitmap |= mask;
                                        scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
                                        bl->bl_radix_destroy(live->info, blk, radix);
                                        /* XXX bighint not being set properly */
                                } else {
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                        if (scan->bm_bighint < radix / 2)
                                                scan->bm_bighint = radix / 2;
                                        /* XXX bighint not being set properly */
                                }
                        }
                        mask <<= 2;
                        pmask <<= 2;
                        ++i;
                        blk += radix;
                }
        } else {
                next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
                i = 1 + i * next_skip;

                while (i <= skip && blk < freeBlk + count) {
                        int32_t v;

                        KKASSERT(mask != 0);
                        KKASSERT(count != 0);

                        v = blk + radix - freeBlk;
                        if (v > count)
                                v = count;

                        if (scan->bm_bighint == (int32_t)-1)
                                panic("hammer_alst_meta_free: freeing unexpected range");

                        if (freeBlk == blk && count >= radix) {
                                /*
                                 * All-free case, no need to update sub-tree
                                 */
                                scan->bm_bitmap |= mask;
                                scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
                                /* XXX bighint not being set properly */
                        } else {
                                /*
                                 * Recursion case
                                 */
                                if (next_skip == 1 && bl->bl_terminal) {
                                        hammer_alst_leaf_free(&scan[i], freeBlk, v);
                                } else {
                                        hammer_alst_meta_free(live, &scan[i],
                                                              freeBlk, v,
                                                              radix, next_skip,
                                                              blk);
                                }
                                if (scan[i].bm_bitmap == (u_int32_t)-1) {
                                        scan->bm_bitmap |= mask;
                                        /* XXX bighint not set properly */
                                        scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
                                } else {
                                        scan->bm_bitmap &= ~mask;
                                        scan->bm_bitmap |= pmask;
                                        /* XXX bighint not set properly */
                                        if (scan->bm_bighint < scan[i].bm_bighint)
                                                scan->bm_bighint = scan[i].bm_bighint;
                                }
                        }
                        mask <<= 2;
                        pmask <<= 2;
                        i += next_skip;
                        blk += radix;
                }
        }
}
/*
 * hammer_alst_radix_init()
 *
 *      Initialize our meta structures and bitmaps and calculate the exact
 *      number of meta-nodes required to manage 'count' blocks.
 *
 *      The required space may be truncated due to early termination records.
 */
static int32_t
hammer_alst_radix_init(hammer_almeta_t scan, int32_t radix,
                       int skip, int32_t count)
{
        int i;
        int next_skip;
        int32_t memindex = 1;
        u_int32_t mask;
        u_int32_t pmask;

        /*
         * Basic initialization of the almeta for meta or leaf nodes.  This
         * marks the element as all-allocated.
         */
        if (scan) {
                scan->bm_bighint = 0;
                scan->bm_bitmap = 0;
        }
        /*
         * We are at a terminal node, we only eat one meta element.  If
         * live->config->bl_terminal is set this is a leaf node, otherwise
         * it is a meta node for a stacked A-list.  We do NOT recurse into
         * stacked A-lists but simply mark the entire stack as all-allocated
         * using code 00 (meaning all-allocated & uninitialized).
         */
        if (skip == 1)
                return(memindex);

        /*
         * Meta node.  If allocating the entire object we can special
         * case it.  However, we need to figure out how much memory
         * is required to manage 'count' blocks, so we continue on anyway.
         */
        radix /= HAMMER_ALIST_META_RADIX;
        next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
        mask = 0x00000003;
        pmask = 0x00000001;
        for (i = 1; i < skip; i += next_skip) {
                /*
                 * We eat up to this record
                 */
                if (memindex < i)
                        memindex = i;

                KKASSERT(mask != 0);

                if (count >= radix) {
                        /*
                         * Allocate the entire object
                         */
                        memindex += hammer_alst_radix_init(
                            ((scan) ? &scan[i] : NULL),
                            radix,
                            next_skip,
                            radix
                        );
                        count -= radix;
                        /* already marked as wholly allocated */
                } else if (count > 0) {
                        /*
                         * Allocate a partial object
                         */
                        memindex += hammer_alst_radix_init(
                            ((scan) ? &scan[i] : NULL),
                            radix,
                            next_skip,
                            count
                        );
                        count = 0;

                        /*
                         * Mark as partially allocated
                         */
                        if (scan) {
                                scan->bm_bitmap &= ~mask;
                                scan->bm_bitmap |= pmask;
                        }
                } else {
                        /*
                         * Add terminator and break out.  The terminal
                         * eats the meta node at scan[i].
                         */
                        ++memindex;
                        if (scan)
                                scan[i].bm_bighint = (int32_t)-1;
                        /* already marked as wholly allocated */
                        break;
                }
                mask <<= 2;
                pmask <<= 2;
        }
        return(memindex);
}
/*
 * hammer_alst_radix_recover()
 *
 *      This code is basically a duplicate of hammer_alst_radix_init()
 *      except it recovers the a-list instead of initializing it.
 */
static void
hammer_alst_radix_recover(hammer_alist_recover_t info, hammer_almeta_t scan,
                          int32_t blk, int32_t radix, int skip, int32_t count)
{
        hammer_alist_t live = info->live;
        u_int32_t mask;
        u_int32_t pmask;
        int next_skip;
        int32_t n;
        int i;
        int j;

        /*
         * Don't try to recover bighint, just set it to its maximum
         * value and let the A-list allocations reoptimize it.  XXX
         */
        scan->bm_bighint = radix;

        /*
         * If we are at a terminal node (i.e. not stacked on top of another
         * A-list), just count the free blocks.
         */
        if (skip == 1 && live->config->bl_terminal) {
                for (i = 0; i < (int)HAMMER_ALIST_BMAP_RADIX; ++i) {
                        if (scan->bm_bitmap & (1 << i))
                                ++info->live->meta->bm_alist_freeblks;
                }
                return;
        }
        /*
         * Recursive meta node (next_skip != 0) or terminal meta
         * node (next_skip == 0).
         */
        radix /= HAMMER_ALIST_META_RADIX;
        next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
        mask = 0x00000003;
        pmask = 0x00000001;

        for (i = 1, j = 0; j < (int)HAMMER_ALIST_META_RADIX;
             ++j, (i += next_skip)) {
                /*
                 * Meta node bitmaps use 2 bits per block.
                 *
                 *      00      ALL-ALLOCATED - UNINITIALIZED
                 *      01      PARTIALLY-FREE/PARTIALLY-ALLOCATED
                 *      10      ALL-ALLOCATED - INITIALIZED
                 *      11      ALL-FREE      - UNINITIALIZED
                 */
                if (count >= radix) {
                        /*
                         * Recover the entire object
                         */
                        if ((scan->bm_bitmap & mask) == 0) {
                                /*
                                 * All-allocated (uninited), do nothing
                                 */
                        } else if ((scan->bm_bitmap & mask) == mask) {
                                /*
                                 * All-free (uninited), do nothing
                                 */
                                live->meta->bm_alist_freeblks += radix;
                        } else if (next_skip) {
                                /*
                                 * Normal meta node, initialized.  Recover and
                                 * adjust to either an all-allocated (inited)
                                 * or partially-allocated state.
                                 */
                                hammer_alst_radix_recover(
                                    info, &scan[i], blk, radix,
                                    next_skip, radix);
                                if (scan[i].bm_bitmap == 0) {
                                        scan->bm_bitmap =
                                            (scan->bm_bitmap & ~mask) |
                                            (pmask << 1);
                                } else if (scan[i].bm_bitmap == (u_int32_t)-1) {
                                        scan->bm_bitmap |= mask;
                                } else {
                                        scan->bm_bitmap =
                                            (scan->bm_bitmap & ~mask) | pmask;
                                }
                        } else {
                                /*
                                 * Stacked meta node, recurse.
                                 */
                                n = live->config->bl_radix_recover(
                                    live->info, blk, radix, radix);
                                if (n >= 0) {
                                        live->meta->bm_alist_freeblks += n;
                                        if (n == 0) {
                                                scan->bm_bitmap =
                                                    (scan->bm_bitmap & ~mask) |
                                                    (pmask << 1);
                                        } else if (n == radix) {
                                                scan->bm_bitmap |= mask;
                                        } else {
                                                scan->bm_bitmap =
                                                    (scan->bm_bitmap & ~mask) |
                                                    pmask;
                                        }
                                } else {
                                        info->error = n;
                                }
                        }
                        count -= radix;
                } else if (count > 0) {
                        /*
                         * Recover a partial object.  The object can become
                         * wholly allocated but never wholly free.
                         */
                        if (next_skip) {
                                hammer_alst_radix_recover(
                                    info, &scan[i], blk, radix,
                                    next_skip, count);
                                if (scan[i].bm_bitmap == 0) {
                                        scan->bm_bitmap =
                                            (scan->bm_bitmap & ~mask) |
                                            (pmask << 1);
                                } else {
                                        scan->bm_bitmap =
                                            (scan->bm_bitmap & ~mask) | pmask;
                                }
                        } else {
                                n = live->config->bl_radix_recover(
                                    live->info, blk, radix, count);
                                if (n >= 0) {
                                        live->meta->bm_alist_freeblks += n;
                                        if (n == 0) {
                                                scan->bm_bitmap =
                                                    (scan->bm_bitmap & ~mask) |
                                                    (pmask << 1);
                                        } else {
                                                scan->bm_bitmap =
                                                    (scan->bm_bitmap & ~mask) |
                                                    pmask;
                                        }
                                } else {
                                        info->error = n;
                                }
                        }
                        count = 0;
                } else if (next_skip) {
                        /*
                         * Add terminator.  The terminator eats the meta
                         * node at scan[i].  There is only ONE terminator,
                         * make sure we don't write out any more (set count to
                         * -1) or we may overflow our allocation.
                         */
                        if (count == 0) {
                                scan[i].bm_bighint = (int32_t)-1;
                                count = -1;
                        }
                        scan->bm_bitmap &= ~mask;       /* all-allocated/uni */
                } else {
                        scan->bm_bitmap &= ~mask;       /* all-allocated/uni */
                }
                mask <<= 2;
                pmask <<= 2;
                blk += radix;
        }
}
#ifdef ALIST_DEBUG

static void
hammer_alst_radix_print(hammer_alist_t live, hammer_almeta_t scan,
                        int32_t blk, int32_t radix, int skip, int tab)
{
        u_int32_t mask;
        int next_skip;
        int i;

        if (skip == 1 && live->config->bl_terminal) {
                kprintf(
                    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
                    tab, tab, "",
                    blk, radix,
                    scan->bm_bitmap,
                    scan->bm_bighint
                );
                return;
        }

        if (scan->bm_bitmap == 0) {
                kprintf(
                    "%*.*s(%04x,%d) ALL ALLOCATED\n",
                    tab, tab, "",
                    blk, radix
                );
                return;
        }
        if (scan->bm_bitmap == (u_int32_t)-1) {
                kprintf(
                    "%*.*s(%04x,%d) ALL FREE\n",
                    tab, tab, "",
                    blk, radix
                );
                return;
        }

        kprintf(
            "%*.*s(%04x,%d): %s (%d) bitmap=%08x big=%d {\n",
            tab, tab, "",
            blk, radix,
            (skip == 1 ? "LAYER" : "subtree"),
            radix,
            scan->bm_bitmap,
            scan->bm_bighint
        );

        radix /= HAMMER_ALIST_META_RADIX;
        tab += 4;
        mask = 0x00000003;

        if (skip == 1) {
                /*
                 * Terminal meta node, chains to a stacked A-list.
                 */
                for (i = 0; i < HAMMER_ALIST_META_RADIX; ++i) {
                        if ((scan->bm_bitmap & mask) == mask) {
                                kprintf(
                                    "%*.*s(%04x,%d): ALL FREE\n",
                                    tab, tab, "", blk, radix
                                );
                        } else if ((scan->bm_bitmap & mask) == 0) {
                                kprintf(
                                    "%*.*s(%04x,%d): ALL ALLOCATED\n",
                                    tab, tab, "", blk, radix
                                );
                        } else {
                                live->config->bl_radix_print(
                                        live->info, blk, radix, tab);
                        }
                        blk += radix;
                        mask <<= 2;
                }
        } else {
                next_skip = ((u_int)(skip - 1) / HAMMER_ALIST_META_RADIX);

                for (i = 1; i < skip; i += next_skip) {
                        KKASSERT(mask != 0);
                        if (scan[i].bm_bighint == (int32_t)-1) {
                                kprintf(
                                    "%*.*s(%04x,%d): Terminator\n",
                                    tab, tab, "", blk, radix
                                );
                                break;
                        }
                        if ((scan->bm_bitmap & mask) == mask) {
                                kprintf(
                                    "%*.*s(%04x,%d): ALL FREE\n",
                                    tab, tab, "", blk, radix
                                );
                        } else if ((scan->bm_bitmap & mask) == 0) {
                                kprintf(
                                    "%*.*s(%04x,%d): ALL ALLOCATED\n",
                                    tab, tab, "", blk, radix
                                );
                        } else {
                                hammer_alst_radix_print(
                                    live, &scan[i], blk, radix,
                                    next_skip, tab);
                        }
                        blk += radix;
                        mask <<= 2;
                }
        }
        tab -= 4;
        kprintf("%*.*s}\n", tab, tab, "");
}

#endif
#if !defined(_KERNEL) && defined(ALIST_DEBUG)

static struct hammer_alist_live **layers;       /* initialized by main */
static int32_t layer_radix = -1;
/*
 * Initialize a zone.
 *
 * If allocating is non-zero this init is being called when transitioning out
 * of an all-free state.  Allocate the zone and mark the whole mess as being
 * free so the caller can then allocate out of this zone.
 *
 * If freeing, this init is being called when transitioning out of an
 * initial all-allocated (00) state.  Allocate the zone but leave the whole
 * mess all-allocated.  The caller will then free the appropriate range.
 */
static
int
debug_radix_init(void *info, int32_t blk, int32_t radix,
                 enum hammer_alloc_state state)
{
        hammer_alist_t layer;
        int layer_no = blk / layer_radix;

        printf("lower layer: init (%04x,%d) layer_radix=%d\n",
               blk, radix, layer_radix);
        KKASSERT(layer_radix == radix);
        KKASSERT(layers[layer_no] == NULL);
        layer = layers[layer_no] = hammer_alist_create(radix, 1, NULL, state);
        return(0);
}
static
int32_t
debug_radix_recover(void *info, int32_t blk, int32_t radix, int32_t count)
{
        hammer_alist_t layer;
        int layer_no = blk / layer_radix;
        int32_t n;

        KKASSERT(layer_radix == radix);
        KKASSERT(layers[layer_no] != NULL);
        layer = layers[layer_no];
        n = hammer_alist_recover(layer, blk, 0, count);
        printf("Recover layer %d blk %d result %d/%d\n",
               layer_no, blk, n, count);
        return(n);
}
static
int32_t
debug_radix_find(void *info, int32_t blk, int32_t radix, int32_t atblk)
{
        hammer_alist_t layer;
        int layer_no = blk / layer_radix;
        int32_t res;

        KKASSERT(layer_radix == radix);
        KKASSERT(layers[layer_no] != NULL);
        layer = layers[layer_no];
        res = hammer_alist_find(layer, atblk - blk);
        if (res != HAMMER_ALIST_BLOCK_NONE)
                res += blk;
        return(res);
}
/*
 * This is called when a zone becomes entirely free, typically after a
 * call to debug_radix_free() has indicated that the entire zone is now
 * free.
 */
static
int
debug_radix_destroy(void *info, int32_t blk, int32_t radix)
{
        hammer_alist_t layer;
        int layer_no = blk / layer_radix;

        printf("lower layer: destroy (%04x,%d)\n", blk, radix);
        layer = layers[layer_no];
        KKASSERT(layer != NULL);
        hammer_alist_destroy(layer, NULL);
        layers[layer_no] = NULL;
        return(0);
}
static
int32_t
debug_radix_alloc_fwd(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_alist_t layer = layers[blk / layer_radix];
        int32_t r;

        r = hammer_alist_alloc_fwd(layer, count, atblk - blk);
        *fullp = hammer_alist_isfull(layer);
        if (r != HAMMER_ALIST_BLOCK_NONE)
                r += blk;
        return(r);
}
static
int32_t
debug_radix_alloc_rev(void *info, int32_t blk, int32_t radix,
                      int32_t count, int32_t atblk, int32_t *fullp)
{
        hammer_alist_t layer = layers[blk / layer_radix];
        int32_t r;

        r = hammer_alist_alloc_rev(layer, count, atblk - blk);
        *fullp = hammer_alist_isfull(layer);
        if (r != HAMMER_ALIST_BLOCK_NONE)
                r += blk;
        return(r);
}
static
void
debug_radix_free(void *info, int32_t blk, int32_t radix,
                 int32_t base_blk, int32_t count, int32_t *emptyp)
{
        int layer_no = blk / layer_radix;
        hammer_alist_t layer = layers[layer_no];

        KKASSERT(layer != NULL);
        hammer_alist_free(layer, base_blk, count);
        *emptyp = hammer_alist_isempty(layer);
}
static
void
debug_radix_print(void *info, int32_t blk, int32_t radix, int tab)
{
        hammer_alist_t layer = layers[blk / layer_radix];

        hammer_alist_print(layer, tab);
}
int
main(int ac, char **av)
{
        int32_t size = -1;
        int i;
        hammer_alist_t live;
        hammer_almeta_t meta = NULL;

        for (i = 1; i < ac; ++i) {
                const char *ptr = av[i];
                if (*ptr != '-') {
                        if (size == -1)
                                size = strtol(ptr, NULL, 0);
                        else if (layer_radix == -1)
                                layer_radix = strtol(ptr, NULL, 0);
                        continue;
                }
                ptr += 2;
                fprintf(stderr, "Bad option: %s\n", ptr - 2);
                exit(1);
        }
        if (layer_radix == -1)
                layer_radix = 1;        /* no second storage layer */
        if ((size ^ (size - 1)) != (size << 1) - 1) {
                fprintf(stderr, "size must be a power of 2\n");
                exit(1);
        }
        if ((layer_radix ^ (layer_radix - 1)) != (layer_radix << 1) - 1) {
                fprintf(stderr, "the second layer radix must be a power of 2\n");
                exit(1);
        }

        live = hammer_alist_create(size, layer_radix, NULL,
                                   HAMMER_ASTATE_ALLOC);
        layers = calloc(size, sizeof(hammer_alist_t));

        printf("A-LIST TEST %d blocks, first-layer radix %d, "
               "second-layer radix %d\n",
               size, live->config->bl_radix / layer_radix, layer_radix);

        live->config->bl_radix_init = debug_radix_init;
        live->config->bl_radix_recover = debug_radix_recover;
        live->config->bl_radix_find = debug_radix_find;
        live->config->bl_radix_destroy = debug_radix_destroy;
        live->config->bl_radix_alloc_fwd = debug_radix_alloc_fwd;
        live->config->bl_radix_alloc_rev = debug_radix_alloc_rev;
        live->config->bl_radix_free = debug_radix_free;
        live->config->bl_radix_print = debug_radix_print;

        hammer_alist_free(live, 0, size);
        for (;;) {
                char buf[1024];
                int32_t da = 0;
                int32_t count = 0;
                int32_t atblk;
                int32_t blk;
                int32_t n;

                kprintf("%d/%d> ",
                        live->meta->bm_alist_freeblks, size);
                fflush(stdout);
                if (fgets(buf, sizeof(buf), stdin) == NULL)
                        break;
                switch(buf[0]) {
                case 'p':
                        hammer_alist_print(live, 0);
                        atblk = 0;
                        kprintf("allocated: ");
                        while ((atblk = hammer_alist_find(live, atblk)) != HAMMER_ALIST_BLOCK_NONE) {
                                kprintf(" %d", atblk);
                                ++atblk;
                        }
                        kprintf("\n");
                        break;
                case 'a':
                        atblk = 0;
                        if (sscanf(buf + 1, "%d %d", &count, &atblk) >= 1) {
                                blk = hammer_alist_alloc_fwd(live, count, atblk);
                                kprintf("    R=%04x\n", blk);
                        } else {
                                kprintf("?\n");
                        }
                        break;
                case 'r':
                        atblk = HAMMER_ALIST_BLOCK_MAX;
                        if (sscanf(buf + 1, "%d %d", &count, &atblk) >= 1) {
                                blk = hammer_alist_alloc_rev(live, count, atblk);
                                kprintf("    R=%04x\n", blk);
                        } else {
                                kprintf("?\n");
                        }
                        break;
                case 'f':
                        if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
                                hammer_alist_free(live, da, count);
                                if (hammer_alist_isempty(live))
                                        kprintf("a-list is now 100%% empty\n");
                        } else {
                                kprintf("?\n");
                        }
                        break;
                case 'R':
                        n = hammer_alist_recover(live, 0, 0,
                                        live->meta->bm_alist_base_freeblks);
                        if (n < 0)
                                kprintf("recover: error %d\n", -n);
                        else
                                kprintf("recover: %d free\n", n);
                        break;
2093 "r %d -allocate reverse\n"
2095 "R -recovery a-list\n"
void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        fprintf(stderr, "\n");
        va_end(va);
        exit(1);
}

#endif