Give the A-list code the ability to do a forward or reverse allocation
[dragonfly.git] / sys / vfs / hammer / hammer_alist.c
/*
 * HAMMER_ALIST.C -
 *
 * Bitmap allocator/deallocator, using a radix tree with hinting.
 * Unlimited-size allocations, power-of-2 only, power-of-2 aligned results
 * only.  This module has been separated from the generic kernel module and
 * written specifically for embedding in HAMMER storage structures.
 *
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/Attic/hammer_alist.c,v 1.3 2007/10/16 18:16:42 dillon Exp $
 */
/*
 * This module implements a generic allocator through the use of a hinted
 * radix tree.  All allocations must be in powers of 2 and will return
 * similarly aligned results.  The radix tree typically recurses within
 * a memory buffer and then continues its recursion by chaining to other
 * memory buffers, creating a seamless whole capable of managing any amount
 * of storage.
 *
 * The radix tree is laid out recursively using a linear array.  Each meta
 * node is immediately followed (laid out sequentially in memory) by
 * HAMMER_ALIST_META_RADIX lower level nodes.  This is a recursive structure
 * but one that can be easily scanned through a very simple 'skip'
 * calculation.
 *
 * The radix tree supports an early-termination optimization which
 * effectively allows us to efficiently mix large and small allocations
 * with a single abstraction.  The address space can be partitioned
 * arbitrarily without adding much in the way of additional meta-storage
 * for the allocator.
 *
 * The radix tree supports allocator layering.  By supplying a base_radix > 1
 * the allocator will issue callbacks to recurse into lower layers once
 * higher layers have been exhausted.  Allocations in multiples of base_radix
 * will be entirely retained in the higher level allocator and never recurse.
 *
 * This code can be compiled stand-alone for debugging.
 */
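
/*
 * Layout sketch (illustrative, not tied to the real constants): with a
 * meta radix of 16 and a root skip of 17, the linear array looks like
 *
 *	meta[0]				root meta node
 *	meta[1]				child 0 (itself a subtree root)
 *	meta[1 + 1*next_skip]		child 1, next_skip = (17-1)/16 = 1
 *	...
 *
 * i.e. child i of a node lives at the fixed offset 1 + i * next_skip
 * from its parent, which is the 'skip' arithmetic the scanning code
 * uses throughout this file.
 */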

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include "hammer_disk.h"

#else

#ifndef ALIST_NO_DEBUG
#define ALIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)	assert(exp)
struct malloc_type;

#include "hammer_alist.h"

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */

static int32_t hammer_alst_leaf_alloc_fwd(hammer_almeta_t scan,
					int32_t blk, int count, int32_t atblk);
static int32_t hammer_alst_meta_alloc_fwd(hammer_alist_t live,
					hammer_almeta_t scan,
					int32_t blk, int32_t count,
					int32_t radix, int skip, int32_t atblk);
static int32_t hammer_alst_leaf_alloc_rev(hammer_almeta_t scan,
					int32_t blk, int count, int32_t atblk);
static int32_t hammer_alst_meta_alloc_rev(hammer_alist_t live,
					hammer_almeta_t scan,
					int32_t blk, int32_t count,
					int32_t radix, int skip, int32_t atblk);
static void hammer_alst_leaf_free(hammer_almeta_t scan,
					int32_t relblk, int count);
static void hammer_alst_meta_free(hammer_alist_t live, hammer_almeta_t scan,
					int32_t freeBlk, int32_t count,
					int32_t radix, int skip, int32_t blk);
static int32_t hammer_alst_radix_init(hammer_almeta_t scan,
					int32_t radix, int skip, int32_t count);
#ifdef ALIST_DEBUG
static void hammer_alst_radix_print(hammer_alist_t live,
					hammer_almeta_t scan, int32_t blk,
					int32_t radix, int skip, int tab);
#endif

/*
 * Initialize an a-list config structure for use.  The config structure
 * describes the basic structure of an a-list's topology and may be
 * shared by any number of a-lists which have the same topology.
 *
 * blocks is the total number of blocks, that is the number of blocks
 * handled at this layer multiplied by the base radix.
 *
 * When base_radix != 1 the A-list has only meta elements and does not have
 * any leaves, in order to be able to track partial allocations.
 */
void
hammer_alist_template(hammer_alist_config_t bl, int32_t blocks,
		      int32_t base_radix, int32_t maxmeta)
{
	int radix;
	int skip;

	/*
	 * Calculate radix and skip field used for scanning.  The leaf nodes
	 * in our tree are either BMAP or META nodes depending on whether
	 * we chain to a lower level allocation layer or not.
	 */
	if (base_radix == 1)
		radix = HAMMER_ALIST_BMAP_RADIX;
	else
		radix = HAMMER_ALIST_META_RADIX;
	skip = 1;

	while (radix < blocks / base_radix) {
		radix *= HAMMER_ALIST_META_RADIX;
		skip = skip * HAMMER_ALIST_META_RADIX + 1;
	}

	/*
	 * Increase the radix based on the number of blocks a lower level
	 * allocator is able to handle at the 'base' of our allocator.
	 * If base_radix != 1 the caller will have to initialize the callback
	 * fields to implement the lower level allocator.
	 */
	radix *= base_radix;

	bzero(bl, sizeof(*bl));

	bl->bl_blocks = blocks;
	bl->bl_base_radix = base_radix;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = hammer_alst_radix_init(NULL, bl->bl_radix,
						 bl->bl_skip, blocks);
	++bl->bl_rootblks;	/* one more for freeblks header */
	if (base_radix == 1)
		bl->bl_terminal = 1;
	KKASSERT(maxmeta == 0 || bl->bl_rootblks <= maxmeta);

#if defined(ALIST_DEBUG)
	kprintf(
		"PRIMARY ALIST LAYER manages %d blocks"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks / bl->bl_base_radix,
		(bl->bl_rootblks * sizeof(struct hammer_almeta) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(struct hammer_almeta))
	);
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
}
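
/*
 * Worked example (illustrative; assumes the usual constants
 * HAMMER_ALIST_BMAP_RADIX == 32 and HAMMER_ALIST_META_RADIX == 16):
 * hammer_alist_template(bl, 1024, 1, 0) starts with radix = 32 and
 * skip = 1, then iterates
 *
 *	radix =  32 * 16 =  512		skip =  1 * 16 + 1 =  17
 *	radix = 512 * 16 = 8192		skip = 17 * 16 + 1 = 273
 *
 * so the root meta node covers 8192 blocks (of which only 1024 are
 * managed; the rest are cut off by terminators) and each of its
 * children is next_skip = (273-1)/16 = 17 array elements wide.
 */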

void
hammer_alist_init(hammer_alist_t live)
{
	hammer_alist_config_t bl = live->config;

	live->meta->bm_bighint = 0;
	live->meta->bm_alist_freeblks = 0;
	hammer_alst_radix_init(live->meta + 1, bl->bl_radix,
			       bl->bl_skip, bl->bl_blocks);
}

#if !defined(_KERNEL) && defined(ALIST_DEBUG)

/*
 * hammer_alist_create()	(userland only)
 *
 *	create an alist capable of handling up to the specified number of
 *	blocks.  blocks must be greater than 0.
 *
 *	The smallest alist consists of a single leaf node capable of
 *	managing HAMMER_ALIST_BMAP_RADIX blocks, or (if base_radix != 1)
 *	a single meta node capable of managing HAMMER_ALIST_META_RADIX
 *	blocks which recurses into other storage layers for a total
 *	handling capability of (HAMMER_ALIST_META_RADIX * base_radix) blocks.
 *
 *	Larger a-lists increase their capability exponentially by
 *	HAMMER_ALIST_META_RADIX.
 *
 *	The block count is the total number of blocks inclusive of any
 *	layering.  blocks can be less than base_radix and will result in
 *	a radix tree with a single leaf node which then chains down.
 */

hammer_alist_t
hammer_alist_create(int32_t blocks, int32_t base_radix,
		    struct malloc_type *mtype)
{
	hammer_alist_t live;
	hammer_alist_config_t bl;
	size_t metasize;

	live = kmalloc(sizeof(*live), mtype, M_WAITOK);
	live->config = bl = kmalloc(sizeof(*bl), mtype, M_WAITOK);
	hammer_alist_template(bl, blocks, base_radix, 0);

	metasize = sizeof(*live->meta) * bl->bl_rootblks;
	live->meta = kmalloc(metasize, mtype, M_WAITOK);
	bzero(live->meta, metasize);

#if defined(ALIST_DEBUG)
	kprintf(
		"ALIST representing %d blocks (%d MB of swap)"
		", requiring %dK (%d bytes) of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(*live->meta) + 1023) / 1024,
		(bl->bl_rootblks * sizeof(*live->meta))
	);
	if (base_radix != 1) {
		kprintf("ALIST recurses when it reaches a base_radix of %d\n",
			base_radix);
	}
	kprintf("ALIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	hammer_alist_init(live);
	return(live);
}

void
hammer_alist_destroy(hammer_alist_t live, struct malloc_type *mtype)
{
	kfree(live->config, mtype);
	kfree(live->meta, mtype);
	live->config = NULL;
	live->meta = NULL;
	kfree(live, mtype);
}

#endif
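
/*
 * Usage sketch (userland debug build; the sizes are illustrative
 * assumptions, not requirements):
 *
 *	live = hammer_alist_create(1024, 1, NULL);	single layer
 *	hammer_alist_free(live, 0, 1024);		mark range free
 *	blk = hammer_alist_alloc_fwd(live, 16, 0);	lowest fit
 *	blk = hammer_alist_alloc_rev(live, 16, 1024);	highest fit
 *	hammer_alist_free(live, blk, 16);
 *	hammer_alist_destroy(live, NULL);
 *
 * A freshly created a-list starts out fully allocated; the caller
 * frees the managed range first, exactly as main() does below.
 */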

/*
 * hammer_alist_alloc()
 *
 *	Reserve space in the block bitmap.  Return the base of a contiguous
 *	region or HAMMER_ALIST_BLOCK_NONE if space could not be allocated.
 */

int32_t
hammer_alist_alloc(hammer_alist_t live, int32_t count)
{
	int32_t blk = HAMMER_ALIST_BLOCK_NONE;
	hammer_alist_config_t bl = live->config;

	KKASSERT((count | (count - 1)) == (count << 1) - 1);

	if (bl && count <= bl->bl_radix) {
		/*
		 * When skip is 1 we are at a leaf.  If we are the terminal
		 * allocator we use our native leaf functions and radix will
		 * be HAMMER_ALIST_BMAP_RADIX.  Otherwise we are a meta node
		 * which will chain to another allocator.
		 */
		if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
			KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
			blk = hammer_alst_leaf_alloc_fwd(
				    live->meta + 1, 0, count, 0);
		} else {
			blk = hammer_alst_meta_alloc_fwd(
				    live, live->meta + 1,
				    0, count, bl->bl_radix, bl->bl_skip, 0);
		}
		if (blk != HAMMER_ALIST_BLOCK_NONE)
			live->meta->bm_alist_freeblks -= count;
	}
	return(blk);
}
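
/*
 * The KKASSERT above is a branch-free power-of-2 test: for count == 8,
 * (8 | 7) == 15 == (8 << 1) - 1, while a non-power such as 6 gives
 * (6 | 5) == 7 but (6 << 1) - 1 == 11.  All three allocation entry
 * points share this restriction; frees do not.
 */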

int32_t
hammer_alist_alloc_fwd(hammer_alist_t live, int32_t count, int32_t atblk)
{
	int32_t blk = HAMMER_ALIST_BLOCK_NONE;
	hammer_alist_config_t bl = live->config;

	KKASSERT((count | (count - 1)) == (count << 1) - 1);

	if (bl && count <= bl->bl_radix) {
		/*
		 * When skip is 1 we are at a leaf.  If we are the terminal
		 * allocator we use our native leaf functions and radix will
		 * be HAMMER_ALIST_BMAP_RADIX.  Otherwise we are a meta node
		 * which will chain to another allocator.
		 */
		if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
			KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
			blk = hammer_alst_leaf_alloc_fwd(
				    live->meta + 1, 0, count, atblk);
		} else {
			blk = hammer_alst_meta_alloc_fwd(
				    live, live->meta + 1,
				    0, count, bl->bl_radix, bl->bl_skip, atblk);
		}
		if (blk != HAMMER_ALIST_BLOCK_NONE)
			live->meta->bm_alist_freeblks -= count;
	}
	return(blk);
}

int32_t
hammer_alist_alloc_rev(hammer_alist_t live, int32_t count, int32_t atblk)
{
	hammer_alist_config_t bl = live->config;
	int32_t blk = HAMMER_ALIST_BLOCK_NONE;

	KKASSERT((count | (count - 1)) == (count << 1) - 1);

	if (bl && count < bl->bl_radix) {
		if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
			KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
			blk = hammer_alst_leaf_alloc_rev(
				    live->meta + 1, 0, count, atblk);
		} else {
			blk = hammer_alst_meta_alloc_rev(
				    live, live->meta + 1,
				    0, count, bl->bl_radix, bl->bl_skip, atblk);
		}
		if (blk != HAMMER_ALIST_BLOCK_NONE)
			live->meta->bm_alist_freeblks -= count;
	}
	return(blk);
}
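
/*
 * atblk semantics (sketch, as implemented by the leaf scans below):
 * the _fwd variants return the lowest aligned fit at or above atblk,
 * the _rev variants the highest fit at or below it.  main() below
 * defaults the reverse starting point to HAMMER_ALIST_BLOCK_MAX.
 */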

/*
 * alist_free()
 *
 *	Free up space in the block bitmap.  Panic if an inconsistency
 *	is found.
 *
 *	Unlike allocations, there are no alignment requirements for blkno or
 *	count when freeing blocks.
 */

void
hammer_alist_free(hammer_alist_t live, int32_t blkno, int32_t count)
{
	hammer_alist_config_t bl = live->config;

	KKASSERT(blkno + count <= bl->bl_blocks);
	if (bl->bl_skip == 1 && bl->bl_terminal) {
#ifndef _KERNEL
		KKASSERT(bl->bl_radix == HAMMER_ALIST_BMAP_RADIX);
#endif
		hammer_alst_leaf_free(live->meta + 1, blkno, count);
	} else {
		hammer_alst_meta_free(live, live->meta + 1,
				      blkno, count,
				      bl->bl_radix, bl->bl_skip, 0);
	}
	live->meta->bm_alist_freeblks += count;
}

int
hammer_alist_isfull(hammer_alist_t live)
{
	return(live->meta->bm_alist_freeblks == 0);
}

int
hammer_alist_isempty(hammer_alist_t live)
{
	return(live->meta->bm_alist_freeblks == live->config->bl_blocks);
}

#ifdef ALIST_DEBUG

/*
 * alist_print()	- dump radix tree
 */

void
hammer_alist_print(hammer_alist_t live, int tab)
{
	hammer_alist_config_t bl = live->config;

	kprintf("%*.*sALIST (%d free blocks) {\n",
		tab, tab, "", live->meta->bm_alist_freeblks);
	hammer_alst_radix_print(live, live->meta + 1, 0,
				bl->bl_radix, bl->bl_skip, tab + 4);
	kprintf("%*.*s}\n", tab, tab, "");
}

#endif

/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * hammer_alst_leaf_alloc_fwd()
 *
 *	Allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the HAMMER_ALIST_BMAP_RADIX block allocation cases.  Other cases
 *	are somewhat slower.  The 1 block allocation case is log2 and
 *	extremely quick.
 */

static int32_t
hammer_alst_leaf_alloc_fwd(hammer_almeta_t scan, int32_t blk,
			   int count, int32_t atblk)
{
	u_int32_t orig = scan->bm_bitmap;
	int32_t saveblk = blk;

	/*
	 * Optimize bitmap all-allocated case.  Also, count = 1
	 * case assumes at least 1 bit is free in the bitmap, so
	 * we have to take care of this case here.
	 */
	if (orig == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

#if 0
	/*
	 * Optimized code to allocate one bit out of the bitmap
	 *
	 * mask iterates e.g. 00001111 00000011 00000001
	 *
	 * mask starts at 00001111
	 */
	if (count == 1) {
		u_int32_t mask;
		int j = HAMMER_ALIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->bm_bitmap &= ~(1 << r);
		return(blk + r);
	}
#endif

	/*
	 * non-optimized code to allocate N bits out of the bitmap.
	 * The more bits, the faster the code runs.  It will run
	 * the slowest allocating 2 bits, but since there aren't any
	 * memory ops in the core loop (or shouldn't be, anyway),
	 * you probably won't notice the difference.
	 *
	 * Similar to the blist case, the alist code also requires
	 * allocations to be power-of-2 sized and aligned to the
	 * size of the allocation, which simplifies the algorithm.
	 */
	{
		int j;
		int n = HAMMER_ALIST_BMAP_RADIX - count;
		u_int32_t mask;

		mask = (u_int32_t)-1 >> n;

		for (j = 0; j <= n; j += count) {
			if ((orig & mask) == mask && blk >= atblk) {
				scan->bm_bitmap &= ~mask;
				return(blk);
			}
			mask = mask << count;
			blk += count;
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint if
	 * atblk didn't interfere with the hinting mechanism.
	 */
	if (saveblk >= atblk)
		scan->bm_bighint = count - 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}
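
/*
 * Scan illustration (assuming HAMMER_ALIST_BMAP_RADIX == 32): for
 * count == 4 the loop above walks the aligned nibbles of the bitmap,
 *
 *	mask = 0x0000000F, 0x000000F0, 0x00000F00, ...
 *
 * and succeeds at the first nibble that is all 1s (free) with
 * blk >= atblk, clearing those bits to mark the run allocated.
 */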

/*
 * This version allocates blocks in the reverse direction.
 */
static int32_t
hammer_alst_leaf_alloc_rev(hammer_almeta_t scan, int32_t blk,
			   int count, int32_t atblk)
{
	u_int32_t orig = scan->bm_bitmap;
	int32_t saveblk;

	/*
	 * Optimize bitmap all-allocated case.  Also, count = 1
	 * case assumes at least 1 bit is free in the bitmap, so
	 * we have to take care of this case here.
	 */
	if (orig == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

#if 0
	/*
	 * Optimized code to allocate one bit out of the bitmap
	 */
	if (count == 1) {
		u_int32_t mask;
		int j = HAMMER_ALIST_BMAP_RADIX/2;
		int r = HAMMER_ALIST_BMAP_RADIX - 1;

		mask = ~((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX/2));

		while (j) {
			if ((orig & mask) == 0) {
				r -= j;
				orig <<= j;
			}
			j >>= 1;
			mask <<= j;
		}
		scan->bm_bitmap &= ~(1 << r);
		return(blk + r);
	}
#endif

	/*
	 * non-optimized code to allocate N bits out of the bitmap.
	 * The more bits, the faster the code runs.  It will run
	 * the slowest allocating 2 bits, but since there aren't any
	 * memory ops in the core loop (or shouldn't be, anyway),
	 * you probably won't notice the difference.
	 *
	 * Similar to the blist case, the alist code also requires
	 * allocations to be power-of-2 sized and aligned to the
	 * size of the allocation, which simplifies the algorithm.
	 *
	 * initial mask if count == 2: 1100....0000
	 */
	{
		int j;
		int n = HAMMER_ALIST_BMAP_RADIX - count;
		u_int32_t mask;

		mask = ((u_int32_t)-1 >> n) << n;
		blk += n;
		saveblk = blk;

		for (j = n; j >= 0; j -= count) {
			if ((orig & mask) == mask && blk <= atblk) {
				scan->bm_bitmap &= ~mask;
				return(blk);
			}
			mask = mask >> count;
			blk -= count;
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint if
	 * atblk didn't interfere with it.
	 */
	if (saveblk <= atblk)
		scan->bm_bighint = count - 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * hammer_alst_meta_alloc_fwd()
 *
 *	Allocate at a meta in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */
static int32_t
hammer_alst_meta_alloc_fwd(hammer_alist_t live, hammer_almeta_t scan,
			   int32_t blk, int32_t count,
			   int32_t radix, int skip, int32_t atblk
) {
	hammer_alist_config_t bl;
	u_int32_t mask;
	u_int32_t pmask;
	int32_t saveblk;
	int next_skip;
	int i;

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->bm_bitmap == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	radix /= HAMMER_ALIST_META_RADIX;
	bl = live->config;

	/*
	 * Radix now represents each bitmap entry for this meta node.  If
	 * the number of blocks being allocated can be fully represented,
	 * we allocate directly out of this meta node.
	 *
	 * Meta node bitmaps use 2 bits per block.
	 *
	 *	00	ALL-ALLOCATED
	 *	01	PARTIALLY-FREE/PARTIALLY-ALLOCATED
	 *	10	(RESERVED)
	 *	11	ALL-FREE
	 */
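	/*
	 * Example (assuming HAMMER_ALIST_META_RADIX == 16): if radix is
	 * 64 after the division above, a count of 128 spans two entries,
	 * i.e. n = 128 / 64 * 2 = 4 bits, so the direct scan below tests
	 * mask 0x0000000F, then 0x000000F0, and so on, advancing blk by
	 * nd2 * radix = 128 blocks per step.
	 */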
	if (count >= radix) {
		int n = count / radix * 2;	/* number of bits */
		int nd2 = n / 2;
		int j;

		mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n);
		saveblk = blk;
		for (j = 0; j < HAMMER_ALIST_META_RADIX; j += nd2) {
			if ((scan->bm_bitmap & mask) == mask && blk >= atblk) {
				scan->bm_bitmap &= ~mask;
				return(blk);
			}
			mask <<= n;
			blk += radix * nd2;
		}
		if (scan->bm_bighint >= count && saveblk >= atblk)
			scan->bm_bighint = count >> 1;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	/*
	 * If the count is too big we couldn't allocate anything from a
	 * recursion even if the sub-tree were entirely free.
	 */
	saveblk = blk;
	if (count > radix)
		goto failed;

	/*
	 * If not we have to recurse.
	 */
	mask = 0x00000003;
	pmask = 0x00000001;

	if (skip == 1) {
		/*
		 * If skip is 1 we are a meta leaf node, which means there
		 * is another allocation layer we have to dive down into.
		 */
		for (i = 0; i < HAMMER_ALIST_META_RADIX; ++i) {
			/*
			 * If the next layer is completely free then call
			 * its init function to initialize it, then free
			 * all of its blocks before trying to allocate from
			 * it.
			 *
			 * bl_radix_init returns an error code or 0 on
			 * success.
			 */
			if ((scan->bm_bitmap & mask) == mask &&
			    blk + radix > atblk) {
				if (bl->bl_radix_init(live->info, blk, radix) == 0) {
					int32_t empty;
					scan->bm_bitmap &= ~mask;
					scan->bm_bitmap |= pmask;
					bl->bl_radix_free(live->info, blk,
							  radix, 0, radix,
							  &empty);
				}
			}

			/*
			 * If there may be some free blocks try to allocate
			 * out of the layer.  If the layer indicates that
			 * it is completely full then clear both bits to
			 * propagate the condition.
			 */
			if ((scan->bm_bitmap & mask) == pmask &&
			    blk + radix > atblk) {
				int32_t r;
				int32_t full;

				r = bl->bl_radix_alloc_fwd(live->info, blk,
							   radix, count, atblk,
							   &full);
				if (r != HAMMER_ALIST_BLOCK_NONE) {
					if (full) {
						scan->bm_bitmap &= ~mask;
						bl->bl_radix_destroy(live->info, blk, radix);
					}
					return(r);
				}
			}
			blk += radix;
			mask <<= 2;
			pmask <<= 2;
		}
	} else {
		/*
		 * Otherwise there are sub-records in the current a-list
		 * layer.  We can also peek into the sub-layers to get
		 * more accurate size hints.
		 */
		next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (int32_t)-1) {
				/*
				 * Terminator
				 */
				break;
			}

			/*
			 * Initialize bitmap if allocating from the all-free
			 * case.
			 */
			if ((scan->bm_bitmap & mask) == mask) {
				scan[i].bm_bitmap = (u_int32_t)-1;
				scan[i].bm_bighint = radix;
			}

			if (count <= scan[i].bm_bighint &&
			    blk + radix > atblk) {
				/*
				 * count fits in object, recurse into the
				 * next layer.  If the next_skip is 1 it
				 * will be either a normal leaf or a meta
				 * leaf.
				 */
				int32_t r;

				if (next_skip == 1 && bl->bl_terminal) {
					r = hammer_alst_leaf_alloc_fwd(
						&scan[i], blk, count, atblk);
				} else {
					r = hammer_alst_meta_alloc_fwd(
						live, &scan[i],
						blk, count,
						radix, next_skip, atblk);
				}
				if (r != HAMMER_ALIST_BLOCK_NONE) {
					if (scan[i].bm_bitmap == 0) {
						scan->bm_bitmap &= ~mask;
					} else {
						scan->bm_bitmap &= ~mask;
						scan->bm_bitmap |= pmask;
					}
					return(r);
				}
			}
			blk += radix;
			mask <<= 2;
			pmask <<= 2;
		}
	}

failed:
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count && saveblk >= atblk)
		scan->bm_bighint = count >> 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * This version allocates blocks in the reverse direction.
 */
static int32_t
hammer_alst_meta_alloc_rev(hammer_alist_t live, hammer_almeta_t scan,
			   int32_t blk, int32_t count,
			   int32_t radix, int skip, int32_t atblk
) {
	hammer_alist_config_t bl;
	int i;
	int j;
	u_int32_t mask;
	u_int32_t pmask;
	int32_t saveblk;
	int next_skip;

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->bm_bitmap == 0) {
		scan->bm_bighint = 0;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	radix /= HAMMER_ALIST_META_RADIX;
	bl = live->config;

	/*
	 * Radix now represents each bitmap entry for this meta node.  If
	 * the number of blocks being allocated can be fully represented,
	 * we allocate directly out of this meta node.
	 *
	 * Meta node bitmaps use 2 bits per block.
	 *
	 *	00	ALL-ALLOCATED
	 *	01	PARTIALLY-FREE/PARTIALLY-ALLOCATED
	 *	10	(RESERVED)
	 *	11	ALL-FREE
	 */
	if (count >= radix) {
		int n = count / radix * 2;	/* number of bits */
		int nd2 = n / 2;		/* number of radices */
		int j;

		/*
		 * Initial mask if e.g. n == 2: 1100....0000
		 */
		mask = (u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - n) <<
			(HAMMER_ALIST_BMAP_RADIX - n);
		blk += (HAMMER_ALIST_META_RADIX - nd2) * radix;
		saveblk = blk;
		for (j = HAMMER_ALIST_META_RADIX - nd2; j >= 0; j -= nd2) {
			if ((scan->bm_bitmap & mask) == mask && blk <= atblk) {
				scan->bm_bitmap &= ~mask;
				return(blk);
			}
			mask >>= n;
			blk -= nd2 * radix;
		}
		if (scan->bm_bighint >= count && saveblk <= atblk)
			scan->bm_bighint = count >> 1;
		return(HAMMER_ALIST_BLOCK_NONE);
	}

	/*
	 * If the count is too big we couldn't allocate anything from a
	 * recursion even if the sub-tree were entirely free.
	 */
	if (count > radix) {
		saveblk = atblk;	/* make it work for the conditional */
		goto failed;		/* at the failed label */
	}

	if (skip == 1) {
		/*
		 * We need the recurse but we are at a meta node leaf, which
		 * means there is another layer under us.
		 */
		mask = 0xC0000000;
		pmask = 0x40000000;
		blk += radix * HAMMER_ALIST_META_RADIX - radix;
		saveblk = blk;

		for (i = 0; i < HAMMER_ALIST_META_RADIX; ++i) {
			/*
			 * If the next layer is completely free then call
			 * its init function to initialize it and mark it
			 * partially free.
			 */
			if ((scan->bm_bitmap & mask) == mask && blk <= atblk) {
				if (bl->bl_radix_init(live->info, blk, radix) == 0) {
					int32_t empty;
					scan->bm_bitmap &= ~mask;
					scan->bm_bitmap |= pmask;
					bl->bl_radix_free(live->info, blk,
							  radix, 0, radix,
							  &empty);
				}
			}

			/*
			 * If there may be some free blocks try to allocate
			 * out of the layer.  If the layer indicates that
			 * it is completely full then clear both bits to
			 * propagate the condition.
			 */
			if ((scan->bm_bitmap & mask) == pmask && blk <= atblk) {
				int32_t r;
				int32_t full;

				r = bl->bl_radix_alloc_rev(live->info, blk,
							   radix, count,
							   atblk, &full);
				if (r != HAMMER_ALIST_BLOCK_NONE) {
					if (full) {
						scan->bm_bitmap &= ~mask;
						bl->bl_radix_destroy(live->info, blk, radix);
					}
					return(r);
				}
			}
			mask >>= 2;
			pmask >>= 2;
			blk -= radix;
		}
	} else {
		/*
		 * Since we are going in the reverse direction we need an
		 * extra loop to determine if there is a terminator, then
		 * run backwards.
		 *
		 * This is a little weird but we do not want to overflow the
		 * mask/pmask in the loop.
		 */
		next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
		j = 0;
		for (i = 1; i < skip; i += next_skip) {
			if (scan[i].bm_bighint == (int32_t)-1) {
				KKASSERT(j != 0);
				break;
			}
			blk += radix;
			j += 2;
		}
		blk -= radix;
		j -= 2;
		mask = 0x00000003 << j;
		pmask = 0x00000001 << j;
		i -= next_skip;
		saveblk = blk;

		while (i >= 1) {
			/*
			 * Initialize the bitmap in the child if allocating
			 * from the all-free case.
			 */
			if ((scan->bm_bitmap & mask) == mask) {
				scan[i].bm_bitmap = (u_int32_t)-1;
				scan[i].bm_bighint = radix;
			}

			/*
			 * Handle various count cases.  Bighint may be too
			 * large but is never too small.
			 */
			if (count <= scan[i].bm_bighint && blk <= atblk) {
				/*
				 * count fits in object
				 */
				int32_t r;
				if (next_skip == 1 && bl->bl_terminal) {
					r = hammer_alst_leaf_alloc_rev(
						&scan[i], blk, count, atblk);
				} else {
					r = hammer_alst_meta_alloc_rev(
						live, &scan[i],
						blk, count,
						radix, next_skip, atblk);
				}
				if (r != HAMMER_ALIST_BLOCK_NONE) {
					if (scan[i].bm_bitmap == 0) {
						scan->bm_bitmap &= ~mask;
					} else {
						scan->bm_bitmap &= ~mask;
						scan->bm_bitmap |= pmask;
					}
					return(r);
				}
			}
			blk -= radix;
			mask >>= 2;
			pmask >>= 2;
			i -= next_skip;
		}
	}

failed:
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 * Since we are restricted to powers of 2, the next highest count
	 * we might be able to allocate is (count >> 1).
	 */
	if (scan->bm_bighint >= count && saveblk <= atblk)
		scan->bm_bighint = count >> 1;
	return(HAMMER_ALIST_BLOCK_NONE);
}

/*
 * hammer_alst_leaf_free()
 *
 *	Free allocated blocks from leaf bitmap.  The allocation code is
 *	restricted to powers of 2, the freeing code is not.
 */
static void
hammer_alst_leaf_free(hammer_almeta_t scan, int32_t blk, int count) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	    \_________/\__/
	 *		 v      n
	 */
	int n = blk & (HAMMER_ALIST_BMAP_RADIX - 1);
	u_int32_t mask;

	mask = ((u_int32_t)-1 << n) &
	    ((u_int32_t)-1 >> (HAMMER_ALIST_BMAP_RADIX - count - n));

	if (scan->bm_bitmap & mask)
		panic("hammer_alst_leaf_free: freeing free block");
	scan->bm_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = HAMMER_ALIST_BMAP_RADIX;
}
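
/*
 * Mask construction example: freeing count == 3 blocks at bit offset
 * n == 4 (assuming HAMMER_ALIST_BMAP_RADIX == 32) gives
 *
 *	(0xFFFFFFFF << 4) & (0xFFFFFFFF >> 25) == 0x00000070
 *
 * i.e. exactly bits 4-6, which are asserted to be 0 (allocated) and
 * then set back to 1 (free).
 */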

/*
 * hammer_alst_meta_free()
 *
 *	Free allocated blocks from radix tree meta info.
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */
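
/*
 * For example, with children covering 16 blocks each, freeing
 * count == 24 at block 20 splits into a partial free of blocks 20-31
 * in the child covering 16-31 and a partial free of blocks 32-43 in
 * the child covering 32-47; neither piece needs to be power-of-2
 * sized or aligned.
 */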

static void
hammer_alst_meta_free(hammer_alist_t live, hammer_almeta_t scan,
		      int32_t freeBlk, int32_t count,
		      int32_t radix, int skip, int32_t blk
) {
	hammer_alist_config_t bl;
	int next_skip;
	u_int32_t mask;
	u_int32_t pmask;
	int i;

	/*
	 * Break the free down into its components.  Because it is so easy
	 * to implement, frees are not limited to power-of-2 sizes.
	 *
	 * Each block in a meta-node bitmap takes two bits.
	 */
	radix /= HAMMER_ALIST_META_RADIX;
	bl = live->config;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	mask = 0x00000003 << (i * 2);
	pmask = 0x00000001 << (i * 2);

	if (skip == 1) {
		/*
		 * Our meta node is a leaf node, which means it must recurse
		 * into another allocator.
		 */
		while (i < HAMMER_ALIST_META_RADIX && blk < freeBlk + count) {
			int32_t v;

			v = blk + radix - freeBlk;
			if (v > count)
				v = count;

			if (scan->bm_bighint == (int32_t)-1)
				panic("hammer_alst_meta_free: freeing unexpected range");

			if (freeBlk == blk && count >= radix) {
				/*
				 * When the sub-tree becomes entirely free
				 * we have to destroy it if it was previously
				 * partially allocated.  If it was previously
				 * fully allocated it has already been
				 * destroyed (or never inited in the first
				 * place).
				 */
				int32_t empty;

				if ((scan->bm_bitmap & mask) != 0) {
					bl->bl_radix_free(live->info, blk, radix,
							  freeBlk - blk, v, &empty);
					bl->bl_radix_destroy(live->info, blk, radix);
				}
				scan->bm_bitmap |= mask;
				scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
				/* XXX bighint not being set properly */
			} else {
				/*
				 * Recursion case.  If freeing from an
				 * all-allocated state init the sub-tree
				 * first.
				 */
				int32_t empty;

				if ((scan->bm_bitmap & mask) == 0)
					bl->bl_radix_init(live->info, blk, radix);
				bl->bl_radix_free(live->info, blk, radix,
						  freeBlk - blk, v, &empty);
				if (empty) {
					scan->bm_bitmap |= mask;
					scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
					bl->bl_radix_destroy(live->info, blk, radix);
					/* XXX bighint not being set properly */
				} else {
					scan->bm_bitmap |= pmask;
					if (scan->bm_bighint < radix / 2)
						scan->bm_bighint = radix / 2;
					/* XXX bighint not being set properly */
				}
			}
			++i;
			mask <<= 2;
			pmask <<= 2;
			count -= v;
			freeBlk += v;
			blk += radix;
		}
	} else {
		next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
		i = 1 + i * next_skip;

		while (i <= skip && blk < freeBlk + count) {
			int32_t v;

			v = blk + radix - freeBlk;
			if (v > count)
				v = count;

			if (scan->bm_bighint == (int32_t)-1)
				panic("hammer_alst_meta_free: freeing unexpected range");

			if (freeBlk == blk && count >= radix) {
				/*
				 * All-free case, no need to update sub-tree
				 */
				scan->bm_bitmap |= mask;
				scan->bm_bighint = radix * HAMMER_ALIST_META_RADIX;
				/* XXX bighint not being set properly */
			} else {
				/*
				 * Recursion case
				 */
				if (next_skip == 1 && bl->bl_terminal) {
					hammer_alst_leaf_free(&scan[i], freeBlk, v);
				} else {
					hammer_alst_meta_free(live, &scan[i],
							      freeBlk, v,
							      radix, next_skip,
							      blk);
				}
				if (scan[i].bm_bitmap == (u_int32_t)-1)
					scan->bm_bitmap |= mask;
				else
					scan->bm_bitmap |= pmask;
				if (scan->bm_bighint < scan[i].bm_bighint)
					scan->bm_bighint = scan[i].bm_bighint;
			}
			mask <<= 2;
			pmask <<= 2;
			count -= v;
			freeBlk += v;
			blk += radix;
			i += next_skip;
		}
	}
}

/*
 * hammer_alst_radix_init()
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	number of meta-nodes required to manage 'count' blocks.
 *
 *	The required space may be truncated due to early termination records.
 */
static int32_t
hammer_alst_radix_init(hammer_almeta_t scan, int32_t radix,
		       int skip, int32_t count)
{
	int i;
	int next_skip;
	int32_t memindex = 1;
	u_int32_t mask;
	u_int32_t pmask;

	/*
	 * Basic initialization of the almeta for meta or leaf nodes
	 */
	if (scan) {
		scan->bm_bighint = 0;
		scan->bm_bitmap = 0;
	}

	/*
	 * We are at a leaf, we only eat one meta element.
	 */
	if (skip == 1)
		return(memindex);

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */
	radix /= HAMMER_ALIST_META_RADIX;
	next_skip = (skip - 1) / HAMMER_ALIST_META_RADIX;
	mask = 0x00000003;
	pmask = 0x00000001;

	for (i = 1; i <= skip; i += next_skip) {
		/*
		 * We eat up to this record
		 */
		memindex = i;

		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex += hammer_alst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip,
			    radix
			);
			count -= radix;
			/* already marked as wholly allocated */
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex += hammer_alst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip,
			    count
			);
			count = 0;

			/*
			 * Mark as partially allocated
			 */
			if (scan)
				scan->bm_bitmap |= pmask;
		} else {
			/*
			 * Add terminator and break out.  The terminator
			 * eats the meta node at scan[i].
			 */
			++memindex;
			if (scan)
				scan[i].bm_bighint = (int32_t)-1;
			/* already marked as wholly allocated */
			break;
		}
		mask <<= 2;
		pmask <<= 2;
	}
	return(memindex);
}
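
/*
 * Sizing example (continuing the template example above): for
 * radix 8192, skip 273, count 1024, only the first two children
 * (2 * 512 blocks) get fully initialized subtrees; the third child
 * slot becomes a terminator and the remaining thirteen are never
 * materialized, so bl_rootblks stays far below the worst-case
 * skip of 273.
 */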

#ifdef ALIST_DEBUG

static void
hammer_alst_radix_print(hammer_alist_t live, hammer_almeta_t scan,
			int32_t blk, int32_t radix, int skip, int tab)
{
	int i;
	int next_skip;
	int lastState = 0;
	u_int32_t mask;

	if (skip == 1 && live->config->bl_terminal) {
		kprintf(
		    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
		    tab, tab, "",
		    blk, radix,
		    scan->bm_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->bm_bitmap == 0) {
		kprintf(
		    "%*.*s(%04x,%d) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}
	if (scan->bm_bitmap == (u_int32_t)-1) {
		kprintf(
		    "%*.*s(%04x,%d) ALL FREE\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}

	kprintf(
	    "%*.*s(%04x,%d): %s (%d) bitmap=%08x big=%d {\n",
	    tab, tab, "",
	    blk, radix,
	    (skip == 1 ? "LAYER" : "subtree"),
	    radix,
	    scan->bm_bitmap,
	    scan->bm_bighint
	);

	radix /= HAMMER_ALIST_META_RADIX;
	tab += 4;
	mask = 0x00000003;

	if (skip == 1) {
		for (i = 0; i < HAMMER_ALIST_META_RADIX; ++i) {
			if ((scan->bm_bitmap & mask) == mask) {
				kprintf(
				    "%*.*s(%04x,%d): ALL FREE\n",
				    tab, tab, "",
				    blk, radix
				);
			} else if ((scan->bm_bitmap & mask) == 0) {
				kprintf(
				    "%*.*s(%04x,%d): ALL ALLOCATED\n",
				    tab, tab, "",
				    blk, radix
				);
			} else {
				live->config->bl_radix_print(
				    live->info, blk, radix, tab);
			}
			blk += radix;
			mask <<= 2;
		}
	} else {
		next_skip = ((u_int)skip / HAMMER_ALIST_META_RADIX);

		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (int32_t)-1) {
				kprintf(
				    "%*.*s(%04x,%d): Terminator\n",
				    tab, tab, "",
				    blk, radix
				);
				lastState = 0;
				break;
			}
			if ((scan->bm_bitmap & mask) == mask) {
				kprintf(
				    "%*.*s(%04x,%d): ALL FREE\n",
				    tab, tab, "",
				    blk, radix
				);
			} else if ((scan->bm_bitmap & mask) == 0) {
				kprintf(
				    "%*.*s(%04x,%d): ALL ALLOCATED\n",
				    tab, tab, "",
				    blk, radix
				);
			} else {
				hammer_alst_radix_print(
				    live,
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    tab
				);
			}
			blk += radix;
			mask <<= 2;
		}
	}
	tab -= 4;

	kprintf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef ALIST_DEBUG

static struct hammer_alist_live **layers;	/* initialized by main */
static int32_t layer_radix = -1;

static
int
debug_radix_init(void *info, int32_t blk, int32_t radix)
{
	hammer_alist_t layer;
	int layer_no = blk / layer_radix;

	printf("lower layer: init (%04x,%d)\n", blk, radix);
	KKASSERT(layers[layer_no] == NULL);
	layer = layers[layer_no] = hammer_alist_create(radix, 1, NULL);
	return(0);
}

static
int
debug_radix_destroy(void *info, int32_t blk, int32_t radix)
{
	hammer_alist_t layer;
	int layer_no = blk / layer_radix;

	printf("lower layer: destroy (%04x,%d)\n", blk, radix);
	layer = layers[layer_no];
	KKASSERT(layer != NULL);
	hammer_alist_destroy(layer, NULL);
	layers[layer_no] = NULL;
	return(0);
}

static
int32_t
debug_radix_alloc_fwd(void *info, int32_t blk, int32_t radix,
		      int32_t count, int32_t atblk, int32_t *fullp)
{
	hammer_alist_t layer = layers[blk / layer_radix];
	int32_t r;

	r = hammer_alist_alloc_fwd(layer, count, atblk - blk);
	*fullp = (layer->meta->bm_alist_freeblks == 0);
	if (r != HAMMER_ALIST_BLOCK_NONE)
		r += blk;
	return(r);
}

static
int32_t
debug_radix_alloc_rev(void *info, int32_t blk, int32_t radix,
		      int32_t count, int32_t atblk, int32_t *fullp)
{
	hammer_alist_t layer = layers[blk / layer_radix];
	int32_t r;

	r = hammer_alist_alloc_rev(layer, count, atblk - blk);
	*fullp = (layer->meta->bm_alist_freeblks == 0);
	if (r != HAMMER_ALIST_BLOCK_NONE)
		r += blk;
	return(r);
}

static
void
debug_radix_free(void *info, int32_t blk, int32_t radix,
		 int32_t base_blk, int32_t count, int32_t *emptyp)
{
	int layer_no = blk / layer_radix;
	hammer_alist_t layer = layers[layer_no];

	if (layer == NULL) {
		layer = hammer_alist_create(layer_radix, 1, NULL);
		layers[layer_no] = layer;
	}
	hammer_alist_free(layer, base_blk, count);
	*emptyp = (layer->meta->bm_alist_freeblks == layer_radix);
}

static
void
debug_radix_print(void *info, int32_t blk, int32_t radix, int tab)
{
	hammer_alist_t layer = layers[blk / layer_radix];

	hammer_alist_print(layer, tab);
}

int
main(int ac, char **av)
{
	int32_t size = -1;
	int i;
	hammer_alist_t live;
	hammer_almeta_t meta = NULL;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			if (size == -1)
				size = strtol(ptr, NULL, 0);
			else if (layer_radix == -1)
				layer_radix = strtol(ptr, NULL, 0);
			else
				;
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	if (size == -1)
		size = 1024;
	if (layer_radix == -1)
		layer_radix = 1;	/* no second storage layer */
	if ((size ^ (size - 1)) != (size << 1) - 1) {
		fprintf(stderr, "size must be a power of 2\n");
		exit(1);
	}
	if ((layer_radix ^ (layer_radix - 1)) != (layer_radix << 1) - 1) {
		fprintf(stderr, "the second layer radix must be a power of 2\n");
		exit(1);
	}

	live = hammer_alist_create(size, layer_radix, NULL);
	layers = calloc(size, sizeof(hammer_alist_t));

	printf("A-LIST TEST %d blocks, first-layer radix %d, "
	       "second-layer radix %d\n",
	       size, live->config->bl_radix / layer_radix, layer_radix);

	live->config->bl_radix_init = debug_radix_init;
	live->config->bl_radix_destroy = debug_radix_destroy;
	live->config->bl_radix_alloc_fwd = debug_radix_alloc_fwd;
	live->config->bl_radix_alloc_rev = debug_radix_alloc_rev;
	live->config->bl_radix_free = debug_radix_free;
	live->config->bl_radix_print = debug_radix_print;

	hammer_alist_free(live, 0, size);

	for (;;) {
		char buf[1024];
		int32_t da = 0;
		int32_t count = 0;
		int32_t atblk;
		int32_t blk;

		kprintf("%d/%d> ",
			live->meta->bm_alist_freeblks, size);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'p':
			hammer_alist_print(live, 0);
			break;
		case 'a':
			atblk = 0;
			if (sscanf(buf + 1, "%d %d", &count, &atblk) >= 1) {
				blk = hammer_alist_alloc_fwd(live, count, atblk);
				kprintf("    R=%04x\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'r':
			atblk = HAMMER_ALIST_BLOCK_MAX;
			if (sscanf(buf + 1, "%d %d", &count, &atblk) >= 1) {
				blk = hammer_alist_alloc_rev(live, count, atblk);
				kprintf("    R=%04x\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				hammer_alist_free(live, da, count);
			} else {
				kprintf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "r %d       -allocate reverse\n"
			    "f %x %d    -free\n"
			    "h/?        -help"
			);
			break;
		default:
			kprintf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	__va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	__va_end(va);
	exit(1);
}

#endif