2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.14 2008/06/01 21:05:39 dillon Exp $
42 static hammer_off_t hammer_find_hole(hammer_mount_t hmp,
43 hammer_holes_t holes, int bytes);
44 static void hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
45 hammer_off_t offset, int bytes);
46 static void hammer_clean_holes(hammer_mount_t hmp, hammer_holes_t holes,
50 * Allocate bytes from a zone
/*
 * NOTE(review): this extract has the original file's line numbers fused onto
 * the start of every line, and gaps in that numbering show that intermediate
 * statements/braces were elided.  All code tokens are kept byte-identical;
 * only comments are added.
 *
 * Allocates "bytes" (8-byte rounded, <= one buffer) from the blockmap of
 * the given zone under trans->hmp->blockmap_lock.  The allocation iterator
 * either reuses a cached hole or appends at rootmap->next_offset, diving
 * the two-layer (layer1 -> layer2 -> big-block) radix structure and
 * initializing/CRC-validating entries as it goes.
 */
53 hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
54 int bytes, int *errorp)
56 hammer_volume_t root_volume;
57 hammer_blockmap_t rootmap;
58 struct hammer_blockmap_layer1 *layer1;
59 struct hammer_blockmap_layer2 *layer2;
60 hammer_buffer_t buffer1 = NULL;
61 hammer_buffer_t buffer2 = NULL;
62 hammer_buffer_t buffer3 = NULL;
63 hammer_off_t tmp_offset;
64 hammer_off_t next_offset;
65 hammer_off_t layer1_offset;
66 hammer_off_t layer2_offset;
67 hammer_off_t bigblock_offset;
/* Only the zoned (non-raw) blockmaps may be allocated from. */
72 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
73 root_volume = hammer_get_root_volume(trans->hmp, errorp);
/* Sanity-check the per-zone root blockmap before using its offsets. */
76 rootmap = &trans->hmp->blockmap[zone];
77 KKASSERT(rootmap->phys_offset != 0);
78 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
79 HAMMER_ZONE_RAW_BUFFER_INDEX);
80 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
81 KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);
84 * Deal with alignment and buffer-boundary issues.
86 * Be careful, certain primary alignments are used below to allocate
87 * new blockmap blocks.
/* Round the request up to 8 bytes; it may not exceed one buffer. */
89 bytes = (bytes + 7) & ~7;
90 KKASSERT(bytes > 0 && bytes <= HAMMER_BUFSIZE);
/* All blockmap mutation below is serialized by this lock. */
92 lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
95 * Try to use a known-free hole, otherwise append.
97 next_offset = hammer_find_hole(trans->hmp, &trans->hmp->holes[zone],
99 if (next_offset == 0) {
100 next_offset = rootmap->next_offset;
108 * The allocation request may not cross a buffer boundary.
/*
 * If the request would straddle a 16K buffer boundary, record the
 * unusable tail of the current buffer as a hole and bump next_offset
 * to the start of the next buffer.
 */
110 tmp_offset = next_offset + bytes - 1;
111 if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
112 skip_amount = HAMMER_BUFSIZE -
113 ((int)next_offset & HAMMER_BUFMASK);
114 hammer_add_hole(trans->hmp, &trans->hmp->holes[zone],
115 next_offset, skip_amount);
116 next_offset = tmp_offset & ~HAMMER_BUFMASK64;
120 * Dive layer 1. If we are starting a new layer 1 entry,
121 * allocate a layer 2 block for it.
123 layer1_offset = rootmap->phys_offset +
124 HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
125 layer1 = hammer_bread(trans->hmp, layer1_offset, errorp, &buffer1);
126 KKASSERT(*errorp == 0);
127 KKASSERT(next_offset <= rootmap->alloc_offset);
130 * Check CRC if not allocating into uninitialized space
/*
 * Layer1 entries past alloc_offset are uninitialized and would fail
 * a CRC check spuriously, hence the guard.
 */
132 if ((next_offset != rootmap->alloc_offset) ||
133 (next_offset & HAMMER_BLOCKMAP_LAYER2_MASK)) {
134 if (layer1->layer1_crc != crc32(layer1,
135 HAMMER_LAYER1_CRCSIZE)) {
136 Debugger("CRC FAILED: LAYER1");
141 * Allocate layer2 backing store in layer1 if necessary. next_offset
142 * can skip to a bigblock boundary but alloc_offset is at least
143 * bigblock-aligned so that's ok.
145 if ((next_offset == rootmap->alloc_offset &&
146 (next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
147 layer1->phys_offset == HAMMER_BLOCKMAP_FREE
149 KKASSERT((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0);
150 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
151 bzero(layer1, sizeof(*layer1));
152 layer1->phys_offset =
153 hammer_freemap_alloc(trans, next_offset, errorp);
154 layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
155 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
156 hammer_modify_buffer_done(buffer1);
157 KKASSERT(*errorp == 0);
159 KKASSERT(layer1->phys_offset);
162 * If layer1 indicates no free blocks in layer2 and our alloc_offset
163 * is not in layer2, skip layer2 entirely.
/*
 * Skip to the next layer2 boundary; on reaching the zone limit, wrap
 * to the start of the zone.  The loops counter (declared in an elided
 * line) bounds the number of wraps to avoid spinning forever.
 */
165 if (layer1->blocks_free == 0 &&
166 ((next_offset ^ rootmap->alloc_offset) & ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
167 hkprintf("blockmap skip1 %016llx\n", next_offset);
168 next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
169 ~HAMMER_BLOCKMAP_LAYER2_MASK;
170 if (next_offset >= trans->hmp->zone_limits[zone]) {
171 hkprintf("blockmap wrap1\n");
172 next_offset = HAMMER_ZONE_ENCODE(zone, 0);
173 if (++loops == 2) { /* XXX poor-man's */
183 * Dive layer 2, each entry represents a large-block.
185 layer2_offset = layer1->phys_offset +
186 HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
187 layer2 = hammer_bread(trans->hmp, layer2_offset, errorp, &buffer2);
188 KKASSERT(*errorp == 0);
191 * Check CRC if not allocating into uninitialized space
193 if (next_offset != rootmap->alloc_offset ||
194 (next_offset & HAMMER_LARGEBLOCK_MASK64)) {
195 if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
196 Debugger("CRC FAILED: LAYER2");
200 if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
202 * We are at the beginning of a new bigblock
204 if (next_offset == rootmap->alloc_offset ||
205 layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
207 * Allocate the bigblock in layer2 if diving into
208 * uninitialized space or if the block was previously
/*
 * Charge the new big-block against layer1's free count and
 * re-CRC both layers after modification.
 */
211 hammer_modify_buffer(trans, buffer1,
212 layer1, sizeof(*layer1));
213 KKASSERT(layer1->blocks_free);
214 --layer1->blocks_free;
215 layer1->layer1_crc = crc32(layer1,
216 HAMMER_LAYER1_CRCSIZE);
217 hammer_modify_buffer_done(buffer1);
218 hammer_modify_buffer(trans, buffer2,
219 layer2, sizeof(*layer2));
220 bzero(layer2, sizeof(*layer2));
221 layer2->u.phys_offset =
222 hammer_freemap_alloc(trans, next_offset,
224 layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
225 layer2->entry_crc = crc32(layer2,
226 HAMMER_LAYER2_CRCSIZE);
227 hammer_modify_buffer_done(buffer2);
228 KKASSERT(*errorp == 0);
229 } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
231 * We have encountered a block that is already
232 * partially allocated. We must skip this block.
234 hkprintf("blockmap skip2 %016llx %d\n",
235 next_offset, layer2->bytes_free);
236 next_offset += HAMMER_LARGEBLOCK_SIZE;
237 if (next_offset >= trans->hmp->zone_limits[zone]) {
238 next_offset = HAMMER_ZONE_ENCODE(zone, 0);
239 hkprintf("blockmap wrap2\n");
240 if (++loops == 2) { /* XXX poor-man's */
250 * We are appending within a bigblock. It is possible that
251 * the blockmap has been marked completely free via a prior
252 * pruning operation. We no longer reset the append index
253 * for that case because it compromises the UNDO by allowing
257 KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
/* Commit the byte count against the big-block's free space. */
261 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
262 layer2->bytes_free -= bytes;
263 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
264 hammer_modify_buffer_done(buffer2);
265 KKASSERT(layer2->bytes_free >= 0);
268 * If the buffer was completely free we do not have to read it from
269 * disk, call hammer_bnew() to instantiate it.
271 if ((next_offset & HAMMER_BUFMASK) == 0) {
272 bigblock_offset = layer2->u.phys_offset +
273 (next_offset & HAMMER_LARGEBLOCK_MASK64);
274 hammer_bnew(trans->hmp, bigblock_offset, errorp, &buffer3);
278 * Adjust our iterator and alloc_offset. The layer1 and layer2
279 * space beyond alloc_offset is uninitialized. alloc_offset must
280 * be big-block aligned.
/*
 * Only advance the append iterator when the allocation did not come
 * out of a cached hole (used_hole is declared in an elided line).
 */
282 if (used_hole == 0) {
283 hammer_modify_volume(trans, root_volume, NULL, 0);
284 rootmap->next_offset = next_offset + bytes;
285 if (rootmap->alloc_offset < rootmap->next_offset) {
286 rootmap->alloc_offset =
287 (rootmap->next_offset + HAMMER_LARGEBLOCK_MASK) &
288 ~HAMMER_LARGEBLOCK_MASK64;
290 hammer_modify_volume_done(root_volume);
/* Release buffers/volume and drop the blockmap lock on the way out. */
294 hammer_rel_buffer(buffer1, 0);
296 hammer_rel_buffer(buffer2, 0);
298 hammer_rel_buffer(buffer3, 0);
299 hammer_rel_volume(root_volume, 0);
300 lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);
305 * Free (offset,bytes) in a zone
/*
 * NOTE(review): extract has original line numbers fused onto each line and
 * elided statements (numbering gaps); code tokens kept byte-identical.
 *
 * Returns "bytes" at zone offset bmap_off to the zone's blockmap.  The
 * freed range must lie within a single big-block.  If the big-block
 * becomes entirely free (and the append iterator is not inside it) the
 * underlying storage is returned to the freemap.
 */
308 hammer_blockmap_free(hammer_transaction_t trans,
309 hammer_off_t bmap_off, int bytes)
311 hammer_volume_t root_volume;
312 hammer_blockmap_t rootmap;
313 struct hammer_blockmap_layer1 *layer1;
314 struct hammer_blockmap_layer2 *layer2;
315 hammer_buffer_t buffer1 = NULL;
316 hammer_buffer_t buffer2 = NULL;
317 hammer_off_t layer1_offset;
318 hammer_off_t layer2_offset;
/* Round to the same 8-byte granularity used by the allocator. */
322 bytes = (bytes + 7) & ~7;
323 KKASSERT(bytes <= HAMMER_BUFSIZE);
324 zone = HAMMER_ZONE_DECODE(bmap_off);
325 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
326 root_volume = hammer_get_root_volume(trans->hmp, &error);
330 lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
332 rootmap = &trans->hmp->blockmap[zone];
333 KKASSERT(rootmap->phys_offset != 0);
334 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
335 HAMMER_ZONE_RAW_BUFFER_INDEX);
336 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
/* The freed range must not cross a large-block boundary. */
337 KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
338 ~HAMMER_LARGEBLOCK_MASK64) == 0);
/* NOTE(review): panic message says "lookup" but we are in _free. */
340 if (bmap_off >= rootmap->alloc_offset) {
341 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
342 bmap_off, rootmap->alloc_offset);
/* Dive layer 1 and validate its CRC (always initialized here). */
349 layer1_offset = rootmap->phys_offset +
350 HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
351 layer1 = hammer_bread(trans->hmp, layer1_offset, &error, &buffer1);
352 KKASSERT(error == 0);
353 KKASSERT(layer1->phys_offset);
354 if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
355 Debugger("CRC FAILED: LAYER1");
359 * Dive layer 2, each entry represents a large-block.
361 layer2_offset = layer1->phys_offset +
362 HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
363 layer2 = hammer_bread(trans->hmp, layer2_offset, &error, &buffer2);
364 KKASSERT(error == 0);
365 KKASSERT(layer2->u.phys_offset);
366 if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
367 Debugger("CRC FAILED: LAYER2");
/* Credit the bytes back to the big-block's free count. */
370 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
371 layer2->bytes_free += bytes;
372 KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);
375 * If the big-block is free, return it to the free pool. The layer2
376 * infrastructure is left intact even if the entire layer2 becomes
379 * At the moment if our iterator is in a bigblock that becomes
380 * wholly free, we have to leave the block allocated and we cannot
381 * reset the iterator because there may be UNDOs on-disk that
382 * reference areas of that block and we cannot overwrite those areas.
384 if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
385 if ((rootmap->next_offset ^ bmap_off) &
386 ~HAMMER_LARGEBLOCK_MASK64) {
388 * Our iterator is not in the now-free big-block
389 * and we can release it.
/* Drop cached holes for this big-block before freeing it. */
391 hammer_clean_holes(trans->hmp,
392 &trans->hmp->holes[zone],
394 hammer_freemap_free(trans, layer2->u.phys_offset,
396 hammer_clrxlate_buffer(trans->hmp,
397 layer2->u.phys_offset);
398 layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;
400 hammer_modify_buffer(trans, buffer1,
401 layer1, sizeof(*layer1));
402 ++layer1->blocks_free;
405 * This commented out code would release the layer2
406 * bigblock. We do not want to do this, at least
409 * This also may be incomplete.
411 if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
413 trans, layer1->phys_offset,
414 bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
416 layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
419 layer1->layer1_crc = crc32(layer1,
420 HAMMER_LAYER1_CRCSIZE);
421 hammer_modify_buffer_done(buffer1);
425 * This commented out code would reset the iterator,
426 * which we cannot do at the moment as it could cause
427 * new allocations to overwrite deleted data still
428 * subject to undo on reboot.
430 hammer_modify_volume(trans, root_volume,
432 rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
433 hammer_modify_volume_done(root_volume);
/* Re-CRC layer2 after all modifications and release resources. */
437 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
438 hammer_modify_buffer_done(buffer2);
440 lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);
443 hammer_rel_buffer(buffer1, 0);
445 hammer_rel_buffer(buffer2, 0);
446 hammer_rel_volume(root_volume, 0);
450 * Return the number of free bytes in the big-block containing the
451 * specified blockmap offset.
/*
 * NOTE(review): extract has original line numbers fused onto each line and
 * elided statements (numbering gaps); code tokens kept byte-identical.
 *
 * Read-only query: dives layer1/layer2 for bmap_off and reports the
 * big-block's bytes_free.  *curp appears to flag whether the append
 * iterator is currently inside that big-block (the assignment lines are
 * elided here) -- TODO confirm against the full source.
 */
454 hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
455 int *curp, int *errorp)
457 hammer_volume_t root_volume;
458 hammer_blockmap_t rootmap;
459 struct hammer_blockmap_layer1 *layer1;
460 struct hammer_blockmap_layer2 *layer2;
461 hammer_buffer_t buffer = NULL;
462 hammer_off_t layer1_offset;
463 hammer_off_t layer2_offset;
467 zone = HAMMER_ZONE_DECODE(bmap_off);
468 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
469 root_volume = hammer_get_root_volume(hmp, errorp);
474 rootmap = &hmp->blockmap[zone];
475 KKASSERT(rootmap->phys_offset != 0);
476 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
477 HAMMER_ZONE_RAW_BUFFER_INDEX);
478 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
/* NOTE(review): panic message says "lookup" but we are in _getfree. */
480 if (bmap_off >= rootmap->alloc_offset) {
481 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
482 bmap_off, rootmap->alloc_offset);
/* Dive layer 1 and validate its CRC. */
491 layer1_offset = rootmap->phys_offset +
492 HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
493 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
494 KKASSERT(*errorp == 0);
495 KKASSERT(layer1->phys_offset);
496 if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
497 Debugger("CRC FAILED: LAYER1");
501 * Dive layer 2, each entry represents a large-block.
/* The single buffer cursor is reused; hammer_bread handles the swap. */
503 layer2_offset = layer1->phys_offset +
504 HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
505 layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
506 KKASSERT(*errorp == 0);
507 KKASSERT(layer2->u.phys_offset);
508 if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
509 Debugger("CRC FAILED: LAYER2");
512 bytes = layer2->bytes_free;
/* Zero means the iterator big-block differs from bmap_off's big-block. */
514 if ((rootmap->next_offset ^ bmap_off) & ~HAMMER_LARGEBLOCK_MASK64)
520 hammer_rel_buffer(buffer, 0);
521 hammer_rel_volume(root_volume, 0);
522 if (hammer_debug_general & 0x0800) {
523 kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
531 * Lookup a blockmap offset.
/*
 * NOTE(review): extract has original line numbers fused onto each line and
 * elided statements (numbering gaps); code tokens kept byte-identical.
 *
 * Translates a zone-N blockmap offset into the underlying raw-buffer
 * offset by diving layer1 then layer2 and adding the intra-big-block
 * offset.  Read-only; CRC-validates both layers along the way.
 */
534 hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
536 hammer_volume_t root_volume;
537 hammer_blockmap_t rootmap;
538 struct hammer_blockmap_layer1 *layer1;
539 struct hammer_blockmap_layer2 *layer2;
540 hammer_buffer_t buffer = NULL;
541 hammer_off_t layer1_offset;
542 hammer_off_t layer2_offset;
543 hammer_off_t result_offset;
546 zone = HAMMER_ZONE_DECODE(bmap_off);
547 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
548 root_volume = hammer_get_root_volume(hmp, errorp);
551 rootmap = &hmp->blockmap[zone];
552 KKASSERT(rootmap->phys_offset != 0);
553 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
554 HAMMER_ZONE_RAW_BUFFER_INDEX);
555 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
/* Offsets at or past alloc_offset were never allocated -- hard error. */
557 if (bmap_off >= rootmap->alloc_offset) {
558 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
559 bmap_off, rootmap->alloc_offset);
/* Dive layer 1 and validate its CRC. */
567 layer1_offset = rootmap->phys_offset +
568 HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
569 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
570 KKASSERT(*errorp == 0);
571 KKASSERT(layer1->phys_offset);
572 if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
573 Debugger("CRC FAILED: LAYER1");
577 * Dive layer 2, each entry represents a large-block.
579 layer2_offset = layer1->phys_offset +
580 HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
581 layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
583 KKASSERT(*errorp == 0);
584 KKASSERT(layer2->u.phys_offset);
585 if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
586 Debugger("CRC FAILED: LAYER2");
/* big-block base + offset within the big-block = physical offset. */
589 result_offset = layer2->u.phys_offset +
590 (bmap_off & HAMMER_LARGEBLOCK_MASK64);
593 hammer_rel_buffer(buffer, 0);
594 hammer_rel_volume(root_volume, 0);
595 if (hammer_debug_general & 0x0800) {
596 kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
597 bmap_off, result_offset);
599 return(result_offset);
602 /************************************************************************
603 * IN-CORE TRACKING OF ALLOCATION HOLES *
604 ************************************************************************
606 * This is a temporary shim in need of a more permanent solution.
608 * As we allocate space holes are created due to having to align to a new
609 * 16K buffer when an allocation would otherwise cross the buffer boundary.
610 * These holes are recorded here and used to fulfill smaller requests as
611 * much as possible. Only a limited number of holes are recorded and these
612 * functions operate somewhat like a heuristic, where information is allowed
/*
 * Initialize a per-zone hole-tracking structure (TAILQ of cached holes).
 * NOTE(review): trailing lines of this function are elided in this extract.
 */
617 hammer_init_holes(hammer_mount_t hmp, hammer_holes_t holes)
619 TAILQ_INIT(&holes->list);
/*
 * Release all cached hole records for a zone, returning each to the
 * M_HAMMER malloc pool.  Called when the hole cache is torn down.
 */
624 hammer_free_holes(hammer_mount_t hmp, hammer_holes_t holes)
628 while ((hole = TAILQ_FIRST(&holes->list)) != NULL) {
629 TAILQ_REMOVE(&holes->list, hole, entry);
630 kfree(hole, M_HAMMER);
635 * Attempt to locate a hole with sufficient free space to accommodate the
636 * requested allocation. Return the offset or 0 if no hole could be found.
/*
 * First-fit scan: the first cached hole large enough is consumed from its
 * front (offset advances, size shrinks).  NOTE(review): the return and
 * hole-exhaustion cleanup lines are elided in this extract.
 */
639 hammer_find_hole(hammer_mount_t hmp, hammer_holes_t holes, int bytes)
642 hammer_off_t result_off = 0;
644 TAILQ_FOREACH(hole, &holes->list, entry) {
645 if (bytes <= hole->bytes) {
646 result_off = hole->offset;
647 hole->offset += bytes;
648 hole->bytes -= bytes;
656 * If a newly created hole is reasonably sized then record it. We only
657 * keep track of a limited number of holes. Lost holes are recovered by
660 * offset is a zone-N offset.
/*
 * Record a hole at (offset, bytes).  Under the HAMMER_MAX_HOLES cap a new
 * record is allocated; otherwise the oldest record (list head) is recycled
 * for the new hole.  NOTE(review): size-threshold and field-assignment
 * lines are elided in this extract.
 */
663 hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
664 hammer_off_t offset, int bytes)
671 if (holes->count < HAMMER_MAX_HOLES) {
672 hole = kmalloc(sizeof(*hole), M_HAMMER, M_WAITOK);
675 hole = TAILQ_FIRST(&holes->list);
676 TAILQ_REMOVE(&holes->list, hole, entry);
678 TAILQ_INSERT_TAIL(&holes->list, hole, entry);
679 hole->offset = offset;
684 * Clean out any holes cached for the bigblock we are about to release back
688 hammer_clean_holes(hammer_mount_t hmp, hammer_holes_t holes,
693 offset &= ~HAMMER_LARGEBLOCK_MASK64;
696 TAILQ_FOREACH(hole, &holes->list, entry) {
697 if ((hole->offset & ~HAMMER_LARGEBLOCK_MASK64) == offset) {
698 TAILQ_REMOVE(&holes->list, hole, entry);
699 kfree(hole, M_HAMMER);