/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
 */

#include "hammer.h"
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
                                    hammer_off_t base_offset, int zone,
                                    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
             hammer_res_rb_compare, hammer_off_t, zone_offset);
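/*
 * RB_GENERATE2 expands the red-black tree support functions with a
 * lookup keyed directly on the zone_offset field; this is what allows
 * the RB_LOOKUP() calls throughout this file to find the reservation
 * covering a big-block from its base offset alone.
 */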
static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
        if (res1->zone_offset < res2->zone_offset)
                return(-1);
        if (res1->zone_offset > res2->zone_offset)
                return(1);
        return(0);
}
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
                      hammer_off_t hint, int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_reserve_t resv;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t result_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int loops = 0;
        int offset;             /* offset within big-block */
        int use_hint;

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
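/*
 * The mask arithmetic above rounds the request up to the next 16-byte
 * boundary, e.g. 1 -> 16, 16 -> 16, 17 -> 32.
 */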
        /*
         * Setup
         */
        hmp = trans->hmp;
        root_volume = trans->rootvol;
        *errorp = 0;
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Use the hint if we have one.
         */
        if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
                next_offset = (hint + 15) & ~(hammer_off_t)15;
                use_hint = 1;
        } else {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }
again:

        /*
         * use_hint is turned off if we leave the hinted big-block.
         */
        if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }

        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        result_offset = 0;
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
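/*
 * The XOR tests above detect a boundary crossing without any division:
 * if the first byte (next_offset) and last byte (tmp_offset) of the
 * request differ in any bit above the buffer (or big-block) mask they
 * land in different buffers (or big-blocks), and next_offset is bumped
 * to the start of the buffer containing the last byte before retrying.
 */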
        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }
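/*
 * The double-checked CRC test above is a recurring pattern in this
 * file: the unlocked test can race a writer in the middle of updating
 * layer1/layer2, so a mismatch is only trusted (and panics) if it is
 * still present after acquiring the blockmap lock.
 */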
        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2, skip to the next layer1 entry.
         */
        if (offset == 0 && layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Skip this layer1 entry if it is pointing to a layer2 big-block
         * on a volume that we are currently trying to remove from the
         * file-system.  This is used by the volume-del code together with
         * the reblocker to free up a volume.
         */
        if ((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
            hmp->volume_to_remove) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.  This can race another thread holding the lock
         * and in the middle of modifying layer2.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * If operating in the current non-hint blockmap block, do not
         * allow it to get over-full.  Also drop any active hinting so
         * blockmap->next_offset is updated at the end.
         *
         * We do this for B-Tree and meta-data allocations to provide
         * localization for updates.
         */
        if ((zone == HAMMER_ZONE_BTREE_INDEX ||
             zone == HAMMER_ZONE_META_INDEX) &&
            offset >= HAMMER_LARGEBLOCK_OVERFILL &&
            !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
        ) {
                if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
                        next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                        use_hint = 0;
                        goto again;
                }
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is
         * reserved by our zone we may have to move next_offset past
         * the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
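        /*
         * base_off is the zone-2 (raw buffer) address of the big-block
         * containing next_offset: the intra-big-block bits and the zone
         * bits are masked away and the raw-buffer zone id is OR'd back
         * in.  Reservations are always indexed by this zone-2 base
         * address.
         */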
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
        }

        /*
         * Ok, we can allocate out of this layer2 big-block.  Assume
         * ownership of the layer for real.  At this point we've validated
         * any reservation that might exist and can just ignore resv.
         */
        if (layer2->zone == 0) {
                /*
                 * Assign the big-block to our zone
                 */
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans, trans->rootvol,
                                           vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        } else {
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
        }
        KKASSERT(layer2->zone == zone);

        /*
         * NOTE: bytes_free can legally go negative due to de-dup.
         */
        layer2->bytes_free -= bytes;
        KKASSERT(layer2->append_off <= offset);
        layer2->append_off = offset + bytes;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);

        /*
         * We hold the blockmap lock and should be the only ones
         * capable of modifying resv->append_off.  Track the allocation
         * as appropriate.
         */
        KKASSERT(bytes != 0);
        if (resv) {
                KKASSERT(resv->append_off <= offset);
                resv->append_off = offset + bytes;
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
                hammer_blockmap_reserve_complete(hmp, resv);
        }

        /*
         * If we are allocating from the base of a new buffer we can avoid
         * a disk read by calling hammer_bnew().
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew_ext(trans->hmp, next_offset, bytes,
                                errorp, &buffer3);
        }
        result_offset = next_offset;

        /*
         * If we weren't supplied with a hint or could not use the hint
         * then we wound up using blockmap->next_offset as the hint and
         * need to save it.
         */
        if (use_hint == 0) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                blockmap->next_offset = next_offset + bytes;
                hammer_modify_volume_done(root_volume);
        }
        hammer_unlock(&hmp->blkmap_lock);

failed:

        /*
         * Cleanup
         */
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);

        return(result_offset);
}
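/*
 * Typical use (sketch only, with placeholder sizes and error handling
 * elided; real callers are the B-Tree and meta-data paths):
 *
 *	hammer_off_t off;
 *	int error;
 *
 *	off = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
 *				    len, hint, &error);
 *	if (off == 0)
 *		...ENOSPC or I/O error...
 */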
/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
                        hammer_off_t *zone_offp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        hammer_reserve_t resx;
        int loops = 0;
        int offset;

        /*
         * Setup
         */
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(NULL);
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

        next_offset = blockmap->next_offset;
again:
        resv = NULL;

        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2, skip to the next layer1 entry.
         */
        if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
            layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp)
                goto failed;

        /*
         * Check CRC if not allocating into uninitialized space (which we
         * aren't when reserving space).
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is
         * reserved by our zone we may have to move next_offset past
         * the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
        } else {
                resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->zone_offset = base_off;
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resx->flags |= HAMMER_RESF_LAYER2FREE;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
                ++hammer_count_reservations;
        }
        resv->append_off = offset + bytes;

        /*
         * If we are not reserving a whole buffer but are at the start of
         * a new block, call hammer_bnew() to avoid a disk read.
         *
         * If we are reserving a whole buffer (or more), the caller will
         * probably use a direct read, so do nothing.
         */
        if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew(hmp, next_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        blockmap->next_offset = next_offset + bytes;
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        *zone_offp = next_offset;

        return(resv);
}
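/*
 * In the overall write path this pairs with hammer_blockmap_finalize():
 * the frontend reserves space and issues direct I/O into it, and the
 * backend finalizes the same offset when the record commits (sketch
 * only, error handling elided):
 *
 *	resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *	...direct write to zone_off...
 *	hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *	hammer_blockmap_reserve_complete(hmp, resv);
 */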
/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanic for dedup reservations is also the
 * same as for normal write ones - the backend finalizes the reservation
 * with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
                              hammer_off_t zone_offset, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv = NULL;
        hammer_reserve_t resx = NULL;

        /*
         * Setup
         */
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(NULL);
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(freemap->phys_offset != 0);

        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Fail if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone)
                goto failed;

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                goto failed;
        }

        base_off = (zone_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        resv = NULL;
                        goto failed;
                }
                /*
                 * Due to possible big-block underflow we can't simply
                 * subtract bytes from bytes_free.
                 */
                if (update_bytes_free(resv, bytes) == 0) {
                        hammer_unlock(&hmp->blkmap_lock);
                        resv = NULL;
                        goto failed;
                }
                ++resv->refs;
        } else {
                resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->bytes_free = layer2->bytes_free;
                /*
                 * Due to possible big-block underflow we can't simply
                 * subtract bytes from bytes_free.
                 */
                if (update_bytes_free(resx, bytes) == 0) {
                        hammer_unlock(&hmp->blkmap_lock);
                        kfree(resx, hmp->m_misc);
                        goto failed;
                }
                resx->zone_offset = base_off;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
                ++hammer_count_reservations;
        }

        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);

        return(resv);
}
static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
        int32_t temp;

        /*
         * Big-block underflow check
         */
        temp = resv->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
        cpu_ccfence(); /* XXX do we really need it ? */
        if (temp > resv->bytes_free) {
                kprintf("BIGBLOCK UNDERFLOW\n");
                return (0);
        }

        resv->bytes_free -= bytes;
        return (1);
}
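/*
 * update_bytes_free() relies on integer wrap-around: bytes_free may
 * legitimately go slightly negative due to de-dup, but if it has fallen
 * more than two big-blocks below zero the subtraction above wraps and
 * temp compares greater than bytes_free, flagging the underflow.  The
 * cpu_ccfence() keeps the compiler from collapsing the comparison.
 */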
/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
        hammer_off_t base_offset;
        int error;

        KKASSERT(resv->refs > 0);
        KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Setting append_off to the max prevents any new allocations
         * from occurring while we are trying to dispose of the reservation,
         * allowing us to safely delete any related HAMMER buffers.
         *
         * If we are unable to clean out all related HAMMER buffers we
         * requeue the delay.
         */
        if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;
                base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
                base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
                if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
                        hammer_dedup_cache_inval(hmp, base_offset);
                error = hammer_del_buffers(hmp, base_offset,
                                           resv->zone_offset,
                                           HAMMER_LARGEBLOCK_SIZE,
                                           1);
                if (hammer_debug_general & 0x20000) {
                        kprintf("hammer: dellgblk %016jx error %d\n",
                                (intmax_t)base_offset, error);
                }
                if (error)
                        hammer_reserve_setdelay(hmp, resv);
        }
        if (--resv->refs == 0) {
                if (hammer_debug_general & 0x20000) {
                        kprintf("hammer: delresvr %016jx zone %02x\n",
                                (intmax_t)resv->zone_offset, resv->zone);
                }
                KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
                RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
                kfree(resv, hmp->m_misc);
                --hammer_count_reservations;
        }
}
/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
                               int zone, struct hammer_blockmap_layer2 *layer2)
{
        hammer_reserve_t resv;

        /*
         * Allocate the reservation if necessary.
         *
         * NOTE: need lock in future around resv lookup/allocation and
         * the setdelay call, currently refs is not bumped until the call.
         */
again:
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
        if (resv == NULL) {
                resv = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resv->zone = zone;
                resv->zone_offset = base_offset;
                resv->refs = 0;
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;

                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
                if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
                        kfree(resv, hmp->m_misc);
                        goto again;
                }
                ++hammer_count_reservations;
        } else {
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
        }
        hammer_reserve_setdelay(hmp, resv);
}
/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        if (resv->flags & HAMMER_RESF_ONDELAY) {
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        } else {
                ++resv->refs;
                ++hmp->rsv_fromdelay;
                resv->flags |= HAMMER_RESF_ONDELAY;
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        }
}
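/*
 * flush_group is set one group past hmp->flusher.next, so the
 * reservation is not cleared via hammer_reserve_clrdelay() until a
 * full flush cycle has passed, matching the crash-recovery concern
 * described above hammer_reserve_setdelay_offset().
 */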
/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
        resv->flags &= ~HAMMER_RESF_ONDELAY;
        TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
        --hmp->rsv_fromdelay;
        hammer_blockmap_reserve_complete(hmp, resv);
}
/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int error;
        int zone;

        if (bytes == 0)
                return;
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);
        KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Free space previously allocated via blockmap_alloc().
         *
         * NOTE: bytes_free can be and remain negative due to de-dup ops
         *       but can never become larger than HAMMER_LARGEBLOCK_SIZE.
         */
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If a big-block becomes entirely free we must create a covering
         * reservation to prevent premature reuse.  Note, however, that
         * the big-block and/or reservation may still have an append_off
         * that allows further (non-reused) allocations.
         *
         * Once the reservation has been made we re-check layer2 and if
         * the big-block is still entirely free we reset the layer2 entry.
         * The reservation will prevent premature reuse.
         *
         * NOTE: hammer_buffer's are only invalidated when the reservation
         * is completed, if the layer2 entry is still completely free at
         * that time.  Any allocations from the reservation that may have
         * occurred in the mean time, or active references on the reservation
         * from new pending allocations, will prevent the invalidation from
         * occurring.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 &
                             ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;

                hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                        hammer_modify_volume_field(trans,
                                                   trans->rootvol,
                                                   vol0_stat_freebigblocks);
                        ++root_volume->ondisk->vol0_stat_freebigblocks;
                        hmp->copy_stat_freebigblocks =
                                root_volume->ondisk->vol0_stat_freebigblocks;
                        hammer_modify_volume_done(trans->rootvol);
                }
        }
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
}
/*
 * Backend function - de-dup (zone_offset, bytes) in a zone.
 */
int
hammer_blockmap_dedup(hammer_transaction_t trans,
                      hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int32_t temp;
        int error;
        int zone;

        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_LARGEBLOCK_SIZE);
        KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * De-dup space previously allocated via blockmap_alloc().
         *
         * NOTE: bytes_free can be and remain negative due to de-dup ops
         *       but can never become larger than HAMMER_LARGEBLOCK_SIZE.
         */
        KKASSERT(layer2->zone == zone);
        temp = layer2->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
        cpu_ccfence(); /* prevent gcc from optimizing temp out */
        if (temp > layer2->bytes_free) {
                error = ERANGE;
                goto underflow;
        }
        layer2->bytes_free -= bytes;

        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
underflow:
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return (error);
}
/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
                         hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;
        int offset;

        if (bytes == 0)
                return(0);
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Finalize some or all of the space covered by a current
         * reservation.  An allocation in the same layer may have
         * already assigned ownership.
         */
        if (layer2->zone == 0) {
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans,
                                           trans->rootvol,
                                           vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        }
        if (layer2->zone != zone)
                kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
        KKASSERT(layer2->zone == zone);
        KKASSERT(bytes != 0);
        layer2->bytes_free -= bytes;
        if (resv) {
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
        }

        /*
         * Finalizations can occur out of order, or combined with allocations.
         * append_off must be set to the highest allocated offset.
         */
        offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
        if (layer2->append_off < offset)
                layer2->append_off = offset;
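        /*
         * Example: if a 16K finalization at big-block offset 32K lands
         * before one at offset 16K, append_off first jumps to 48K and
         * the later, lower finalization leaves it alone, preserving the
         * highest-allocated-offset invariant.
         */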
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return(error);
}
/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *          and the result will also not represent the actual number
 *          of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int32_t bytes;
        int zone;

        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (root_volume == NULL) {
                *curp = 0;
                return(0);
        }
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         *
         * (reuse buffer, layer1 pointer becomes invalid)
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer2->zone == zone);

        bytes = layer2->bytes_free;

        /*
         * *curp is set to 1 only if the big-block is the one the zone's
         * iterator is currently appending to.
         */
        if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        (long long)zone_offset, bytes);
        }
        return(bytes);
}
/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                       int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        int zone;

        /*
         * Calculate the zone-2 offset.
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

        result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_RAW_BUFFER;

        /*
         * We can actually stop here, normal blockmaps are now direct-mapped
         * onto the freemap and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return(result_offset);
        }

        /*
         * Validate the allocation zone
         */
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(freemap->phys_offset != 0);

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp)
                goto failed;
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        if (*errorp)
                goto failed;

        if (layer2->zone == 0) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 &
                             ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;
                resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
                                 base_off);
                KKASSERT(resv && resv->zone == zone);
        } else if (layer2->zone != zone) {
                panic("hammer_blockmap_lookup: bad zone %d/%d\n",
                      layer2->zone, zone);
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        (long long)zone_offset, (long long)result_offset);
        }
        return(result_offset);
}
/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        int64_t usedbytes;

        usedbytes = hmp->rsv_inodes * in_size +
                    hmp->rsv_recs * rec_size +
                    hmp->rsv_databytes +
                    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
                    ((int64_t)hidirtybufspace << 2) +
                    (slop << HAMMER_LARGEBLOCK_BITS);
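        /*
         * All terms above are byte counts: rsv_fromdelay and slop are
         * counted in big-blocks and converted with the
         * HAMMER_LARGEBLOCK_BITS shift, while hidirtybufspace is scaled
         * 4x as a safety margin.  The comparison below converts
         * usedbytes back into big-blocks to match
         * copy_stat_freebigblocks.
         */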
        hammer_count_extra_space_used = usedbytes;      /* debugging */
        if (resp)
                *resp = usedbytes;

        if (hmp->copy_stat_freebigblocks >=
            (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {