/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
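	/*
	 * Editorial illustration (not in the original source): the mask
	 * arithmetic rounds the request up to the next 16-byte boundary,
	 * e.g. bytes = 1 -> 16, bytes = 16 -> 16, bytes = 100 -> 112.
	 */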
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = (hint + 15) & ~(hammer_off_t)15;
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

again:
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (loops++ == 2) {
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a large-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
	}
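	/*
	 * Editorial worked example of the XOR tests above, assuming 16KB
	 * HAMMER buffers (HAMMER_BUFMASK64 == 0x3fff): a 32-byte request
	 * at a next_offset ending in 0x3ff0 gives a tmp_offset ending in
	 * 0x400f.  (next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64 is then
	 * nonzero because the two offsets differ above the buffer mask,
	 * so the request would straddle a buffer boundary and next_offset
	 * is pushed up to the next buffer base (ending in 0x4000).
	 */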
	offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
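	/*
	 * Editorial sketch, assuming the standard HAMMER freemap layout:
	 * the freemap is a two-level radix tree.  Each layer1 entry points
	 * at a physical layer2 block and covers HAMMER_BLOCKMAP_LAYER2
	 * bytes of address space; each layer2 entry within that block
	 * describes one big-block.  The OFFSET() macros extract the byte
	 * offset of the entry covering next_offset within each layer.
	 */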
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);

	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2; skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Skip this layer1 entry if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if ((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
	    hmp->volume_to_remove) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_LARGEBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
	) {
		if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
			next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = (next_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
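	/*
	 * Editorial note: base_off above strips the in-big-block offset
	 * and the zone bits from next_offset and re-encodes the result as
	 * a zone-2 (raw buffer) address.  Reservations are always indexed
	 * by the zone-2 address of the big-block, so lookups from any zone
	 * normalize to the same key.
	 */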
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
				      ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
	}

	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1,
				     layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1,
					   HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2,
				     layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2,
				     layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}
	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * must save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:
	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx;
	int loops = 0;
	int offset;

	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (loops++ == 2) {
			*errorp = ENOSPC;
			resv = NULL;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a large-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);

	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2; skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = (next_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
				      ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
		resx = NULL;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
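		/*
		 * (RB_INSERT returns an existing node on a key collision and
		 * NULL on success, so the assertion verifies that no other
		 * reservation already covered this big-block.)
		 */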
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;
	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE))
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
	}

	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except that we only adjust the bytes_free field and don't touch the
 * append offset.  The finalization mechanism for dedup reservations is
 * also the same as for normal write ones - the backend finalizes the
 * reservation with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);

	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);

	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}

	base_off = (zone_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}

		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;

		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}

static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
	cpu_ccfence();	/* XXX do we really need it? */
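	/*
	 * Editorial note on the check below: bytes_free may legitimately
	 * go negative due to de-dup, but only within a couple of big-blocks
	 * of zero.  Subtracting two big-block sizes can therefore only
	 * compare greater than the original value if the signed arithmetic
	 * wrapped around, i.e. bytes_free had already underflowed far out
	 * of its sane range (the cpu_ccfence() presumably keeps the
	 * compiler from optimizing the wrap test away).
	 */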
	if (temp > resv->bytes_free) {
		kprintf("BIGBLOCK UNDERFLOW\n");
		return(0);
	}

	resv->bytes_free -= bytes;
	return(1);
}

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_LARGEBLOCK_SIZE;
		base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
		base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_LARGEBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: dellgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, and to ensure proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, struct hammer_blockmap_layer2 *layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: A lock will eventually be needed around the resv
	 * lookup/allocation and the setdelay call; currently refs is not
	 * bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_LARGEBLOCK_SIZE;
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}

/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_LARGEBLOCK_MASK64) == 0);
	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);

	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);
	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_LARGEBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the meantime, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * proceeding.
	 */
	if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
		base_off = (zone_offset &
			    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
			    HAMMER_ZONE_RAW_BUFFER;
		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
						   trans->rootvol,
						   vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone;

	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_LARGEBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_LARGEBLOCK_MASK64) == 0);
	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);

	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);
	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_LARGEBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
	cpu_ccfence();	/* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		error = ERANGE;
		goto underflow;
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
underflow:
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);

	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);
	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1,
				     layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1,
					   HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;
	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;

	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return(error);
}

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);

	KKASSERT(layer1->phys_offset);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;

	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
		*curp = 0;
	else
		*curp = 1;

	hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
			(long long)zone_offset, bytes);
	}
	return(bytes);
}

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
		       int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	int zone;

	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

	result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_RAW_BUFFER;
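	/*
	 * Editorial illustration: a blockmap offset encodes its zone in the
	 * high bits covered by HAMMER_OFF_ZONE_MASK.  The computation above
	 * simply swaps those bits for the zone-2 raw-buffer encoding, e.g.
	 * a zone-8 B-Tree offset such as 0x8000000000001000 would map to
	 * the zone-2 address 0x2000000000001000; the volume and offset bits
	 * pass through unchanged because zones are direct-mapped onto the
	 * freemap.
	 */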
	/*
	 * We can actually stop here; normal blockmaps are now direct-mapped
	 * onto the freemap and so represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return(result_offset);
	}
	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);

	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

	if (layer2->zone == 0) {
		base_off = (zone_offset &
			    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
			    HAMMER_ZONE_RAW_BUFFER;
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		panic("hammer_blockmap_lookup: bad zone %d/%d",
		      layer2->zone, zone);
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Cleanup
	 */
	hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
			(long long)zone_offset, (long long)result_offset);
	}
	return(result_offset);
}

/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
		    ((int64_t)hidirtybufspace << 2) +
		    (slop << HAMMER_LARGEBLOCK_BITS);
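	/*
	 * Editorial note: usedbytes is a pessimistic byte estimate of space
	 * the frontend may still commit - reserved inodes/records/data,
	 * delayed big-blocks, four times the dirty buffer space, plus the
	 * caller-supplied slop in big-block units.  The comparison below
	 * converts it to big-blocks (>> HAMMER_LARGEBLOCK_BITS) and
	 * succeeds while at least that many free big-blocks remain.
	 */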
	hammer_count_extra_space_used = usedbytes;	/* debugging */
	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
		return(0);
	}
	return(ENOSPC);
}