/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER blockmap
 */
#include <vm/vm_page2.h>

#include "hammer.h"
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
			hammer_off_t base_offset, int zone,
			hammer_blockmap_layer2_t layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
static int hammer_check_volume(hammer_mount_t, hammer_off_t*);
static void hammer_skip_volume(hammer_off_t *offsetp);
/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}
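
/*
 * Reservations are keyed on the zone-2 offset of their big-block, so a
 * lookup for any offset within a big-block first masks the offset down
 * to the big-block base, as done repeatedly below:
 *
 *	base_off = hammer_xlate_to_zone2(off & ~HAMMER_BIGBLOCK_MASK64);
 *	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
 */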

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;

	hmp = trans->hmp;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(hammer_is_zone2_mapped_index(zone));

	/*
	 * Setup
	 */
	root_volume = trans->rootvol;
	*errorp = 0;
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = HAMMER_DATA_DOALIGN_WITH(hammer_off_t, hint);
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				  "allocation\n", zone);
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);

	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
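
	/*
	 * Note on the CRC idiom used throughout this file: the first test
	 * runs without the blockmap lock and can race a writer that is in
	 * the middle of updating the entry.  Only on failure do we take
	 * blkmap_lock and re-test; a second failure while holding the lock
	 * means real corruption rather than a transient race.
	 */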

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Skip the whole volume if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if (HAMMER_VOL_DECODE(layer1->phys_offset) == hmp->volume_to_remove) {
		hammer_skip_volume(&next_offset);
		goto again;
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

#if 0
	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}
#endif

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}

	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(layer1);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}

	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
		if (*errorp) {
			result_offset = 0;
			goto failed;
		}
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * need to save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

	/*
	 * Cleanup
	 */
failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
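
/*
 * Illustrative use of hammer_blockmap_alloc() (editorial sketch, not part
 * of the original source): a backend caller allocates zone-mapped space
 * within an open transaction and checks *errorp, e.g. to place a B-Tree
 * node:
 *
 *	int error;
 *	hammer_off_t node_offset;
 *
 *	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
 *					    sizeof(struct hammer_node_ondisk),
 *					    hint_offset, &error);
 *	if (node_offset == 0)
 *		return(error);		(typically ENOSPC)
 */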

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx = NULL;
	int loops = 0;
	int offset;

	/*
	 * Setup
	 */
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				  "reservation\n", zone);
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = HAMMER_ZONE_LAYER1_NEXT_OFFSET(next_offset);
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;

	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
			if (*errorp)
				goto failed;
		}
	}

	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
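
/*
 * Typical reserve/finalize pairing (editorial sketch, not part of the
 * original source): the frontend reserves space and issues direct write
 * I/O, then the backend finalizes the reservation when the related
 * record is committed and finally drops the reference:
 *
 *	resv = hammer_blockmap_reserve(hmp, HAMMER_ZONE_LARGE_DATA_INDEX,
 *				       bytes, &zone_offset, &error);
 *	...frontend writes the data directly to the media at zone_offset...
 *	error = hammer_blockmap_finalize(trans, resv, zone_offset, bytes);
 *	hammer_blockmap_reserve_complete(hmp, resv);
 */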

/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanism for dedup reservations is also the
 * same as for normal write ones - the backend finalizes the reservation
 * with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	/*
	 * Setup
	 */
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}

	base_off = hammer_xlate_to_zone2(zone_offset & ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}

	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}

static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* XXX do we really need it ? */
	if (temp > resv->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
		return(0);
	}

	resv->bytes_free -= bytes;
	return(1);
}
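
/*
 * Note on the check above: bytes_free can legally be negative (de-dup),
 * so the guard is against wrap-around, not against going negative.
 * temp = bytes_free - 2 * HAMMER_BIGBLOCK_SIZE is smaller than bytes_free
 * for any sane value; it compares larger only when the subtraction wraps
 * past INT32_MIN, i.e. when bytes_free has already underflowed so far
 * that one more subtraction could wrap it back into positive territory.
 * Refusing the update preserves the invariant that bytes_free can never
 * become larger than HAMMER_BIGBLOCK_SIZE.
 */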

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT(hammer_is_zone_raw_buffer(resv->zone_offset));

	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			hkprintf("delbgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			hkprintf("delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}
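
/*
 * Reference counting summary (editorial note): hammer_blockmap_reserve()
 * and hammer_blockmap_reserve_dedup() return a reservation holding a new
 * reference, hammer_reserve_setdelay() takes one for the delay list, and
 * every reference is ultimately dropped through
 * hammer_blockmap_reserve_complete() above, which frees the structure on
 * the final release.
 */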

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			int zone, hammer_blockmap_layer2_t layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;

		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}
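
/*
 * Editorial note: the flg_no assignment above ties the reservation to the
 * next flush group.  hammer_reserve_clrdelay() below is not invoked for
 * it until the flusher has cycled past that group, which is what
 * guarantees crash recovery cannot observe the big-block being reused
 * before the related meta-data has been fully flushed.
 */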

/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the mean time, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			hammer_crc_set_layer1(layer1);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
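
/*
 * Editorial note: freeing is symmetric with allocation.  When the last
 * allocated bytes of a big-block are returned, the code above both
 * resets the layer2 entry (making the big-block allocatable again) and
 * interposes a delayed reservation via hammer_reserve_setdelay_offset(),
 * so the space cannot actually be handed out again until the flusher
 * has cycled.
 */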

void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
	} else {
		layer2->bytes_free -= bytes;
	}
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	hammer_crc_set_layer2(layer2);

	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		hammer_crc_set_layer1(layer1);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
				trans->rootvol,
				vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		hdkprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;

	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;

	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	hammer_crc_set_layer2(layer2);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}
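
/*
 * Editorial note: because the frontend may finalize reservations in any
 * order, finalize only ratchets append_off upward and tolerates a layer2
 * entry already owned by this zone; the zone-assignment branch above runs
 * only for the first allocation or finalization that touches a fresh
 * big-block.
 */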

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		*curp = 0;
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		*curp = 0;
		bytes = 0;
		goto failed;
	}
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;

	/*
	 * *curp becomes 1 only when no error and,
	 * next_offset and zone_offset are in the same big-block.
	 */
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;	/* not same */
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x4000) {
		hdkprintf("%016jx -> %d\n", (intmax_t)zone_offset, bytes);
	}
	return(bytes);
}
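
/*
 * Editorial note: *curp tells the reblocker whether this big-block is the
 * one the zone's next_offset is currently appending into; combined with
 * the (approximate) bytes_free result it is used to decide whether
 * records should be relocated out of the big-block.
 */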

/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;

	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (!hammer_crc_test_layer1(layer1)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer1(layer1))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;

	if (layer2->zone == 0) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		hpanic("bad zone %d/%d", layer2->zone, zone);
	}
	if (!hammer_crc_test_layer2(layer2)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (!hammer_crc_test_layer2(layer2))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		hdkprintf("%016jx -> %016jx\n",
			(intmax_t)zone_offset, (intmax_t)result_offset);
	}
	return(result_offset);
}

/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
		    ((int64_t)hammer_limit_dirtybufspace) +
		    (slop << HAMMER_BIGBLOCK_BITS);

	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
		return(0);
	}

	return(ENOSPC);
}
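
/*
 * Worked example (editorial; the byte figures are illustrative
 * assumptions): suppose in_size ~ 200 bytes, rec_size = 128 bytes,
 * rsv_inodes = 1000, rsv_recs = 2000, rsv_fromdelay = 4, slop = 16, and
 * 8MB big-blocks (HAMMER_BIGBLOCK_BITS = 23).  Then
 *
 *	usedbytes ~ 200KB + 256KB + rsv_databytes
 *		    + 32MB + hammer_limit_dirtybufspace + 128MB
 *
 * and _hammer_checkspace() returns ENOSPC unless copy_stat_freebigblocks
 * covers usedbytes >> 23, i.e. roughly 20+ free big-blocks here plus
 * whatever the dirty-buffer limit and rsv_databytes add.
 */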

static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset;
	int error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(*offsetp);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more physically available space in layer1s
	 * of the current volume, go to the next volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
		hammer_skip_volume(offsetp);
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	return(error);
}

static void
hammer_skip_volume(hammer_off_t *offsetp)
{
	hammer_off_t offset;
	int zone, vol_no;

	offset = *offsetp;
	zone = HAMMER_ZONE_DECODE(offset);
	vol_no = HAMMER_VOL_DECODE(offset) + 1;
	KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);

	if (vol_no == HAMMER_MAX_VOLUMES) {	/* wrap */
		vol_no = 0;
		++zone;
	}

	*offsetp = HAMMER_ENCODE(zone, vol_no, 0);
}