/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER blockmap
 */
#include <vm/vm_page2.h>

#include "hammer.h"
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    hammer_blockmap_layer2_t layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
static int hammer_check_volume(hammer_mount_t, hammer_off_t*);
static void hammer_skip_volume(hammer_off_t *offsetp);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;

	hmp = trans->hmp;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
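
	/*
	 * For example, a 100-byte request rounds up to
	 * (100 + 15) & ~15 = 112 bytes, so every allocation begins on a
	 * 16-byte boundary.
	 */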

	/*
	 * Setup
	 */
	root_volume = trans->rootvol;
	*errorp = 0;
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = (hint + 15) & ~(hammer_off_t)15;
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
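
	/*
	 * The XOR test above is a cheap same-block check: two offsets fall
	 * within the same hint block exactly when they agree in every bit
	 * above the block mask, i.e.
	 *
	 *	((a ^ b) & ~HAMMER_HINTBLOCK_MASK64) == 0
	 *
	 * The same idiom is used below for buffer and big-block boundaries.
	 */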

	/*
	 * Deal with wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				"allocation\n", zone);
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
		}
	}
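
	/*
	 * Example: with 16K buffers (HAMMER_BUFMASK64 == 0x3fff), a
	 * 1024-byte request at offset ...3e00 would end at ...41ff.  The
	 * XOR of the two offsets has bit 0x4000 set above the mask, so the
	 * request is bumped to the start of the next buffer:
	 *
	 *	next_offset = 0x41ff & ~0x3fff = 0x4000
	 */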
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
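
	/*
	 * The double-test is deliberate: the first CRC check runs lockless
	 * and can race a writer mid-update under blkmap_lock.  Re-checking
	 * after acquiring the lock distinguishes that benign race from real
	 * on-media corruption before panicking.
	 */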

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Skip the whole volume if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if (HAMMER_VOL_DECODE(layer1->phys_offset) == hmp->volume_to_remove) {
		hammer_skip_volume(&next_offset);
		goto again;
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}
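
	/*
	 * Layout sketch: the freemap is a two-level radix tree.  Each
	 * layer1 entry covers HAMMER_BLOCKMAP_LAYER2 bytes of address
	 * space and points at an array of layer2 entries; each layer2
	 * entry tracks exactly one big-block (zone ownership, bytes_free,
	 * append_off).  The _LAYER1_OFFSET()/_LAYER2_OFFSET() macros just
	 * index those arrays using the relevant bits of the offset.
	 */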

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}
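
	/*
	 * A hammer_reserve records space handed to the frontend before any
	 * meta-data has been committed.  Bumping resv->refs above pins the
	 * reservation while we allocate behind it; the matching release is
	 * the hammer_blockmap_reserve_complete() call further down.
	 */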

	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}

	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * must save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:
	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
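
/*
 * Usage sketch (hypothetical caller, error handling elided): a backend
 * record commit might obtain zone space with
 *
 *	int error;
 *	hammer_off_t off;
 *
 *	off = hammer_blockmap_alloc(trans, HAMMER_ZONE_META_INDEX,
 *				    rec_len, hint, &error);
 *	if (off == 0)
 *		...ENOSPC or I/O error reported in `error'...
 *
 * The returned value is a zone-encoded blockmap offset, not a raw
 * zone-2 offset.
 */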

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx = NULL;
	int loops = 0;
	int offset;

	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;

	/*
	 * Deal with wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			hmkprintf(hmp, "No space left for zone %d "
				"reservation\n", zone);
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
		resx = NULL;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;

	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
			if (*errorp)
				goto failed;
		}
	}

	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
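
/*
 * Typical frontend flow (illustrative sketch only; exact sequencing
 * lives in the frontend/backend code): reserve space, issue the data
 * write directly, then have the backend finalize once the record
 * commits:
 *
 *	resv = hammer_blockmap_reserve(hmp, HAMMER_ZONE_LARGE_DATA_INDEX,
 *				       bytes, &zone_off, &error);
 *	...direct write of the data to zone_off...
 *	hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *	hammer_blockmap_reserve_complete(hmp, resv);
 */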

/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanic for dedup reservations is also the
 * same as for normal write ones - the backend finalizes the reservation
 * with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}

	base_off = hammer_xlate_to_zone2(zone_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
		resx = NULL;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}

	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}

static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence();	/* XXX do we really need it ? */
	if (temp > resv->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
		return (0);
	}

	resv->bytes_free -= bytes;
	return (1);
}
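
/*
 * How the check above works: bytes_free is a signed 32-bit count that
 * may legitimately go somewhat negative due to de-dup.  Normally
 * temp = bytes_free - 16MB is simply smaller than bytes_free.  Only if
 * bytes_free has already sunk to within two big-blocks of INT32_MIN
 * does the subtraction wrap around to a large positive value, making
 * temp > bytes_free and flagging a runaway underflow.
 */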

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT(hammer_is_zone_raw_buffer(resv->zone_offset));

	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			hkprintf("delbgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			hkprintf("delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, hammer_blockmap_layer2_t layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;

		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}
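
/*
 * Note on flg_no: assigning hmp->flusher.next + 1 ties the reservation
 * to a flush group that has not yet run.  The big-block only becomes
 * reusable once the flusher advances past that group and
 * hammer_reserve_clrdelay() drops the on-delay reference, which
 * guarantees the meta-data freeing the block is on-media first.
 */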

/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	hmp = trans->hmp;

	if (bytes == 0)
		return;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the mean time, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;

	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence();	/* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
		goto underflow;
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
underflow:
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
				trans->rootvol,
				vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		hdkprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;

	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;

	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;

	/*
	 * *curp becomes 1 only when there is no error and
	 * next_offset and zone_offset are in the same big-block.
	 */
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;	/* not same */
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x4000) {
		hdkprintf("%016jx -> %d\n", (intmax_t)zone_offset, bytes);
	}
	return(bytes);
}

/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			      int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;

	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;

	if (layer2->zone == 0) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		hpanic("bad zone %d/%d", layer2->zone, zone);
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		hdkprintf("%016jx -> %016jx\n",
			(intmax_t)zone_offset, (intmax_t)result_offset);
	}
	return(result_offset);
}

/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
		    ((int64_t)hammer_limit_dirtybufspace) +
		    (slop << HAMMER_BIGBLOCK_BITS);

	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
		return(0);
	}

	return (ENOSPC);
}
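
/*
 * Example of the conversion above: with 8MB big-blocks
 * (HAMMER_BIGBLOCK_BITS == 23), a usedbytes estimate of 100MB shifts
 * down to 100MB >> 23 = 12 big-blocks, so the check succeeds only
 * while at least that many free big-blocks remain.
 */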

static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset;
	int error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(*offsetp);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more physically available space in layer1s
	 * of the current volume, go to the next volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
		hammer_skip_volume(offsetp);
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	return(error);
}

static void
hammer_skip_volume(hammer_off_t *offsetp)
{
	hammer_off_t offset;
	int zone, vol_no;

	offset = *offsetp;
	zone = HAMMER_ZONE_DECODE(offset);
	vol_no = HAMMER_VOL_DECODE(offset) + 1;
	KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);

	if (vol_no == HAMMER_MAX_VOLUMES) {	/* wrap */
		vol_no = 0;
		++zone;
	}

	*offsetp = HAMMER_ENCODE(zone, vol_no, 0);
}
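
/*
 * Offset encoding sketch: a hammer_off_t packs the zone in the top
 * 4 bits, the volume number in the next 8 bits, and the in-volume
 * offset in the low 52 bits, so the HAMMER_ENCODE(zone, vol_no, 0)
 * above is effectively
 *
 *	((hammer_off_t)(zone) << 60) | ((hammer_off_t)(vol_no) << 52)
 *
 * which restarts the iterator at offset 0 of the next volume (or at
 * the start of the next zone once the volume number wraps, triggering
 * the callers' wrap handling).
 */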