/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
static int hammer_check_volume(hammer_mount_t, hammer_off_t*);
/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}
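
/*
 * Illustrative sketch (not from the original source): keyed this way, a
 * big-block's covering reservation can be found with RB_LOOKUP() on the
 * block's zone-2 base offset, mirroring how the allocator uses the tree
 * further below:
 *
 *	base_off = hammer_xlate_to_zone2(next_offset &
 *					 ~HAMMER_BIGBLOCK_MASK64);
 *	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
 */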
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;
	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
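
	/*
	 * Illustrative note (not from the original source): the rounding
	 * above aligns every request to 16 bytes:
	 *
	 *	bytes = 1   ->  (1 + 15) & ~15   == 16
	 *	bytes = 16  ->  (16 + 15) & ~15  == 16
	 *	bytes = 17  ->  (17 + 15) & ~15  == 32
	 */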
	hmp = trans->hmp;
	root_volume = trans->rootvol;
	*errorp = 0;

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = (hint + 15) & ~(hammer_off_t)15;
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
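
	/*
	 * Illustrative note (not from the original source): the XOR tests
	 * above detect boundary crossings.  With a 16K buffer, next_offset
	 * 0x3ff0 and bytes 32 give tmp_offset 0x400f; (0x3ff0 ^ 0x400f) &
	 * ~0x3fff is nonzero, so the request would have straddled a buffer
	 * and next_offset is bumped to 0x4000, the start of the next one.
	 */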
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
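
	/*
	 * Illustrative note (not from the original source): the CRC test
	 * above is first performed without the lock and can race a writer
	 * that is mid-update, so a mismatch is only trusted after
	 * re-checking while holding blkmap_lock.  This check-lock-recheck
	 * pattern recurs for every layer1/layer2 CRC validation below.
	 */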
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Skip this layer1 entry if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if ((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
	    hmp->volume_to_remove) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}
	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					 ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}
	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}
	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
		if (*errorp) {
			result_offset = 0;
			goto failed;
		}
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * must save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
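
/*
 * Illustrative sketch (not from the original source): a backend caller
 * allocates B-Tree node space roughly like this, treating a zero return
 * with *errorp set as failure.  The hint and error handling shown here
 * are assumptions about the caller, not part of this file:
 *
 *	hammer_off_t node_offset;
 *	int error;
 *
 *	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
 *					    sizeof(struct hammer_node_ondisk),
 *					    hint, &error);
 *	if (node_offset == 0)
 *		return(error);
 */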
/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx;
	int loops = 0;
	int offset;
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);

	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;
	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					 ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;
	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
		}
	}
	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
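
/*
 * Illustrative sketch (not from the original source): a frontend caller
 * typically attaches the returned reservation to the in-memory record so
 * the backend can finalize it later.  The record field shown here is an
 * assumption for illustration only:
 *
 *	resv = hammer_blockmap_reserve(hmp, HAMMER_ZONE_LARGE_DATA_INDEX,
 *				       bytes, &zone_off, &error);
 *	if (resv) {
 *		record->resv = resv;	(hypothetical bookkeeping)
 *		... issue direct write I/O to zone_off ...
 *	}
 */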
/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanic for dedup reservations is also the
 * same as for normal write ones - the backend finalizes the reservation
 * with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}
	base_off = hammer_xlate_to_zone2(zone_offset &
					 ~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}

	hammer_unlock(&hmp->blkmap_lock);
failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}
static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* XXX do we really need it ? */
	if (temp > resv->bytes_free) {
		kprintf("BIGBLOCK UNDERFLOW\n");
		return (0);
	}

	resv->bytes_free -= bytes;
	return (1);
}
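
/*
 * Illustrative note (not from the original source): the guard above relies
 * on 32-bit signed wrap-around.  bytes_free may legitimately dip somewhat
 * negative because of de-dup, but subtracting two big-blocks' worth from a
 * deeply negative counter wraps, making temp compare greater than
 * bytes_free and flagging the underflow:
 *
 *	bytes_free = 100        ->  temp = 100 - 16MB, temp < bytes_free, ok
 *	bytes_free near INT_MIN ->  temp wraps positive, underflow reported
 *
 * The cpu_ccfence() discourages the compiler from folding the comparison
 * away under the assumption that signed overflow cannot happen.
 */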
/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: delbgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}
/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, struct hammer_blockmap_layer2 *layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;

		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}
/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}
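
/*
 * Illustrative lifecycle sketch (not from the original source): a delayed
 * reservation is retired by the flusher once its flush group has cycled,
 * conceptually something like:
 *
 *	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL &&
 *	       resv->flush_group <= seq)	(hypothetical completion check)
 *		hammer_reserve_clrdelay(hmp, resv);
 */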
/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}
/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;
	if (bytes == 0)
		return;
	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);
	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);
	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the meantime, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						 ~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
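
/*
 * Illustrative sketch (not from the original source): frees are always
 * issued by the backend under a transaction, for example when destroying
 * a record's data reference.  The leaf field names follow the on-disk
 * B-Tree element layout and are shown as an assumed caller shape:
 *
 *	hammer_blockmap_free(trans, leaf->data_offset, leaf->data_len);
 */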
/*
 * Backend function - de-dup (zone_offset, bytes) in a zone.
 */
void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;
	if (bytes == 0)
		return;
	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);
	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence(); /* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		kprintf("BIGBLOCK UNDERFLOW\n");
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;
	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
				trans->rootvol,
				vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;

	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}
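
/*
 * Illustrative pairing sketch (not from the original source): the frontend
 * reserves and writes, the backend finalizes when the record is committed:
 *
 *	resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *	... frontend issues direct data I/O to zone_off ...
 *	error = hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 */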
/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int bytes;
	int zone;
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
			(long long)zone_offset, bytes);
	}
	return(bytes);
}
/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			      int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;
	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;

	if (layer2->zone == 0) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						 ~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		panic("hammer_blockmap_lookup_verify: bad zone %d/%d",
		      layer2->zone, zone);
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_lookup_verify: %016llx -> %016llx\n",
			(long long)zone_offset, (long long)result_offset);
	}
	return(result_offset);
}
/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
		    ((int64_t)hammer_limit_dirtybufspace) +
		    (slop << HAMMER_BIGBLOCK_BITS);
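
	/*
	 * Illustrative worked example (not from the original source): with
	 * 8MB big-blocks (HAMMER_BIGBLOCK_BITS == 23), slop == 2 adds 16MB
	 * and 10 delayed reservations add 80MB to usedbytes, which is then
	 * compared below against the free big-block count converted to the
	 * same units.
	 */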
	hammer_count_extra_space_used = usedbytes;	/* debugging */
	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
		return(0);
	}

	return (ENOSPC);
}
static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset, offset;
	int zone, vol_no, error = 0;

	offset = *offsetp;
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more available space in layer1s of this volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		zone = HAMMER_ZONE_DECODE(offset);
		vol_no = HAMMER_VOL_DECODE(offset) + 1;
		KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);
		if (vol_no == HAMMER_MAX_VOLUMES) {
			/* wrap around to the first volume */
			vol_no = 0;
		}
		offset &= HAMMER_BLOCKMAP_LAYER2_MASK;
		*offsetp = HAMMER_ENCODE(zone, vol_no, offset);
	}
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);