/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER blockmap
 */
#include <vm/vm_page2.h>

#include "hammer.h"	/* assumed companion include; supplies the HAMMER types used below */
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
		    hammer_off_t base_offset, int zone,
		    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);
static int hammer_check_volume(hammer_mount_t, hammer_off_t*);
static void hammer_skip_volume(hammer_off_t *offsetp);
/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}
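/*
 * The tree is keyed on the big-block aligned zone-2 offset, so a typical
 * lookup (see the allocator below) masks an arbitrary offset down to its
 * big-block base first.  Sketch:
 *
 *	base_off = hammer_xlate_to_zone2(next_offset & ~HAMMER_BIGBLOCK_MASK64);
 *	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
 */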
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;
	hmp = trans->hmp;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
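	/*
	 * The rounding above is the usual power-of-two trick: adding 15
	 * and masking with ~15 rounds any request up to the next 16-byte
	 * boundary, e.g. 1..16 -> 16 and 17 -> 32.  The identity relied
	 * on (for non-negative n and a power-of-two alignment) is:
	 *
	 *	aligned = (n + align - 1) & ~(align - 1);	align = 16
	 */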
	root_volume = trans->rootvol;
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = (hint + 15) & ~(hammer_off_t)15;
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
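	/*
	 * Why the XOR works: bits above the buffer mask identify which
	 * buffer an offset falls in, so (start ^ end) has a bit set above
	 * the mask iff the first and last byte land in different buffers.
	 * With a 16K buffer (HAMMER_BUFSIZE, mask 0x3fff):
	 *
	 *	next_offset = 0x3ff0, bytes = 0x40  ->  tmp_offset = 0x402f
	 *	(0x3ff0 ^ 0x402f) & ~0x3fff != 0    ->  crosses, realign
	 */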
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);

	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
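	/*
	 * Note on the double check: the first CRC test runs without the
	 * blockmap lock and can race a writer mid-update.  Only if it
	 * fails do we take the lock and re-test against a now-stable
	 * entry; a second failure means real corruption rather than a
	 * race.  The same pattern repeats for every layer1/layer2 read
	 * in this file.
	 */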
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset)) {
			result_offset = 0;
			goto failed;
		}
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Skip the whole volume if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if (HAMMER_VOL_DECODE(layer1->phys_offset) == hmp->volume_to_remove) {
		hammer_skip_volume(&next_offset);
		goto again;
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
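	/*
	 * Rough shape of the radix math used above (sizes per the HAMMER
	 * on-disk layout: 8MB big-blocks and 16-byte layer2 entries, so
	 * one layer1 entry covers HAMMER_BLOCKMAP_LAYER2 = 4TB of zone
	 * address space):
	 *
	 *	l1 index = offset / HAMMER_BLOCKMAP_LAYER2
	 *	l2 index = (offset % HAMMER_BLOCKMAP_LAYER2) / HAMMER_BIGBLOCK_SIZE
	 *
	 * The *_OFFSET() macros simply scale these indices by the
	 * respective entry sizes.
	 */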
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_BIGBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)) {
		if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
			next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}
	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}
	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);
	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}
	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
	}
	result_offset = next_offset;
	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * we have to update it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume_noundo(NULL, root_volume);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

	/*
	 * Cleanup
	 */
failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
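/*
 * Typical caller pattern (illustrative sketch only; error handling
 * elided and the zone/hint choice is hypothetical).  A backend data
 * allocation might look roughly like this, with the hint taken from a
 * previously allocated, nearby offset to improve locality:
 *
 *	int error;
 *	hammer_off_t data_off;
 *
 *	data_off = hammer_blockmap_alloc(trans, HAMMER_ZONE_LARGE_DATA_INDEX,
 *					 rec_len, hint_off, &error);
 *	if (data_off == 0)
 *		return(error);		// ENOSPC or an I/O error
 */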
/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for big-blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx;
	int loops = 0;
	int offset;

	/*
	 * Setup
	 */
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}
	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a big-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		if (hammer_check_volume(hmp, &next_offset))
			goto failed;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}
	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = hammer_xlate_to_zone2(next_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
				      ~HAMMER_BIGBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;
	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE)) {
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
		}
	}
	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
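/*
 * Sketch of the frontend/backend split (hypothetical caller, error
 * handling elided): the frontend reserves space and issues its direct
 * write I/O, the backend finalizes once the related record commits,
 * and the reservation reference is eventually dropped:
 *
 *	resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *	...				// frontend writes data at zone_off
 *	hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *	hammer_blockmap_reserve_complete(hmp, resv);
 */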
/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanism for dedup reservations is also the
 * same as for normal write reservations - the backend finalizes the
 * reservation with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	/*
	 * Setup
	 */
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}
	base_off = hammer_xlate_to_zone2(zone_offset &
					~HAMMER_BIGBLOCK_MASK64);
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}
static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence();	/* XXX do we really need it? */
	if (temp > resv->bytes_free) {
		hdkprintf("BIGBLOCK UNDERFLOW\n");
		return(0);
	}

	resv->bytes_free -= bytes;
	return(1);
}
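/*
 * Note on update_bytes_free(): bytes_free is a signed 32-bit count that
 * de-dup is allowed to drive negative.  Subtracting a 2x big-block bias
 * and testing whether the result *grew* catches the value wrapping
 * toward INT32_MIN before the subtraction of 'bytes' itself could
 * underflow.  Illustrative case: bytes_free = INT32_MIN + 0x100000
 * makes temp wrap positive, so temp > bytes_free trips and the caller
 * backs out.  The cpu_ccfence() is a compiler barrier that keeps the
 * two reads of resv->bytes_free from being folded into one.
 */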
/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_BIGBLOCK_SIZE;
		base_offset = hammer_xlate_to_zoneX(resv->zone, resv->zone_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_BIGBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			hkprintf("delbgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			hkprintf("delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}
/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
				int zone, struct hammer_blockmap_layer2 *layer2)
{
	hammer_reserve_t resv;
	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_BIGBLOCK_SIZE;

		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}
/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flg_no = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}
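/*
 * The delay works by tagging the reservation with the *next* flush
 * group number (hmp->flusher.next + 1).  A hypothetical flusher-side
 * consumer would pop entries whose flush group has been passed, e.g.:
 *
 *	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL &&
 *	       resv->flg_no <= completed_flg) {
 *		hammer_reserve_clrdelay(hmp, resv);
 *	}
 *
 * (completed_flg is illustrative; the real bookkeeping lives in the
 * flusher code.)
 */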
/*
 * Reserve has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits
 * the ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}
/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);
	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffers are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the meantime, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);
failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
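/*
 * Lifecycle sketch for a fully freed big-block, stitched together from
 * the comments above: hammer_blockmap_free() zeroes the layer2 entry
 * and plants a covering reservation via hammer_reserve_setdelay_offset();
 * the reservation rides the delay list until the flusher passes its
 * flush group, at which point hammer_reserve_clrdelay() drops the ref
 * and, if the block is still completely free at that time,
 * hammer_blockmap_reserve_complete() invalidates any stale HAMMER
 * buffers before the big-block can be re-allocated.
 */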
void
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone __debugvar;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_BIGBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_BIGBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
	cpu_ccfence();	/* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		error = EFBIG;
		goto underflow;
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
underflow:
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}
/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	if (bytes == 0)
		return(0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		hdkprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;
	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return(error);
}
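/*
 * Example of why append_off takes the max rather than an increment:
 * two reservations in the same big-block, A at [0,4096) and B at
 * [4096,8192), may finalize as B then A.  B pushes append_off to 8192;
 * when A finalizes later, its end offset 4096 is below 8192 and
 * append_off is correctly left alone.
 */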
/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(hammer_is_zone2_mapped_index(zone));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;
	/*
	 * *curp becomes 1 only when there is no error and next_offset
	 * and zone_offset fall within the same big-block.
	 */
	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
		*curp = 0;	/* not same */
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x4000) {
		hdkprintf("%016llx -> %d\n", (long long)zone_offset, bytes);
	}
	return(bytes);
}
/*
 * Lookup a blockmap offset and verify blockmap layers.
 */
hammer_off_t
hammer_blockmap_lookup_verify(hammer_mount_t hmp, hammer_off_t zone_offset,
			      int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv __debugvar;
	int zone;
	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	result_offset = hammer_xlate_to_zone2(zone_offset);

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);
	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			hpanic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	if (layer2->zone == 0) {
		base_off = hammer_xlate_to_zone2(zone_offset &
						~HAMMER_BIGBLOCK_MASK64);
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		hpanic("bad zone %d/%d", layer2->zone, zone);
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			hpanic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		hdkprintf("%016llx -> %016llx\n",
			(long long)zone_offset, (long long)result_offset);
	}
	return(result_offset);
}
/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
		    ((int64_t)hammer_limit_dirtybufspace) +
		    (slop << HAMMER_BIGBLOCK_BITS);

	hammer_count_extra_space_used = usedbytes;	/* debugging */
	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
		return(0);
	}
	return(ENOSPC);
}
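/*
 * Worked example (illustrative numbers): with HAMMER_BIGBLOCK_BITS = 23
 * (8MB big-blocks), slop = 2 contributes 16MB to usedbytes and 10
 * delayed reservations contribute 80MB.  The final test just converts
 * the byte total back to big-blocks:
 *
 *	usedbytes >> 23  ->  big-blocks needed
 *
 * and compares it against the cached free big-block count, returning
 * 0 when there is room and ENOSPC otherwise.
 */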
static int
hammer_check_volume(hammer_mount_t hmp, hammer_off_t *offsetp)
{
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	hammer_buffer_t buffer1 = NULL;
	hammer_off_t layer1_offset;
	int error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(*offsetp);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto end;

	/*
	 * No more physically available space in layer1s
	 * of the current volume, go to the next volume.
	 */
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL)
		hammer_skip_volume(offsetp);
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	return(error);
}
static void
hammer_skip_volume(hammer_off_t *offsetp)
{
	hammer_off_t offset;
	int zone, vol_no;

	offset = *offsetp;
	zone = HAMMER_ZONE_DECODE(offset);
	vol_no = HAMMER_VOL_DECODE(offset) + 1;
	KKASSERT(vol_no <= HAMMER_MAX_VOLUMES);

	if (vol_no == HAMMER_MAX_VOLUMES) {	/* wrap */
		vol_no = 0;
		++zone;
	}

	*offsetp = HAMMER_ENCODE(zone, vol_no, 0);
}
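/*
 * Offset encoding recap: a hammer_off_t packs a 4-bit zone, an 8-bit
 * volume number and the byte offset.  Skipping a volume therefore just
 * re-encodes with vol_no + 1 and a zero offset; wrapping past the last
 * volume lands on HAMMER_ENCODE(zone + 1, 0, 0), which the allocator's
 * "check for wrap" test recognizes as end-of-zone (ENOSPC on the
 * second pass).
 */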