/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
				    hammer_off_t base_offset, int zone,
				    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
static int update_bytes_free(hammer_reserve_t resv, int bytes);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	     hammer_res_rb_compare, hammer_off_t, zone_offset);
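
/*
 * Illustrative note (a sketch, not part of the original comments):
 * RB_GENERATE2 keys the tree on the zone_offset field, so a reservation
 * covering a big-block is found by its zone-2 base offset, e.g.
 *
 *	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
 *
 * which is the lookup pattern used throughout this file.
 */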

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
	if (res1->zone_offset < res2->zone_offset)
		return(-1);
	if (res1->zone_offset > res2->zone_offset)
		return(1);
	return(0);
}

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
		      hammer_off_t hint, int *errorp)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_reserve_t resv;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t result_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int loops = 0;
	int offset;		/* offset within big-block */
	int use_hint;

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
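
	/*
	 * Worked example of the alignment above (illustrative): a request
	 * for bytes = 100 becomes (100 + 15) & ~15 = 112, i.e. the size
	 * is rounded up to the next 16-byte boundary before any boundary
	 * checks are made.
	 */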

	/*
	 * Setup
	 */
	hmp = trans->hmp;
	root_volume = trans->rootvol;
	*errorp = 0;
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Use the hint if we have one.
	 */
	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
		next_offset = (hint + 15) & ~(hammer_off_t)15;
		use_hint = 1;
	} else {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}
again:

	/*
	 * use_hint is turned off if we leave the hinted big-block.
	 */
	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
		next_offset = blockmap->next_offset;
		use_hint = 0;
	}

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			result_offset = 0;
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a large-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
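
	/*
	 * Worked example of the boundary test above (illustrative,
	 * assuming a 16KB HAMMER_BUFSIZE): next_offset = 0x3FF0 with
	 * bytes = 48 gives tmp_offset = 0x401F.  (0x3FF0 ^ 0x401F) has
	 * bits set above HAMMER_BUFMASK64, meaning the request would
	 * cross a buffer boundary, so next_offset is advanced to 0x4000.
	 */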

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if (offset == 0 && layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Skip this layer1 entry if it is pointing to a layer2 big-block
	 * on a volume that we are currently trying to remove from the
	 * file-system.  This is used by the volume-del code together with
	 * the reblocker to free up a volume.
	 */
	if ((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
	    hmp->volume_to_remove) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp) {
		result_offset = 0;
		goto failed;
	}

	/*
	 * Check CRC.  This can race another thread holding the lock
	 * and in the middle of modifying layer2.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * If operating in the current non-hint blockmap block, do not
	 * allow it to get over-full.  Also drop any active hinting so
	 * blockmap->next_offset is updated at the end.
	 *
	 * We do this for B-Tree and meta-data allocations to provide
	 * localization for updates.
	 */
	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
	     zone == HAMMER_ZONE_META_INDEX) &&
	    offset >= HAMMER_LARGEBLOCK_OVERFILL &&
	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
	) {
		if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
			next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
			use_hint = 0;
			goto again;
		}
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = (next_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
				      ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	}
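
	/*
	 * Illustrative example of the base_off computation above, assuming
	 * 8MB big-blocks: the low 23 bits of next_offset are masked off to
	 * get the big-block base and the zone bits are replaced with
	 * HAMMER_ZONE_RAW_BUFFER, so an in-zone offset ending in ...923000
	 * is looked up under the zone-2 key ending in ...900000.
	 */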

	/*
	 * Ok, we can allocate out of this layer2 big-block.  Assume ownership
	 * of the layer for real.  At this point we've validated any
	 * reservation that might exist and can just ignore resv.
	 */
	if (layer2->zone == 0) {
		/*
		 * Assign the big-block to our zone
		 */
		hammer_modify_buffer(trans, buffer1,
				     layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1,
					   HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		hammer_modify_buffer(trans, buffer2,
				     layer2, sizeof(*layer2));
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans, trans->rootvol,
					   vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	} else {
		hammer_modify_buffer(trans, buffer2,
				     layer2, sizeof(*layer2));
	}
	KKASSERT(layer2->zone == zone);

	/*
	 * NOTE: bytes_free can legally go negative due to de-dup.
	 */
	layer2->bytes_free -= bytes;
	KKASSERT(layer2->append_off <= offset);
	layer2->append_off = offset + bytes;
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);

	/*
	 * We hold the blockmap lock and should be the only ones
	 * capable of modifying resv->append_off.  Track the allocation
	 * as appropriate.
	 */
	KKASSERT(bytes != 0);
	if (resv) {
		KKASSERT(resv->append_off <= offset);
		resv->append_off = offset + bytes;
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;
		hammer_blockmap_reserve_complete(hmp, resv);
	}

	/*
	 * If we are allocating from the base of a new buffer we can avoid
	 * a disk read by calling hammer_bnew_ext().
	 */
	if ((next_offset & HAMMER_BUFMASK) == 0) {
		hammer_bnew_ext(trans->hmp, next_offset, bytes,
				errorp, &buffer3);
	}
	result_offset = next_offset;

	/*
	 * If we weren't supplied with a hint or could not use the hint
	 * then we wound up using blockmap->next_offset as the hint and
	 * need to save it.
	 */
	if (use_hint == 0) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		blockmap->next_offset = next_offset + bytes;
		hammer_modify_volume_done(root_volume);
	}
	hammer_unlock(&hmp->blkmap_lock);

failed:

	/*
	 * Cleanup
	 */
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);

	return(result_offset);
}
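
/*
 * Usage sketch for hammer_blockmap_alloc() (illustrative, not a call
 * site from this file): a backend allocation of a 64-byte record out
 * of the meta-data zone might look like
 *
 *	offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_META_INDEX,
 *				       64, 0, &error);
 *
 * returning a zone-encoded offset, or 0 with error set to ENOSPC when
 * both wrap-around passes fail.
 */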

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
			hammer_off_t *zone_offp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_buffer_t buffer3 = NULL;
	hammer_off_t tmp_offset;
	hammer_off_t next_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	hammer_reserve_t resx;
	int loops = 0;
	int offset;

	/*
	 * Setup
	 */
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Deal with alignment and buffer-boundary issues.
	 *
	 * Be careful, certain primary alignments are used below to allocate
	 * new blockmap blocks.
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	next_offset = blockmap->next_offset;
again:
	resv = NULL;

	/*
	 * Check for wrap
	 */
	if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
		if (++loops == 2) {
			*errorp = ENOSPC;
			goto failed;
		}
		next_offset = HAMMER_ZONE_ENCODE(zone, 0);
	}

	/*
	 * The allocation request may not cross a buffer boundary.  Special
	 * large allocations must not cross a large-block boundary.
	 */
	tmp_offset = next_offset + bytes - 1;
	if (bytes <= HAMMER_BUFSIZE) {
		if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
			next_offset = tmp_offset & ~HAMMER_BUFMASK64;
			goto again;
		}
	} else {
		if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
			next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
	}
	offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * If we are at a big-block boundary and layer1 indicates no
	 * free big-blocks, then we cannot allocate a new big-block in
	 * layer2, skip to the next layer1 entry.
	 */
	if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
	    layer1->blocks_free == 0) {
		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
		goto again;
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC if not allocating into uninitialized space (which we
	 * aren't when reserving space).
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Skip the layer if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone) {
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
		goto again;
	}
	if (offset < layer2->append_off) {
		hammer_unlock(&hmp->blkmap_lock);
		next_offset += layer2->append_off - offset;
		goto again;
	}

	/*
	 * The big-block might be reserved by another zone.  If it is reserved
	 * by our zone we may have to move next_offset past the append_off.
	 */
	base_off = (next_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
				      ~HAMMER_LARGEBLOCK_MASK64;
			goto again;
		}
		if (offset < resv->append_off) {
			hammer_unlock(&hmp->blkmap_lock);
			next_offset += resv->append_off - offset;
			goto again;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->zone_offset = base_off;
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resx->flags |= HAMMER_RESF_LAYER2FREE;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}
	resv->append_off = offset + bytes;

	/*
	 * If we are not reserving a whole buffer but are at the start of
	 * a new block, call hammer_bnew() to avoid a disk read.
	 *
	 * If we are reserving a whole buffer (or more), the caller will
	 * probably use a direct read, so do nothing.
	 *
	 * If we do not have a whole lot of system memory we really can't
	 * afford to block while holding the blkmap_lock!
	 */
	if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
		if (!vm_page_count_min(HAMMER_BUFSIZE / PAGE_SIZE))
			hammer_bnew(hmp, next_offset, errorp, &buffer3);
	}

	/*
	 * Adjust our iterator and alloc_offset.  The layer1 and layer2
	 * space beyond alloc_offset is uninitialized.  alloc_offset must
	 * be big-block aligned.
	 */
	blockmap->next_offset = next_offset + bytes;
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	if (buffer3)
		hammer_rel_buffer(buffer3, 0);
	hammer_rel_volume(root_volume, 0);
	*zone_offp = next_offset;

	return(resv);
}
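
/*
 * Illustrative pairing of the two halves (a sketch, not a verbatim call
 * site): the frontend reserves and writes,
 *
 *	resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *	... direct write I/O to zone_off ...
 *
 * and the backend later calls hammer_blockmap_finalize(trans, resv,
 * zone_off, bytes) when the related record is committed.
 */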

/*
 * Frontend function - Dedup bytes in a zone.
 *
 * Dedup reservations work exactly the same as normal write reservations
 * except we only adjust the bytes_free field and don't touch the append
 * offset.  The finalization mechanism for dedup reservations is also the
 * same as for normal write ones - the backend finalizes the reservation
 * with hammer_blockmap_finalize().
 */
hammer_reserve_t
hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, int bytes,
			      hammer_off_t zone_offset, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv = NULL;
	hammer_reserve_t resx = NULL;

	/*
	 * Setup
	 */
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(NULL);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
	if (*errorp)
		goto failed;

	/*
	 * Check CRC.
	 */
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Fail if the zone is owned by someone other than us.
	 */
	if (layer2->zone && layer2->zone != zone)
		goto failed;

	/*
	 * We need the lock from this point on.  We have to re-check zone
	 * ownership after acquiring the lock and also check for reservations.
	 */
	hammer_lock_ex(&hmp->blkmap_lock);

	if (layer2->zone && layer2->zone != zone) {
		hammer_unlock(&hmp->blkmap_lock);
		goto failed;
	}

	base_off = (zone_offset &
		    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
		    HAMMER_ZONE_RAW_BUFFER;
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
	if (resv) {
		if (resv->zone != zone) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resv, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			resv = NULL;
			goto failed;
		}
		++resv->refs;
	} else {
		resx = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resx->refs = 1;
		resx->zone = zone;
		resx->bytes_free = layer2->bytes_free;
		/*
		 * Due to possible big-block underflow we can't simply
		 * subtract bytes from bytes_free.
		 */
		if (update_bytes_free(resx, bytes) == 0) {
			hammer_unlock(&hmp->blkmap_lock);
			kfree(resx, hmp->m_misc);
			goto failed;
		}
		resx->zone_offset = base_off;
		resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
		KKASSERT(resv == NULL);
		resv = resx;
		++hammer_count_reservations;
	}

	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	hammer_rel_volume(root_volume, 0);

	return(resv);
}
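
/*
 * Illustrative difference from a normal reservation (a sketch, not a
 * verbatim call site): after
 *
 *	resv = hammer_blockmap_reserve_dedup(hmp, zone, bytes,
 *					     zone_off, &error);
 *
 * the reservation's bytes_free accounting has been charged for the
 * dedup'd data but append_off is left alone, since the data already
 * exists on-media at zone_off.
 */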

static int
update_bytes_free(hammer_reserve_t resv, int bytes)
{
	int32_t temp;

	/*
	 * Big-block underflow check
	 */
	temp = resv->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
	cpu_ccfence();	/* XXX do we really need it ? */
	if (temp > resv->bytes_free) {
		kprintf("BIGBLOCK UNDERFLOW\n");
		return (0);
	}

	resv->bytes_free -= bytes;
	return (1);
}
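
/*
 * Illustrative note on the check above: bytes_free can already be
 * negative due to de-dup, so repeated subtraction could eventually
 * wrap.  Subtracting two big-blocks' worth and comparing catches the
 * wrap: the difference is normally smaller than bytes_free, and only
 * signed wrap-around can make temp compare larger, at which point the
 * caller backs the reservation out instead of corrupting the count.
 */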

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
	hammer_off_t base_offset;
	int error;

	KKASSERT(resv->refs > 0);
	KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Setting append_off to the max prevents any new allocations
	 * from occurring while we are trying to dispose of the reservation,
	 * allowing us to safely delete any related HAMMER buffers.
	 *
	 * If we are unable to clean out all related HAMMER buffers we
	 * requeue the delay.
	 */
	if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
		resv->append_off = HAMMER_LARGEBLOCK_SIZE;
		base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
		base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
		if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
			hammer_dedup_cache_inval(hmp, base_offset);
		error = hammer_del_buffers(hmp, base_offset,
					   resv->zone_offset,
					   HAMMER_LARGEBLOCK_SIZE,
					   1);
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: dellgblk %016jx error %d\n",
				(intmax_t)base_offset, error);
		}
		if (error)
			hammer_reserve_setdelay(hmp, resv);
	}
	if (--resv->refs == 0) {
		if (hammer_debug_general & 0x20000) {
			kprintf("hammer: delresvr %016jx zone %02x\n",
				(intmax_t)resv->zone_offset, resv->zone);
		}
		KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
		RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
		kfree(resv, hmp->m_misc);
		--hammer_count_reservations;
	}
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
			       int zone, struct hammer_blockmap_layer2 *layer2)
{
	hammer_reserve_t resv;

	/*
	 * Allocate the reservation if necessary.
	 *
	 * NOTE: need lock in future around resv lookup/allocation and
	 * the setdelay call, currently refs is not bumped until the call.
	 */
again:
	resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
	if (resv == NULL) {
		resv = kmalloc(sizeof(*resv), hmp->m_misc,
			       M_WAITOK | M_ZERO | M_USE_RESERVE);
		resv->zone = zone;
		resv->zone_offset = base_offset;
		resv->refs = 0;
		resv->append_off = HAMMER_LARGEBLOCK_SIZE;
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
		if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
			kfree(resv, hmp->m_misc);
			goto again;
		}
		++hammer_count_reservations;
	} else {
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
			resv->flags |= HAMMER_RESF_LAYER2FREE;
	}
	hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	if (resv->flags & HAMMER_RESF_ONDELAY) {
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	} else {
		++resv->refs;
		++hmp->rsv_fromdelay;
		resv->flags |= HAMMER_RESF_ONDELAY;
		resv->flush_group = hmp->flusher.next + 1;
		TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
	}
}

/*
 * A reservation has reached its flush point, remove it from the delay list
 * and finish it off.  hammer_blockmap_reserve_complete() inherits the
 * ondelay reference.
 */
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
	KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
	resv->flags &= ~HAMMER_RESF_ONDELAY;
	TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
	--hmp->rsv_fromdelay;
	hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
		     hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t base_off;
	int error;
	int zone;

	if (bytes == 0)
		return;
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_LARGEBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
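
	/*
	 * Illustrative note on the alignment assertion above, assuming
	 * 8MB big-blocks: XORing the first and last byte offsets of the
	 * request and masking with ~HAMMER_LARGEBLOCK_MASK64 is non-zero
	 * only when the two offsets fall in different big-blocks, so a
	 * free may never straddle a big-block boundary.
	 */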

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_LARGEBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	layer2->bytes_free += bytes;
	KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

	/*
	 * If a big-block becomes entirely free we must create a covering
	 * reservation to prevent premature reuse.  Note, however, that
	 * the big-block and/or reservation may still have an append_off
	 * that allows further (non-reused) allocations.
	 *
	 * Once the reservation has been made we re-check layer2 and if
	 * the big-block is still entirely free we reset the layer2 entry.
	 * The reservation will prevent premature reuse.
	 *
	 * NOTE: hammer_buffer's are only invalidated when the reservation
	 * is completed, if the layer2 entry is still completely free at
	 * that time.  Any allocations from the reservation that may have
	 * occurred in the mean time, or active references on the reservation
	 * from new pending allocations, will prevent the invalidation from
	 * occurring.
	 */
	if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
		base_off = (zone_offset &
			    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
			    HAMMER_ZONE_RAW_BUFFER;

		hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
		if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
			layer2->zone = 0;
			layer2->append_off = 0;
			hammer_modify_buffer(trans, buffer1,
					     layer1, sizeof(*layer1));
			++layer1->blocks_free;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			hammer_modify_volume_field(trans,
					trans->rootvol,
					vol0_stat_freebigblocks);
			++root_volume->ondisk->vol0_stat_freebigblocks;
			hmp->copy_stat_freebigblocks =
				root_volume->ondisk->vol0_stat_freebigblocks;
			hammer_modify_volume_done(trans->rootvol);
		}
	}
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - de-dup (zone_offset, bytes) in a zone.
 */
int
hammer_blockmap_dedup(hammer_transaction_t trans,
		      hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t temp;
	int error;
	int zone;

	if (bytes == 0)
		return (0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_LARGEBLOCK_SIZE);
	KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
		  ~HAMMER_LARGEBLOCK_MASK64) == 0);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Free space previously allocated via blockmap_alloc().
	 *
	 * NOTE: bytes_free can be and remain negative due to de-dup ops
	 *	 but can never become larger than HAMMER_LARGEBLOCK_SIZE.
	 */
	KKASSERT(layer2->zone == zone);
	temp = layer2->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
	cpu_ccfence();	/* prevent gcc from optimizing temp out */
	if (temp > layer2->bytes_free) {
		error = ERANGE;
		goto underflow;
	}
	layer2->bytes_free -= bytes;

	KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
underflow:
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return (error);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
			 hammer_reserve_t resv,
			 hammer_off_t zone_offset, int bytes)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int error;
	int zone;
	int offset;

	if (bytes == 0)
		return (0);
	hmp = trans->hmp;

	/*
	 * Alignment
	 */
	bytes = (bytes + 15) & ~15;
	KKASSERT(bytes <= HAMMER_XBUFSIZE);

	/*
	 * Basic zone validation & locking
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = trans->rootvol;
	error = 0;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
	if (error)
		goto failed;
	KKASSERT(layer1->phys_offset &&
		 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
	if (error)
		goto failed;
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

	hammer_lock_ex(&hmp->blkmap_lock);

	hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

	/*
	 * Finalize some or all of the space covered by a current
	 * reservation.  An allocation in the same layer may have
	 * already assigned ownership.
	 */
	if (layer2->zone == 0) {
		hammer_modify_buffer(trans, buffer1,
				     layer1, sizeof(*layer1));
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1,
					   HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
		layer2->zone = zone;
		KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		KKASSERT(layer2->append_off == 0);
		hammer_modify_volume_field(trans,
				trans->rootvol,
				vol0_stat_freebigblocks);
		--root_volume->ondisk->vol0_stat_freebigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(trans->rootvol);
	}
	if (layer2->zone != zone)
		kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
	KKASSERT(layer2->zone == zone);
	KKASSERT(bytes != 0);
	layer2->bytes_free -= bytes;
	if (resv)
		resv->flags &= ~HAMMER_RESF_LAYER2FREE;

	/*
	 * Finalizations can occur out of order, or combined with allocations.
	 * append_off must be set to the highest allocated offset.
	 */
	offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
	if (layer2->append_off < offset)
		layer2->append_off = offset;
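
	/*
	 * Worked example of the out-of-order case (illustrative): if
	 * reservations covering big-block-relative ranges [0,112) and
	 * [112,176) are finalized in reverse order, the second call sees
	 * layer2->append_off already at 176 and leaves it alone, so
	 * append_off always ends up at the highest finalized offset.
	 */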

	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
	hammer_modify_buffer_done(buffer2);
	hammer_unlock(&hmp->blkmap_lock);

failed:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);
	return (error);
}

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *	    and the result will also not represent the actual number
 *	    of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	int32_t bytes;
	int zone;

	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (root_volume == NULL) {
		*curp = 0;
		return(0);
	}
	blockmap = &hmp->blockmap[zone];
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	KKASSERT(layer1->phys_offset);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 *
	 * (reuse buffer, layer1 pointer becomes invalid)
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp) {
		bytes = 0;
		goto failed;
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}
	KKASSERT(layer2->zone == zone);

	bytes = layer2->bytes_free;

	if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
		*curp = 0;
	else
		*curp = 1;
failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
			(long long)zone_offset, bytes);
	}
	return(bytes);
}

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
		       int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	hammer_off_t base_off;
	hammer_reserve_t resv;
	int zone;

	/*
	 * Calculate the zone-2 offset.
	 */
	zone = HAMMER_ZONE_DECODE(zone_offset);
	KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

	result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_RAW_BUFFER;

	/*
	 * We can actually stop here, normal blockmaps are now direct-mapped
	 * onto the freemap and so represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return(result_offset);
	}
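
	/*
	 * Illustrative translation (assuming HAMMER_ZONE_RAW_BUFFER is
	 * zone 2): only the zone bits in the top of the offset change,
	 * so a B-Tree zone offset of the form 0x8xxxxxxxxxxxxxxx maps to
	 * the zone-2 offset 0x2xxxxxxxxxxxxxxx with identical low bits.
	 */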

	/*
	 * Validate the allocation zone
	 */
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	KKASSERT(freemap->phys_offset != 0);

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
	if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			panic("CRC FAILED: LAYER1");
		hammer_unlock(&hmp->blkmap_lock);
	}

	/*
	 * Dive layer 2, each entry represents a large-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
	layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
	if (*errorp)
		goto failed;
	if (layer2->zone == 0) {
		base_off = (zone_offset &
			    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
			    HAMMER_ZONE_RAW_BUFFER;
		resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
				 base_off);
		KKASSERT(resv && resv->zone == zone);
	} else if (layer2->zone != zone) {
		panic("hammer_blockmap_lookup: bad zone %d/%d",
		      layer2->zone, zone);
	}
	if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
		hammer_lock_ex(&hmp->blkmap_lock);
		if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
			panic("CRC FAILED: LAYER2");
		hammer_unlock(&hmp->blkmap_lock);
	}

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	hammer_rel_volume(root_volume, 0);
	if (hammer_debug_general & 0x0800) {
		kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
			(long long)zone_offset, (long long)result_offset);
	}
	return(result_offset);
}

/*
 * Check space availability
 *
 * MPSAFE - does not require fs_token
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
	const int in_size = sizeof(struct hammer_inode_data) +
			    sizeof(union hammer_btree_elm);
	const int rec_size = (sizeof(union hammer_btree_elm) * 2);
	int64_t usedbytes;
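
	/*
	 * Worked example of the computation below (illustrative, assuming
	 * 8MB big-blocks and a hypothetical load): with rsv_inodes = 1000,
	 * rsv_recs = 2000, rsv_databytes = 4MB, rsv_fromdelay = 2 and
	 * slop = 1, usedbytes comes to roughly 1000*in_size +
	 * 2000*rec_size + 4MB + 16MB + 4*hidirtybufspace + 8MB, and the
	 * mount is considered out of space when that total, expressed in
	 * big-blocks, exceeds copy_stat_freebigblocks.
	 */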

	usedbytes = hmp->rsv_inodes * in_size +
		    hmp->rsv_recs * rec_size +
		    hmp->rsv_databytes +
		    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
		    ((int64_t)hidirtybufspace << 2) +
		    (slop << HAMMER_LARGEBLOCK_BITS);

	hammer_count_extra_space_used = usedbytes;	/* debugging */
	if (resp)
		*resp = usedbytes;

	if (hmp->copy_stat_freebigblocks >=
	    (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
		return(0);
	}
	return(ENOSPC);
}